repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-680k)
---|---|---|---|---|
j-rivero/ros_buildfarm | ros_buildfarm/debian_repo.py | 840d2dc1dd5db00d5407da4644cd2bcbc5e0ac88 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
def get_debian_repo_index(debian_repository_baseurl, target, cache_dir):
url = os.path.join(
debian_repository_baseurl, 'dists', target.os_code_name, 'main')
if target.arch == 'source':
url = os.path.join(url, 'source', 'Sources.gz')
else:
url = os.path.join(url, 'binary-%s' % target.arch, 'Packages.gz')
cache_filename = fetch_and_cache_gzip(url, cache_dir)
logging.debug('Reading file: %s' % cache_filename)
# split package blocks
with open(cache_filename, 'rb') as f:
blocks = f.read().decode('utf8').split('\n\n')
blocks = [b.splitlines() for b in blocks if b]
# extract version number of every package
package_versions = {}
for lines in blocks:
prefix = 'Package: '
assert lines[0].startswith(prefix)
debian_pkg_name = lines[0][len(prefix):]
prefix = 'Version: '
versions = [l[len(prefix):] for l in lines if l.startswith(prefix)]
version = versions[0] if len(versions) == 1 else None
prefix = 'Source: '
source_names = [l[len(prefix):] for l in lines if l.startswith(prefix)]
source_name = source_names[0] if len(source_names) == 1 else None
package_versions[debian_pkg_name] = PlatformPackageDescriptor(version, source_name)
return package_versions
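# Illustrative note (added, not part of the original file): each block parsed
# above is one stanza of a Debian package index, e.g. (hypothetical package name)
#   Package: python3-foo
#   Source: foo
#   Version: 1.2.3-1
# Stanzas are separated by blank lines, which is why the file is split on '\n\n'
# and only the 'Package: ', 'Version: ' and 'Source: ' prefixes are read back.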
| [((804, 881), 'os.path.join', 'os.path.join', (['debian_repository_baseurl', '"""dists"""', 'target.os_code_name', '"""main"""'], {}), "(debian_repository_baseurl, 'dists', target.os_code_name, 'main')\n", (816, 881), False, 'import os\n'), ((1127, 1177), 'logging.debug', 'logging.debug', (["('Reading file: %s' % cache_filename)"], {}), "('Reading file: %s' % cache_filename)\n", (1140, 1177), False, 'import logging\n'), ((937, 978), 'os.path.join', 'os.path.join', (['url', '"""source"""', '"""Sources.gz"""'], {}), "(url, 'source', 'Sources.gz')\n", (949, 978), False, 'import os\n'), ((1003, 1062), 'os.path.join', 'os.path.join', (['url', "('binary-%s' % target.arch)", '"""Packages.gz"""'], {}), "(url, 'binary-%s' % target.arch, 'Packages.gz')\n", (1015, 1062), False, 'import os\n')] |
Drayux/Battlematus | player.py | 1709a15b58d9274b99ec36eff1a181014d155037 | # PLAYER
class player:
	def __init__(self):
		pass
| [] |
ConvolutedDog/Implicit-Im2col-for-Backpropagation | Framwork-Backpropagation/utils/utils_v2.py | 529a62f52903326b9289091b7d0abb45e6c7bb31 | # Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import torch
import torch.nn as nn
import torch.nn.functional as F
from graphviz import Digraph, render
from torch.autograd import Variable
@torch.no_grad()
def cross_entropy_loss(y_predict, y_true):
print('\n=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' Start ===========================')
print('# y_predict.shape: ', list(y_predict.shape))
print('# y_true.shape: ', list(y_true.shape))
y_shift = torch.sub(y_predict, torch.max(y_predict, dim=1, keepdim=True).values)
y_exp = torch.exp(y_shift)
y_probability = torch.div(y_exp, torch.sum(y_exp, dim=1, keepdim=True))
ypred_loss = torch.mean(-torch.sum(torch.mul(y_true, torch.log(y_probability)), dim=1, keepdim=True))
dLoss_dypred = y_probability - y_true
print('# dLoss_dypred.shape: ', list(dLoss_dypred.shape))
print('# Self calculated loss: ', ypred_loss.item())
print('=========================== Layer:'+' {0:18}'.format('cross_entropy_loss')+' End =============================')
return ypred_loss, dLoss_dypred
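# --- Added illustrative sketch (not part of the original file) ---
# Minimal self-check, assuming one-hot targets: the gradient returned above
# should equal the closed-form softmax(y_predict) - y_true.
def _example_cross_entropy_grad_check():
	y_pred = torch.randn(2, 3)
	y_true = F.one_hot(torch.tensor([0, 2]), num_classes=3).float()
	_, dz = cross_entropy_loss(y_pred, y_true)
	return torch.allclose(dz, torch.softmax(y_pred, dim=1) - y_true)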
@torch.no_grad()
def fc_backward(dLoss_dnextz, z, w):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(w.shape))
print('# bias.shape: ', '['+str(dLoss_dnextz.shape[1])+']')
N = z.shape[0]
if len(z.shape) == 4:
z = z.view(z.size(0), -1)
dLoss_dz = torch.matmul(dLoss_dnextz, w) #delta
dLoss_dfcW = torch.matmul(dLoss_dnextz.t(), z)
dLoss_dfcB = torch.sum(dLoss_dnextz, dim=0)
print('# dz.shape: ', list(dLoss_dz.shape))
print('# dweight.shape: ', list(dLoss_dfcW.shape))
print('# dbias.shape: ', list(dLoss_dfcB.shape))
return dLoss_dz, dLoss_dfcW/N, dLoss_dfcB/N
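# --- Added illustrative sketch (not part of the original file) ---
# Minimal self-check of fc_backward against autograd for y = z @ W^T: dz should
# match autograd exactly, while the returned dW and db are additionally divided
# by the batch size N inside fc_backward.
def _example_fc_backward_check():
	N, D_in, D_out = 4, 5, 3
	z = torch.randn(N, D_in, requires_grad=True)
	w = torch.randn(D_out, D_in, requires_grad=True)
	g = torch.randn(N, D_out)  # upstream gradient
	z.matmul(w.t()).backward(g)
	dz, dw, db = fc_backward(g, z.detach(), w.detach())
	return torch.allclose(dz, z.grad) and torch.allclose(dw * N, w.grad)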
@torch.no_grad()
def view_backward(dLoss_dnextz, last_z, params):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
print('# last_z.shape: ', list(last_z.shape))
if params:
pooling = params[0]
stride = params[1]
padding = params[2]
output_size = (int((last_z.shape[2]-pooling[0]+2*padding[0])/stride[0]+1), \
int((last_z.shape[3]-pooling[0]+2*padding[0])/stride[0]+1))
dLoss_dz = dLoss_dnextz.reshape(last_z.shape[0], last_z.shape[1], output_size[0], output_size[1])
else:
dLoss_dz = dLoss_dnextz.reshape(last_z.shape)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
def add_backward(dLoss_dnextz):
print('# next_dz.shape: ', list(dLoss_dnextz.shape))
dLoss_dz = dLoss_dnextz
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def relu_backward(next_dz, z):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
zeros_tensor = torch.zeros_like(next_dz)
dLoss_dz = torch.where(torch.gt(z, 0), next_dz, zeros_tensor)
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
@torch.no_grad()
def dropback_backward(next_dz, mask, p):
print('# zeros probability: ', p)
print('# next_dz.shape: ', list(next_dz.shape))
print('# mask.shape: ', list(mask.shape))
zeros_tensor = torch.zeros_like(mask)
dLoss_dz = torch.mul(torch.where(torch.eq(mask, 1.), next_dz, zeros_tensor), 1./(1.-p))
print('# dz.shape: ', list(dLoss_dz.shape))
return dLoss_dz
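# --- Added illustrative sketch (not part of the original file) ---
# Minimal example of the inverted-dropout backward rule implemented above:
# the upstream gradient is masked and rescaled by 1/(1-p).
def _example_dropout_backward_check():
	p = 0.5
	g = torch.ones(2, 4)
	mask = torch.tensor([[1., 0., 1., 1.], [0., 1., 1., 0.]])
	dz = dropback_backward(g, mask, p)
	return torch.allclose(dz, mask * g / (1. - p))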
@torch.no_grad()
def max_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
flat_idx = torch.argmax(padding_z[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]])
h_idx = strides[0] * i + flat_idx // pooling[1]
w_idx = strides[1] * j + flat_idx % pooling[1]
padding_dz[n, c, h_idx, w_idx] += next_dz[n, c, i, j]
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
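# --- Added illustrative sketch (not part of the original file) ---
# Minimal self-check of max_pooling_backward against autograd: the upstream
# gradient should be routed entirely to the argmax position of each window.
def _example_max_pool_grad_check():
	z = torch.randn(1, 1, 4, 4, requires_grad=True)
	out = F.max_pool2d(z, kernel_size=2, stride=2)
	out.backward(torch.ones_like(out))
	dz = max_pooling_backward(torch.ones(1, 1, 2, 2), z.detach(), (2, 2), (2, 2), (0, 0))
	return torch.allclose(dz, z.grad)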
@torch.no_grad()
def batchnorm2d_backward(next_dz, z, eps, gamma=torch.Tensor([1.,1.,1.])):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# eps: ', eps)
print('# gamma.shape: ', list(gamma.shape))
N, C, H, W = z.shape
m = N*H*W
shape = [N,C,H,W]
import numpy as np
ax = list(np.arange(len(shape)))
shape.pop(1)
ax.pop(1)
axis = tuple(ax)
dxhut = torch.zeros_like(next_dz)
for c in range(C):
dxhut[:,c] = next_dz[:,c]*gamma[c]
dz1 = m*dxhut
mu = z.mean(axis=axis, keepdim=True)
xmu = z - mu
xmu2 = xmu**2
var = xmu2.sum(axis=axis, keepdim=True)/m
ivar = 1./torch.pow(var+eps, 0.5)
dz2 = (ivar**2)*((dxhut*xmu).sum(axis=axis, keepdim=True))*xmu
dz3 = dxhut.sum(axis=axis, keepdim=True)
dz = ivar/m*(dz1-dz2-dz3)
print('# dz.shape: ', list(dz.shape))
return dz
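# Note (added, not part of the original file): the expression above is the
# standard batch-norm input gradient. With x_hat = (x - mu) / sqrt(var + eps)
# and m = N*H*W elements per channel, it computes
#   dx = 1/(m*sqrt(var+eps)) * (m*gamma*dy - x_hat*sum(gamma*dy*x_hat) - sum(gamma*dy))
# where dy is next_dz and the sums run over the N, H and W axes of each channel.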
@torch.no_grad()
def average_pooling_backward(next_dz, z, pooling, strides, padding=(0, 0)):
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# padding: ', padding)
print('# strides: ', strides)
N, C, H, W = z.shape
_, _, out_h, out_w = next_dz.shape
padding_z = F.pad(z, pad=(padding[1],padding[1],padding[0],\
padding[0],0,0), mode='constant', value=0)
padding_dz = torch.zeros_like(padding_z)
for n in torch.arange(N):
for c in torch.arange(C):
for i in torch.arange(out_h):
for j in torch.arange(out_w):
padding_dz[n, c,
strides[0] * i:strides[0] * i + pooling[0],
strides[1] * j:strides[1] * j + pooling[1]] += next_dz[n, c, i, j] / (pooling[0] * pooling[1])
dz = _remove_padding(padding_dz, padding) # padding_z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
print('# dz.shape: ', list(dz.shape))
return dz
@torch.no_grad()
def _remove_padding(z, padding):
if padding[0] > 0 and padding[1] > 0:
return z[:, :, padding[0]:-padding[0], padding[1]:-padding[1]]
elif padding[0] > 0:
return z[:, :, padding[0]:-padding[0], :]
elif padding[1] > 0:
return z[:, :, :, padding[1]:-padding[1]]
else:
return z
@torch.no_grad()
def conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1)):
N, C, H, W = z.shape
D, C, k1, k2 = K.shape
N, D, H1, W1 = next_dz.shape
print('# next_dz.shape: ', list(next_dz.shape))
print('# z.shape: ', list(z.shape))
print('# weight.shape: ', list(K.shape))
print('# bias.shape: ', '['+str(K.shape[0])+']')
print('# padding: ', padding)
print('# strides: ', strides)
padding_next_dz = _insert_zeros(next_dz, strides)
flip_K = torch.flip(K, (2, 3))
swap_flip_K = torch.swapaxes(flip_K, 0, 1)
ppadding_next_dz = F.pad(padding_next_dz, pad=(k2-1-padding[1],k2-1-padding[1],\
k1-1-padding[0],k1-1-padding[0],0,0), mode='constant', value=0)
dz = _conv_forward(ppadding_next_dz, swap_flip_K)
swap_z = torch.swapaxes(z, 0, 1)
dK = _conv_forward(torch.swapaxes(F.pad(z, pad=(padding[1],padding[1],\
padding[0],padding[0],0,0), mode='constant', value=0), 0, 1), torch.swapaxes(padding_next_dz, 0, 1))
	db = torch.sum(torch.sum(torch.sum(next_dz, axis=-1), axis=-1), axis=0)  # sum over height and width, then over the batch dimension
print('# dz.shape: ', list(dz.shape))
print('# dweight.shape: ', list(dK.transpose(0,1).shape))
print('# dbias.shape: ', list(db.shape))
return dz, (dK/N).transpose(0,1), db/N
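# --- Added illustrative sketch (not part of the original file) ---
# Minimal self-check: for a stride-1, padding-0 convolution the input gradient
# from conv_backward should match torch.nn.grad.conv2d_input (assumed available
# in the installed PyTorch). Note that conv_backward divides dW and db, but not
# dz, by the batch size.
def _example_conv_backward_check():
	from torch.nn.grad import conv2d_input
	z = torch.randn(2, 3, 8, 8)
	K = torch.randn(4, 3, 3, 3)
	next_dz = torch.randn(2, 4, 6, 6)  # upstream gradient of the 6x6 conv output
	dz, dK, db = conv_backward(next_dz, K, z, padding=(0, 0), strides=(1, 1))
	ref_dz = conv2d_input(z.shape, K, next_dz)
	return torch.allclose(dz, ref_dz, atol=1e-4)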
@torch.no_grad()
def _conv_forward(x, weight, strides=(1,1)):
n, c, h_in, w_in = x.shape
d, c, k, j = weight.shape
x_pad = x
x_pad = x_pad.unfold(2, k, strides[0])
x_pad = x_pad.unfold(3, j, strides[1])
out = torch.einsum(
'nchwkj,dckj->ndhw',
x_pad, weight)
return out
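# --- Added illustrative sketch (not part of the original file) ---
# Minimal self-check: the unfold + einsum forward above should agree with
# F.conv2d for a stride-1, padding-0 convolution.
def _example_conv_forward_check():
	x = torch.randn(2, 3, 7, 7)
	w = torch.randn(4, 3, 3, 3)
	return torch.allclose(_conv_forward(x, w), F.conv2d(x, w), atol=1e-5)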
@torch.no_grad()
def _insert_zeros(dz, strides):
N, D, H, W = dz.shape
H_last = (H-1)*(strides[0]-1) + H
W_last = (W-1)*(strides[1]-1) + W
pz = torch.zeros(N, D, H_last, W_last)
for n in range(N):
for d in range(D):
for h in range(0, H_last, strides[0]):
for w in range(0, W_last, strides[1]):
pz[n,d,h,w] = dz[n,d,h//strides[0],w//strides[1]]
return pz
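# Note (added, not part of the original file): for strides > 1 the upstream
# gradient is dilated with zeros by _insert_zeros before the full correlation in
# conv_backward, e.g. a 2x2 map with strides=(2, 2) becomes a 3x3 map whose
# off-grid entries are zero (the transposed-convolution view of the gradient).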
@torch.no_grad()
def judge_tensors_equal(tensor_A, tensor_B):
if(not tensor_A.shape == tensor_B.shape):
print('Shape of two compard tensors is not equal.')
return None
error = 0
error_tolerance = 0.001
np_A = tensor_A.detach().numpy()
np_B = tensor_B.detach().numpy()
if len(tensor_A.shape) == 4:
N, C, H, W = tensor_A.shape
for n in range(N):
for c in range(C):
for h in range(H):
for w in range(W):
if np_A[n,c,h,w]-np_B[n,c,h,w] > error_tolerance or np_B[n,c,h,w]-np_A[n,c,h,w] > error_tolerance:
error += 1
if error%20 == 0:
pass
print('error', np_A[n,c,h,w], np_B[n,c,h,w])
else:
if n*c*h*w % 20000000000000 == 0:
pass
#print('right', np_A[n,c,h,w], np_B[n,c,h,w])
#print('Error rate: ', error/(N*C*H*W))
print('4D-error-rate: ', end=' ')
return error/(N*C*H*W)
elif len(tensor_A.shape) == 1:
C = tensor_A.shape[0]
for c in range(C):
if np_A[c]-np_B[c] > error_tolerance or np_B[c]-np_A[c] > error_tolerance:
#print(np_A[c], np_B[c])
error += 1
#print('Error rate: ', error/C)
print('1D-error-rate: ', end=' ')
return error/C
elif len(tensor_A.shape) == 2:
N, C = tensor_A.shape
for n in range(N):
for c in range(C):
if np_A[n,c]-np_B[n,c] > error_tolerance or np_B[n,c]-np_A[n,c] > error_tolerance:
#print(np_A[n,c], np_B[n,c])
error += 1
#print('Error rate: ', error/(C*N))
print('2D-error-rate: ', end=' ')
return error/(C*N)
@torch.no_grad()
def get_featuremap(featuremap_dir=None):
import os
featuremap = []
if featuremap_dir == None:
pth_dir = "./tmp_file/"
else:
pth_dir = featuremap_dir
files = os.listdir(pth_dir)
file_nums = []
for i in range(len(files)):
if '.pth' in files[i]:
file_nums.append(int(files[i].split('.pth')[0]))
file_nums.sort()
for file_num in file_nums:
tensor = torch.load(pth_dir+str(file_num)+'.pth')
featuremap.append(tensor)
delete_allpths(pth_dir=None)
return featuremap
@torch.no_grad()
def get_structure_parameters_v1(model):
layers = []
for layer in model.modules():
if not ':' in str(layer):
layers.append(layer)
parameters = []
fc_conv_weights = []
for layer in layers:
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights.append(layer.weight)
parameters.append(Conv2d_params)
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters.append({'layer_name': layer_name})
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters.append(MaxPool2d_params)
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters.append(AvgPool2d_params)
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters.append(Dropout_params)
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights.append(layer.weight)
parameters.append(BatchNorm2d_params)
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights.append(layer.weight)
parameters.append(Linear_params)
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters.append(AdaptiveAvgPool2d_params)
else:
print('The layer has not been processed in get_structure_parameters_v1!')
return parameters, fc_conv_weights
@torch.no_grad()
def delete_allpths(pth_dir=None):
import os
if pth_dir == None:
pth_dir = "./tmp_file/"
for root, dirs, files in os.walk(pth_dir, topdown=False):
for name in files:
if name.endswith('.pth',):
os.remove(os.path.join(root, name))
@torch.no_grad()
def mul_items(tensor_size):
x = list(tensor_size)
mul = 1.
for i in range(len(x)):
mul *= x[i]
return mul
@torch.no_grad()
def gradient_backward_v1(model, img, label, num_class=1000):
return_dz = []
parameters, fc_conv_weights = get_structure_parameters_v1(model)
featuremap = get_featuremap(featuremap_dir=None)
featuremap.insert(0, img) ###
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[-1], y_true)
print('Self calculated loss: ', loss)
featuremap.pop()
return_dz.append(dLoss_dz)
dW_dB_fc_conv = []
for i in range(len(parameters)-1, -1, -1):
layer = parameters[i]
print('\n======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[-1]
weight_z = fc_conv_weights[-1]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
if not len(featuremap) == 1:
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'ReLU':
z = featuremap[-1]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[-1]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz.append(dLoss_dz)
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[-1]
z = featuremap[-1]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'Dropout':
p = layer['p']
mask = featuremap[-1]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz.append(dLoss_dz)
featuremap.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[-1]
gamma = fc_conv_weights[-1]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz.append(dLoss_dz)
fc_conv_weights.pop()
lastpop = featuremap.pop()
if not len(dLoss_dz.shape) == len(lastpop.shape):
dLoss_dz = dLoss_dz.reshape(lastpop.shape)
else:
print('Not completed in gradient_backward_v1!')
print('======================== {0:3} Layer: '.format(str(i))+'{0:9}'.format(layer['layer_name'])+' Backward End ==========================')
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB
@torch.no_grad()
def make_dot(var, params=None):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad (TODO: make optional)
"""
if params is not None:
		assert isinstance(list(params.values())[0], Variable)
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '('+(', ').join(['%d' % v for v in size])+')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
name = param_map[id(u)] if params is not None else ''
node_name = '%s\n %s' % (name, size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
print(var)
add_nodes(var.grad_fn)
return dot
def generate_g(model, x):
delete_allpths(pth_dir=None)
print('\n=========================== Store network model Results Start =========================')
y = model(x)
print('=========================== Store network model Results End ===========================\n')
if 'GoogLeNet' in str(model).split('\n')[0]:
g = make_dot(y[0])
return g
else:
g = make_dot(y)
return g
@torch.no_grad()
def exchange_name(name):
if 'Relu' in name:
return 'ReLU'
elif 'AddmmBackward' in name:
return 'Linear'
elif 'ViewBackward' in name:
return 'View'
elif 'Mean' in name or 'Avg' in name:
return 'AvgPool2d'
elif 'BatchNorm' in name:
return 'BatchNorm2d'
elif 'Conv' in name:
return 'Conv2d'
elif 'MaxPool' in name:
return 'MaxPool2d'
elif 'MulBackward' in name:
return 'Dropout_2'
elif 'DivBackward' in name:
return 'Dropout_1'
elif 'AddBackward' in name:
return 'Add'
elif 'Cat' in name:
return 'Cat'
elif 'Hardtanh' in name:
return 'ReLU6'
else:
return 'None'
@torch.no_grad()
def generate_connections(g):
graph = str(g).split('\n')
labels = {}
connections = []
for i in range(len(graph)):
if 'label' in graph[i] and graph[i][-1] == '"':
labels[(graph[i]+graph[i+1][1:]).split('\t')[1].split(' ')[0]]=\
(graph[i]+graph[i+1][1:]).split('\t')[1].split('"')[1]
if 'label' in graph[i] and graph[i][-1] == ']':
labels[graph[i].split('\t')[1].split(' ')[0]]=\
graph[i].split('\t')[1].split('=')[1].split(']')[0]
for i in range(len(graph)):
if '->' in graph[i]:
connections.append({labels[graph[i].split('\t')[1].split(' -> ')[0]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[0]:\
labels[graph[i].split('\t')[1].split(' -> ')[1]]+'_'+\
graph[i].split('\t')[1].split(' -> ')[1]})
pop_index = []
for i in range(len(connections)):
item_key = list(connections[i].keys())[0]
if '(' in item_key or 'TBackward' in item_key:
pop_index.append(connections[i])
for i in range(len(pop_index)-1, -1, -1):
connections.remove(pop_index[i])
new_connections = []
for item in connections:
key, value = list(item.items())[0]
key1 = exchange_name(key.split('_')[0]) + '_' + key.split('_')[1]
value1 = exchange_name(value.split('_')[0]) + '_' + value.split('_')[1]
if 'None' in key1 or 'None' in value1:
print('Not completed for '+key+' or '+value+'! Check exchange_name function!')
exit()
new_connections.append({key1: value1})
if not len(new_connections) == len(connections):
print('Generate connections not done! Check generate_connections function!')
exit()
new_connections.insert(0, {list(new_connections[0].values())[0]: None})
new_connections.append({'None': 'None'})
return connections, new_connections
@torch.no_grad()
def get_split_connections(connections):
return_connections = []
tmp_split = []
for i in range(len(connections)):
item = connections[i]
if len(tmp_split) == 0:
tmp_split.append(item)
continue
value = list(item.values())[0]
last_key = list(tmp_split[-1].keys())[0]
if value == last_key:
tmp_split.append(item)
else:
return_connections.append(tmp_split)
tmp_split = [item]
return return_connections
@torch.no_grad()
def find_start_end(list_dic_key_value, i, j):
key1 = list(list_dic_key_value[i].values())[0]
key2 = list(list_dic_key_value[j].keys())[0]
start = 0
end = len(list_dic_key_value)-1
for index in range(len(list_dic_key_value)):
if key1 == list(list_dic_key_value[index].keys())[0]:
start = index
break
for index in range(len(list_dic_key_value)):
if key2 == list(list_dic_key_value[index].keys())[0]:
end = index
break
return start+1, end-1
@torch.no_grad()
def merge_connections(connections):
import copy
last_connections = copy.deepcopy(connections)
connections.append({'None':'None'})
num_Throwed = 0
notchoosed = []
print('\n=========================== Restore network model Start ===============================')
for i in range(len(connections)):
print('# Restore network model: processing {}/{}'.format(i, len(connections)-1))
item_key = list(connections[i].keys())[0]
if not 'None' in item_key:
if i == 0:
pass
else:
last_item_key = list(connections[i-1].keys())[0]
if not connections[i][item_key] == last_item_key:
for j in range(i+1, len(connections)):
if not list(connections[j].values())[0] == list(connections[j-1].keys())[0]:
notchoosed.append(i)
start, end = find_start_end(connections, i, j-1)
tmp = []
tmp.append(connections[start:end+1])
tmp.append(connections[i:j-1])
last_connections[start:end+1] = [tmp]
for kk in range(end-start):
last_connections.insert(start, 'Throwed')
num_Throwed += 1
break
if not notchoosed == []:
last_connections = last_connections[:notchoosed[0]]
else:
pass
for i in range(num_Throwed):
last_connections.remove('Throwed')
if last_connections[-1] == {'None': 'None'}:
last_connections.remove({'None': 'None'})
print('=========================== Restore network model End =================================\n')
return last_connections
@torch.no_grad()
def find_next_layer_by_name(layers, name, start_i):
for i in range(start_i, len(layers)):
layer = layers[i]
if name in str(layer):
return layer, i
@torch.no_grad()
def get_layers(last_connections, model):
return_layers = []
tmp_layers = []
for layer in model.modules():
if not ':' in str(layer):
tmp_layers.append(layer)
index_tmp_layers = 0
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
			# a single layer with no branches
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers.insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers.insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers.insert(0, tmp[0])
if isinstance(last_connections[i-1], list):
index_tmp_layers = tmp[1] + 1
elif not list(last_connections[i-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
else:
return_layers.insert(0, [])
for j in range(len(last_connections[i])):
return_layers[0].append([])
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'ReLU' in current_layer_name:
return_layers[0][j].insert(0, torch.nn.ReLU(inplace=True))
elif 'Add' in current_layer_name:
return_layers[0][j].insert(0, 'Add')
elif 'View' in current_layer_name:
return_layers.insert(0, 'View')
else:
tmp = find_next_layer_by_name(tmp_layers, current_layer_name, index_tmp_layers)
return_layers[0][j].insert(0, tmp[0])
if not list(last_connections[i][j][k-1].keys())[0].split('_')[0] == 'Dropout':
index_tmp_layers = tmp[1] + 1
return return_layers
@torch.no_grad()
def get_tensors(last_connections):
tensors = get_featuremap(featuremap_dir=None)
index_tensors = 0
import copy
last_tensors = copy.deepcopy(last_connections)
for i in range(len(last_connections)-1, -1, -1):
if not isinstance(last_connections[i], list):
current_layer_name = list(last_connections[i].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i] = 'View'
else:
last_tensors[i] = tensors[index_tensors]
index_tensors += 1
else:
for j in range(len(last_connections[i])):
if len(last_connections[i][j]) == 0:
continue
for k in range(len(last_connections[i][j])-1, -1, -1):
current_layer_name = list(last_connections[i][j][k].keys())[0].split('_')[0]
if 'Add' in current_layer_name:
last_tensors[i][j][k] = 'Add'
elif 'View' in current_layer_name:
last_tensors[i][j][k] = 'View'
else:
last_tensors[i][j][k] = tensors[index_tensors]
index_tensors += 1
for i in range(len(last_tensors)-1, -1, -1):
if isinstance(last_tensors[i], str):
# Add or View
if last_tensors[i] == 'Add':
last_tensors[i] = last_tensors[i+1][0][0] + last_tensors[i+1][1][0]
if last_tensors[i] == 'View':
last_tensors[i] = last_tensors[i+1].view(last_tensors[i+1].size(0), -1)
elif isinstance(last_tensors[i], list):
for j in range(len(last_tensors[i])):
if len(last_tensors[i][j]) == 0:
last_tensors[i][j].append(last_tensors[i+1])
return last_tensors
@torch.no_grad()
def get_structure_parameters(return_layers):
import copy
parameters = copy.deepcopy(return_layers)
fc_conv_weights = copy.deepcopy(return_layers)
for i in range(len(return_layers)):
layer = return_layers[i]
if isinstance(layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Conv2d_params
elif isinstance(layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Add':
layer_name = 'Add'
parameters[i] = {'layer_name': layer_name}
elif layer == 'View':
layer_name = 'View'
parameters[i] = {'layer_name': layer_name}
elif layer == 'Cat':
layer_name = 'Cat'
parameters[i] = {'layer_name': layer_name}
elif isinstance(layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i] = MaxPool2d_params
elif isinstance(layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i] = AvgPool2d_params
elif isinstance(layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i] = Dropout_params
elif isinstance(layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i] = layer.weight
parameters[i] = BatchNorm2d_params
elif isinstance(layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i] = layer.weight
parameters[i] = Linear_params
elif isinstance(layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i] = AdaptiveAvgPool2d_params
elif isinstance(layer, list):
for j in range(len(layer)):
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
###
if isinstance(tmp_layer, nn.Conv2d):
layer_name = 'Conv2d'
Conv2d_params = {}
Conv2d_params['layer_name'] = layer_name
# in_channel
in_channel = tmp_layer.__dict__.get('in_channels')
Conv2d_params['in_channel'] = in_channel
# out_channel
out_channel = tmp_layer.__dict__.get('out_channels')
Conv2d_params['out_channel'] = out_channel
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
Conv2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
Conv2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
Conv2d_params['stride'] = (stride, stride)
else:
Conv2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
Conv2d_params['padding'] = (padding, padding)
else:
Conv2d_params['padding'] = padding
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Conv2d_params
elif isinstance(tmp_layer, nn.ReLU):
layer_name = 'ReLU'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Add':
layer_name = 'Add'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'View':
layer_name = 'View'
parameters[i][j][k] = {'layer_name': layer_name}
elif tmp_layer == 'Cat':
layer_name = 'Cat'
parameters[i][j][k] = {'layer_name': layer_name}
elif isinstance(tmp_layer, nn.MaxPool2d):
layer_name = 'MaxPool2d'
MaxPool2d_params = {}
MaxPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
MaxPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
MaxPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
MaxPool2d_params['stride'] = (stride, stride)
else:
MaxPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
MaxPool2d_params['padding'] = (padding, padding)
else:
MaxPool2d_params['padding'] = padding
# return
parameters[i][j][k] = MaxPool2d_params
elif isinstance(tmp_layer, nn.AvgPool2d):
layer_name = 'AvgPool2d'
AvgPool2d_params = {}
AvgPool2d_params['layer_name'] = layer_name
# kernel_size
kernel_size = tmp_layer.__dict__.get('kernel_size')
if not isinstance(kernel_size, tuple):
AvgPool2d_params['kernel_size'] = (kernel_size, kernel_size)
else:
AvgPool2d_params['kernel_size'] = kernel_size
# stride
stride = tmp_layer.__dict__.get('stride')
if not isinstance(stride, tuple):
AvgPool2d_params['stride'] = (stride, stride)
else:
AvgPool2d_params['stride'] = stride
# padding
padding = tmp_layer.__dict__.get('padding')
if not isinstance(padding, tuple):
AvgPool2d_params['padding'] = (padding, padding)
else:
AvgPool2d_params['padding'] = padding
# return
parameters[i][j][k] = AvgPool2d_params
elif isinstance(tmp_layer, nn.Dropout):
layer_name = 'Dropout'
Dropout_params = {}
Dropout_params['layer_name'] = layer_name
# p
p = tmp_layer.__dict__.get('p')
Dropout_params['p'] = p
# return
parameters[i][j][k] = Dropout_params
elif isinstance(tmp_layer, nn.BatchNorm2d):
layer_name = 'BatchNorm2d'
BatchNorm2d_params = {}
BatchNorm2d_params['layer_name'] = layer_name
# num_features
num_features = tmp_layer.__dict__.get('num_features')
BatchNorm2d_params['num_features'] = num_features
# eps
eps = tmp_layer.__dict__.get('eps')
BatchNorm2d_params['eps'] = eps
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = BatchNorm2d_params
elif isinstance(tmp_layer, nn.Linear):
layer_name = 'Linear'
Linear_params = {}
Linear_params['layer_name'] = layer_name
# in_features
in_features = tmp_layer.__dict__.get('in_features')
Linear_params['in_features'] = in_features
# out_features
out_features = tmp_layer.__dict__.get('out_features')
Linear_params['out_features'] = out_features
# return
fc_conv_weights[i][j][k] = tmp_layer.weight
parameters[i][j][k] = Linear_params
elif isinstance(tmp_layer, nn.AdaptiveAvgPool2d):
layer_name = 'AdaptiveAvgPool2d'
AdaptiveAvgPool2d_params = {}
AdaptiveAvgPool2d_params['layer_name'] = layer_name
# output_size
output_size = tmp_layer.__dict__.get('output_size')
if not isinstance(output_size, tuple):
AdaptiveAvgPool2d_params['output_size'] = (output_size, output_size)
else:
AdaptiveAvgPool2d_params['output_size'] = output_size
# return
parameters[i][j][k] = AdaptiveAvgPool2d_params
###
else:
print('The layer has not been processed in get_structure_parameters!')
return parameters, fc_conv_weights
def gradient_backward_v2(model, img, label, num_class=1000, g_view=False):
x = Variable(img)
g = generate_g(model, x)
if g_view:
g.view()
delete_allpths(pth_dir=None)
print('\n=========================== Generate Tensors Start ====================================')
result = model(img)
print('=========================== Generate Tensors End ======================================\n')
Loss = nn.CrossEntropyLoss()
if 'GoogLeNet' in str(model).split('\n')[0]:
loss_torch = Loss(result[0], label)
else:
loss_torch = Loss(result, label)
_, connections = generate_connections(g)
last_connections = merge_connections(connections)
return_layers = get_layers(last_connections, model)
return_tensors = get_tensors(last_connections)
parameters, fc_conv_weights = get_structure_parameters(return_layers)
'''
print('================')
for i in range(len(last_connections)):
print(i, last_connections[i])
print('================')
print('================')
for i in range(len(return_layers)):
print(i, return_layers[i])
print('================')
print('================')
for i in range(len(parameters)):
print(i, parameters[i])
print('================')
print('================')
for i in range(len(return_tensors)):
if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str):
print('=========', i, return_tensors[i].shape)
print('================')
'''
import copy
return_dz = copy.deepcopy(last_connections)
featuremap = return_tensors
featuremap.append(img)
y_true = F.one_hot(label, num_classes=num_class).float()
loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true)
featuremap.pop(0)
return_dz.append(dLoss_dz)
#####################tensors
'''
for i in range(len(last_connections)):
print(last_connections[i])
for i in range(len(featuremap)):
if not isinstance(featuremap[i], list):
print('=========', i, featuremap[i].shape)
else:
for j in range(len(featuremap[i])):
for k in range(len(featuremap[i][j])):
print(' =========', i, j, k, featuremap[i][j][k].shape)
'''
#####################
	# traverse the first n layers in reverse order
for i in range(len(parameters)):
layer = parameters[i]
if not isinstance(layer, list):
print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start ========================')
if layer['layer_name'] == 'Conv2d':
z = featuremap[i]
weight_z = fc_conv_weights[i]
try:
padding = layer['padding']
except:
padding = (0, 0)
stride = layer['stride']
dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'ReLU':
z = featuremap[i]
dLoss_dz = relu_backward(dLoss_dz, z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'MaxPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'AvgPool2d':
z = featuremap[i]
pooling = layer['kernel_size']
stride = layer['stride']
padding = layer['padding']
dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Linear':
weight_z = fc_conv_weights[i]
z = featuremap[i]
dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'View':
last_z = featuremap[i+1]
if 'Pool' in parameters[i+1]['layer_name']:
params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding'])
else:
params = None
dLoss_dz = view_backward(dLoss_dz, last_z, params)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Add':
dLoss_dz = add_backward(dLoss_dz)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'Dropout':
if parameters[i-1]['layer_name'] == 'Dropout':
return_dz[i] = dLoss_dz
					print('# Skip this layer because the layer has been calculated!')
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\
format(layer['layer_name'])+' Backward End ==========================')
continue
p = layer['p']
mask = featuremap[i]
dLoss_dz = dropback_backward(dLoss_dz, mask, p)
return_dz[i] = dLoss_dz
elif layer['layer_name'] == 'BatchNorm2d':
eps = layer['eps']
z = featuremap[i]
gamma = fc_conv_weights[i]
dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma)
return_dz[i] = dLoss_dz
print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================')
elif isinstance(layer, list):
import copy
tmp_dLoss_dz = []
for j in range(len(layer)):
tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz))
for k in range(len(layer[j])):
tmp_layer = layer[j][k]
print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================')
if tmp_layer['layer_name'] == 'Conv2d':
if k+1 >= len(featuremap[i-1][j]):
z = featuremap[i]
else:
z = featuremap[i-1][j][k+1]
weight_z = fc_conv_weights[i][j][k]
try:
padding = tmp_layer['padding']
except:
padding = (0, 0)
stride = tmp_layer['stride']
tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'ReLU':
z = featuremap[i-1][j][k+1]
tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
elif tmp_layer['layer_name'] == 'BatchNorm2d':
eps = tmp_layer['eps']
z = featuremap[i-1][j][k+1]
gamma = fc_conv_weights[i][j][k]
tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma)
return_dz[i][j][k] = tmp_dLoss_dz[-1]
print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward End ======================')
print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape)
dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1]
else:
print('Not completed in gradient_backward!')
print('# Torch calculated loss: ', loss_torch.detach().numpy())
loss_torch.backward()
if 'VGG' in str(model) or 'AlexNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad))
elif 'ResNet' in str(model):
print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad))
delete_allpths(pth_dir=None)
return return_dz, dLoss_dW, dLoss_dB | [((806, 821), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (819, 821), False, 'import torch\n'), ((1697, 1712), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1710, 1712), False, 'import torch\n'), ((2365, 2380), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2378, 2380), False, 'import torch\n'), ((3167, 3182), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3180, 3182), False, 'import torch\n'), ((3478, 3493), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3491, 3493), False, 'import torch\n'), ((3864, 3879), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3877, 3879), False, 'import torch\n'), ((4947, 4962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4960, 4962), False, 'import torch\n'), ((5815, 5830), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5828, 5830), False, 'import torch\n'), ((6750, 6765), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6763, 6765), False, 'import torch\n'), ((7066, 7081), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7079, 7081), False, 'import torch\n'), ((8339, 8354), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8352, 8354), False, 'import torch\n'), ((8660, 8675), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8673, 8675), False, 'import torch\n'), ((9052, 9067), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9065, 9067), False, 'import torch\n'), ((10590, 10605), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10603, 10605), False, 'import torch\n'), ((11113, 11128), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11126, 11128), False, 'import torch\n'), ((15997, 16012), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16010, 16012), False, 'import torch\n'), ((16267, 16282), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16280, 16282), False, 'import torch\n'), ((16407, 16422), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16420, 16422), False, 'import torch\n'), ((20048, 20063), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20061, 20063), False, 'import torch\n'), ((22082, 22097), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22095, 22097), False, 'import torch\n'), ((22729, 22744), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22742, 22744), False, 'import torch\n'), ((24482, 24497), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24495, 24497), False, 'import torch\n'), ((24945, 24960), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24958, 24960), False, 'import torch\n'), ((25440, 25455), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25453, 25455), False, 'import torch\n'), ((26957, 26972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26970, 26972), False, 'import torch\n'), ((27137, 27152), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27150, 27152), False, 'import torch\n'), ((29020, 29035), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29033, 29035), False, 'import torch\n'), ((30623, 30638), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (30636, 30638), False, 'import torch\n'), ((1185, 1203), 'torch.exp', 'torch.exp', (['y_shift'], {}), '(y_shift)\n', (1194, 1203), False, 'import torch\n'), ((2033, 2062), 'torch.matmul', 'torch.matmul', (['dLoss_dnextz', 'w'], {}), '(dLoss_dnextz, w)\n', (2045, 2062), False, 'import torch\n'), ((2134, 2164), 'torch.sum', 'torch.sum', (['dLoss_dnextz'], {'dim': '(0)'}), '(dLoss_dnextz, dim=0)\n', (2143, 2164), False, 'import torch\n'), ((3320, 3345), 'torch.zeros_like', 'torch.zeros_like', 
(['next_dz'], {}), '(next_dz)\n', (3336, 3345), False, 'import torch\n'), ((3683, 3705), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (3699, 3705), False, 'import torch\n'), ((4181, 4280), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (4186, 4280), True, 'import torch.nn.functional as F\n'), ((4298, 4325), 'torch.zeros_like', 'torch.zeros_like', (['padding_z'], {}), '(padding_z)\n', (4314, 4325), False, 'import torch\n'), ((4337, 4352), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (4349, 4352), False, 'import torch\n'), ((5012, 5041), 'torch.Tensor', 'torch.Tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5024, 5041), False, 'import torch\n'), ((5366, 5391), 'torch.zeros_like', 'torch.zeros_like', (['next_dz'], {}), '(next_dz)\n', (5382, 5391), False, 'import torch\n'), ((6136, 6235), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (6141, 6235), True, 'import torch.nn.functional as F\n'), ((6253, 6280), 'torch.zeros_like', 'torch.zeros_like', (['padding_z'], {}), '(padding_z)\n', (6269, 6280), False, 'import torch\n'), ((6292, 6307), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (6304, 6307), False, 'import torch\n'), ((7545, 7566), 'torch.flip', 'torch.flip', (['K', '(2, 3)'], {}), '(K, (2, 3))\n', (7555, 7566), False, 'import torch\n'), ((7585, 7613), 'torch.swapaxes', 'torch.swapaxes', (['flip_K', '(0)', '(1)'], {}), '(flip_K, 0, 1)\n', (7599, 7613), False, 'import torch\n'), ((7637, 7786), 'torch.nn.functional.pad', 'F.pad', (['padding_next_dz'], {'pad': '(k2 - 1 - padding[1], k2 - 1 - padding[1], k1 - 1 - padding[0], k1 - 1 -\n padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(padding_next_dz, pad=(k2 - 1 - padding[1], k2 - 1 - padding[1], k1 - \n 1 - padding[0], k1 - 1 - padding[0], 0, 0), mode='constant', value=0)\n", (7642, 7786), True, 'import torch.nn.functional as F\n'), ((7840, 7863), 'torch.swapaxes', 'torch.swapaxes', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (7854, 7863), False, 'import torch\n'), ((8565, 8613), 'torch.einsum', 'torch.einsum', (['"""nchwkj,dckj->ndhw"""', 'x_pad', 'weight'], {}), "('nchwkj,dckj->ndhw', x_pad, weight)\n", (8577, 8613), False, 'import torch\n'), ((8812, 8845), 'torch.zeros', 'torch.zeros', (['N', 'D', 'H_last', 'W_last'], {}), '(N, D, H_last, W_last)\n', (8823, 8845), False, 'import torch\n'), ((10780, 10799), 'os.listdir', 'os.listdir', (['pth_dir'], {}), '(pth_dir)\n', (10790, 10799), False, 'import os\n'), ((16136, 16167), 'os.walk', 'os.walk', (['pth_dir'], {'topdown': '(False)'}), '(pth_dir, topdown=False)\n', (16143, 16167), False, 'import os\n'), ((25528, 25554), 'copy.deepcopy', 'copy.deepcopy', (['connections'], {}), '(connections)\n', (25541, 25554), False, 'import copy\n'), ((29171, 29202), 'copy.deepcopy', 'copy.deepcopy', (['last_connections'], {}), '(last_connections)\n', (29184, 29202), False, 'import copy\n'), ((30714, 30742), 'copy.deepcopy', 'copy.deepcopy', (['return_layers'], {}), '(return_layers)\n', (30727, 30742), False, 'import copy\n'), ((30763, 30791), 'copy.deepcopy', 'copy.deepcopy', (['return_layers'], {}), '(return_layers)\n', (30776, 30791), 
False, 'import copy\n'), ((41412, 41425), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (41420, 41425), False, 'from torch.autograd import Variable\n'), ((41746, 41767), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (41765, 41767), True, 'import torch.nn as nn\n'), ((42829, 42860), 'copy.deepcopy', 'copy.deepcopy', (['last_connections'], {}), '(last_connections)\n', (42842, 42860), False, 'import copy\n'), ((1239, 1276), 'torch.sum', 'torch.sum', (['y_exp'], {'dim': '(1)', 'keepdim': '(True)'}), '(y_exp, dim=1, keepdim=True)\n', (1248, 1276), False, 'import torch\n'), ((3371, 3385), 'torch.gt', 'torch.gt', (['z', '(0)'], {}), '(z, 0)\n', (3379, 3385), False, 'import torch\n'), ((4366, 4381), 'torch.arange', 'torch.arange', (['C'], {}), '(C)\n', (4378, 4381), False, 'import torch\n'), ((5597, 5622), 'torch.pow', 'torch.pow', (['(var + eps)', '(0.5)'], {}), '(var + eps, 0.5)\n', (5606, 5622), False, 'import torch\n'), ((6321, 6336), 'torch.arange', 'torch.arange', (['C'], {}), '(C)\n', (6333, 6336), False, 'import torch\n'), ((8012, 8049), 'torch.swapaxes', 'torch.swapaxes', (['padding_next_dz', '(0)', '(1)'], {}), '(padding_next_dz, 0, 1)\n', (8026, 8049), False, 'import torch\n'), ((1125, 1166), 'torch.max', 'torch.max', (['y_predict'], {'dim': '(1)', 'keepdim': '(True)'}), '(y_predict, dim=1, keepdim=True)\n', (1134, 1166), False, 'import torch\n'), ((3741, 3760), 'torch.eq', 'torch.eq', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (3749, 3760), False, 'import torch\n'), ((4396, 4415), 'torch.arange', 'torch.arange', (['out_h'], {}), '(out_h)\n', (4408, 4415), False, 'import torch\n'), ((6351, 6370), 'torch.arange', 'torch.arange', (['out_h'], {}), '(out_h)\n', (6363, 6370), False, 'import torch\n'), ((7903, 8002), 'torch.nn.functional.pad', 'F.pad', (['z'], {'pad': '(padding[1], padding[1], padding[0], padding[0], 0, 0)', 'mode': '"""constant"""', 'value': '(0)'}), "(z, pad=(padding[1], padding[1], padding[0], padding[0], 0, 0), mode=\n 'constant', value=0)\n", (7908, 8002), True, 'import torch.nn.functional as F\n'), ((8081, 8108), 'torch.sum', 'torch.sum', (['next_dz'], {'axis': '(-1)'}), '(next_dz, axis=-1)\n', (8090, 8108), False, 'import torch\n'), ((16663, 16702), 'torch.nn.functional.one_hot', 'F.one_hot', (['label'], {'num_classes': 'num_class'}), '(label, num_classes=num_class)\n', (16672, 16702), True, 'import torch.nn.functional as F\n'), ((20916, 20936), 'torch.is_tensor', 'torch.is_tensor', (['var'], {}), '(var)\n', (20931, 20936), False, 'import torch\n'), ((42935, 42974), 'torch.nn.functional.one_hot', 'F.one_hot', (['label'], {'num_classes': 'num_class'}), '(label, num_classes=num_class)\n', (42944, 42974), True, 'import torch.nn.functional as F\n'), ((4431, 4450), 'torch.arange', 'torch.arange', (['out_w'], {}), '(out_w)\n', (4443, 4450), False, 'import torch\n'), ((6386, 6405), 'torch.arange', 'torch.arange', (['out_w'], {}), '(out_w)\n', (6398, 6405), False, 'import torch\n'), ((1333, 1357), 'torch.log', 'torch.log', (['y_probability'], {}), '(y_probability)\n', (1342, 1357), False, 'import torch\n'), ((4469, 4594), 'torch.argmax', 'torch.argmax', (['padding_z[(n), (c), strides[0] * i:strides[0] * i + pooling[0], strides[1] *\n j:strides[1] * j + pooling[1]]'], {}), '(padding_z[(n), (c), strides[0] * i:strides[0] * i + pooling[0],\n strides[1] * j:strides[1] * j + pooling[1]])\n', (4481, 4594), False, 'import torch\n'), ((16237, 16261), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (16249, 16261), 
False, 'import os\n'), ((27606, 27633), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (27619, 27633), False, 'import torch\n'), ((46488, 46511), 'copy.deepcopy', 'copy.deepcopy', (['dLoss_dz'], {}), '(dLoss_dz)\n', (46501, 46511), False, 'import copy\n'), ((28526, 28553), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (28539, 28553), False, 'import torch\n')] |
yubuyuabc/ark-nlp | ark_nlp/factory/utils/attack.py | 165d35cfacd7476791c0aeba19bf43f4f8079553 | import torch
class FGM(object):
"""
    FGM (Fast Gradient Method) based adversarial-training attack
    Args:
        module (:obj:`torch.nn.Module`): the model
    Examples::
        >>> # initialization
        >>> fgm = FGM(module)
        >>> for batch_input, batch_label in data:
        >>>     # normal training step
        >>>     loss = module(batch_input, batch_label)
        >>>     loss.backward() # backprop to get the normal gradients
        >>>     # adversarial training step
        >>>     fgm.attack() # add the adversarial perturbation to the embeddings
        >>>     loss_adv = module(batch_input, batch_label)
        >>>     loss_adv.backward() # backprop, accumulating the adversarial gradients on top of the normal ones
        >>>     fgm.restore() # restore the embedding parameters
        >>>     # gradient descent, update the parameters
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.backup = {}
def attack(
self,
epsilon=1.,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(
self,
emb_name='word_embeddings'
):
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
class PGD(object):
"""
    PGD (Projected Gradient Descent) based adversarial-training attack
    Args:
        module (:obj:`torch.nn.Module`): the model
    Examples::
        >>> pgd = PGD(module)
        >>> K = 3
        >>> for batch_input, batch_label in data:
        >>>     # normal training step
        >>>     loss = module(batch_input, batch_label)
        >>>     loss.backward() # backprop to get the normal gradients
        >>>     pgd.backup_grad()
        >>>     # adversarial training steps
        >>>     for t in range(K):
        >>>         pgd.attack(is_first_attack=(t==0)) # add the adversarial perturbation to the embeddings; back up param.data on the first attack
        >>>         if t != K-1:
        >>>             optimizer.zero_grad()
        >>>         else:
        >>>             pgd.restore_grad()
        >>>         loss_adv = module(batch_input, batch_label)
        >>>         loss_adv.backward() # backprop, accumulating the adversarial gradients on top of the normal ones
        >>>     pgd.restore() # restore the embedding parameters
        >>>     # gradient descent, update the parameters
>>> optimizer.step()
>>> optimizer.zero_grad()
Reference:
[1] https://zhuanlan.zhihu.com/p/91269728
"""
def __init__(self, module):
self.module = module
self.emb_backup = {}
self.grad_backup = {}
def attack(
self,
epsilon=1.,
alpha=0.3,
emb_name='emb.',
is_first_attack=False
):
        # replace emb_name with the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='emb.'):
        # replace emb_name with the name of the embedding parameter in your model
for name, param in self.module.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.module.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
| [((3897, 3910), 'torch.norm', 'torch.norm', (['r'], {}), '(r)\n', (3907, 3910), False, 'import torch\n'), ((1148, 1170), 'torch.norm', 'torch.norm', (['param.grad'], {}), '(param.grad)\n', (1158, 1170), False, 'import torch\n'), ((3202, 3224), 'torch.norm', 'torch.norm', (['param.grad'], {}), '(param.grad)\n', (3212, 3224), False, 'import torch\n'), ((3952, 3965), 'torch.norm', 'torch.norm', (['r'], {}), '(r)\n', (3962, 3965), False, 'import torch\n'), ((1208, 1225), 'torch.isnan', 'torch.isnan', (['norm'], {}), '(norm)\n', (1219, 1225), False, 'import torch\n'), ((3262, 3279), 'torch.isnan', 'torch.isnan', (['norm'], {}), '(norm)\n', (3273, 3279), False, 'import torch\n')] |
sreejithr/deepfake | core.py | c7115ce90ea281e2eb95d75f436efa102c8f2e3c | import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
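# A minimal sketch (not wired up below) of how such a pipe could be fed,
# assuming the `command` list above and raw 256x256 BGR frames:
#   import subprocess
#   proc = subprocess.Popen(command, stdin=subprocess.PIPE)
#   proc.stdin.write(frame.tobytes())  # one raw frame per write
#   proc.stdin.close(); proc.wait()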
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
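    # In relative mode the driving motion is applied as an offset from the initial
    # driving frame:
    #   value_new    = kp_source + scale * (kp_driving - kp_driving_initial)
    #   jacobian_new = J_driving @ inv(J_driving_initial) @ J_source
    # where scale = sqrt(area_source / area_driving_initial) of the keypoint convex
    # hulls when adapt_movement_scale is enabled, and 1 otherwise.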
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
@throttle.wrap(1, 2)
def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_scale
)
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
return np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
resized = resize(frame, (256, 256))[..., :3]
        # y = torch.tensor(np.array(resized))
        # x = y.cpu().numpy()
        # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
        # # x = y.permute(1, 2, 0)
        # plt.imshow(np.array(image))
        # plt.show()
        driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not opt.cpu:
            # move the driving tensor (not the numpy array) to the GPU
            driving_resized = driving_resized.cuda()
if not kp_driving_initial:
kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
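    # Example invocation (paths/names are placeholders):
    #   python core.py --config config/vox-256.yaml --checkpoint vox-cpk.pth.tar \
    #       --source_image source.png --relative --adapt_scale [--cpu]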
| [((2735, 2754), 'throttle.wrap', 'throttle.wrap', (['(1)', '(2)'], {}), '(1, 2)\n', (2748, 2754), False, 'import throttle\n'), ((1863, 1980), 'modules.generator.OcclusionAwareGenerator', 'OcclusionAwareGenerator', ([], {}), "(**config['model_params']['generator_params'], **\n config['model_params']['common_params'])\n", (1886, 1980), False, 'from modules.generator import OcclusionAwareGenerator\n'), ((2076, 2182), 'modules.keypoint_detector.KPDetector', 'KPDetector', ([], {}), "(**config['model_params']['kp_detector_params'], **config[\n 'model_params']['common_params'])\n", (2086, 2182), False, 'from modules.keypoint_detector import KPDetector\n'), ((3360, 3376), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3374, 3376), False, 'from argparse import ArgumentParser\n'), ((4216, 4248), 'imageio.imread', 'imageio.imread', (['opt.source_image'], {}), '(opt.source_image)\n', (4230, 4248), False, 'import imageio\n'), ((4614, 4633), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4630, 4633), False, 'import cv2\n'), ((5766, 5789), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5787, 5789), False, 'import cv2\n'), ((1833, 1845), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1842, 1845), False, 'import yaml\n'), ((2377, 2404), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2387, 2404), False, 'import torch\n'), ((2557, 2592), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['generator'], {}), '(generator)\n', (2581, 2592), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((2615, 2652), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['kp_detector'], {}), '(kp_detector)\n', (2639, 2652), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((4266, 4298), 'skimage.transform.resize', 'resize', (['source_image', '(256, 256)'], {}), '(source_image, (256, 256))\n', (4272, 4298), False, 'from skimage.transform import resize\n'), ((5441, 5472), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'fake_frame'], {}), "('frame', fake_frame)\n", (5451, 5472), False, 'import cv2\n'), ((1115, 1135), 'numpy.sqrt', 'np.sqrt', (['source_area'], {}), '(source_area)\n', (1122, 1135), True, 'import numpy as np\n'), ((1138, 1159), 'numpy.sqrt', 'np.sqrt', (['driving_area'], {}), '(driving_area)\n', (1145, 1159), True, 'import numpy as np\n'), ((1649, 1699), 'torch.matmul', 'torch.matmul', (['jacobian_diff', "kp_source['jacobian']"], {}), "(jacobian_diff, kp_source['jacobian'])\n", (1661, 1699), False, 'import torch\n'), ((4723, 4748), 'skimage.transform.resize', 'resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (4729, 4748), False, 'from skimage.transform import resize\n'), ((1569, 1614), 'torch.inverse', 'torch.inverse', (["kp_driving_initial['jacobian']"], {}), "(kp_driving_initial['jacobian'])\n", (1582, 1614), False, 'import torch\n'), ((2325, 2344), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2337, 2344), False, 'import torch\n'), ((5694, 5708), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5705, 5708), False, 'import cv2\n'), ((5046, 5063), 'numpy.array', 'np.array', (['resized'], {}), '(resized)\n', (5054, 5063), True, 'import numpy as np\n')] |
cpempire/soupy | soupy/approximations/taylor/backup/__init__.py | 9f65e3329fa126619c893daa4cd80478d83f840c | from __future__ import absolute_import, division, print_function
from .controlPDEProblem import ControlPDEProblem
from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE
from .costFunctionalConstant import CostFunctionalConstant
from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE
from .costFunctionalLinear import CostFunctionalLinear
from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE
from .costFunctionalQuadratic import CostFunctionalQuadratic
from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE
# from .chanceConstraintQuadratic import ChanceConstraintQuadratic
# from .chanceConstraintLinear import ChanceConstraintLinear
# from .chanceConstraintConstant import ChanceConstraintConstant
# to do list
# 0. implement zero, Hessian term
# 1. implement linear
# 2. implement quadratic
# 3. implement SAA
# to do list
# 1. SAA does not run well on ccgo1; multiprocessing does not work,
### unclear bug; simplifying the adjoint solver works
# 2. quadratic approximation does not converge well, even without the variance term
### record eigenvector after m_tr[i].zero()
# 3. check gradient for quadratic + correction
# what to show tomorrow
# 1. variance reduction by mean square error
# 2. trace estimation by MC and randomized SVD
# 3. scaling with respect to mesh (design + uncertainty), trace, variance reduction, #bfgs
# 4. show the design and state, for both disk and submarine
# 5. random sample and state at different design
# April 9, 2018, work on reporting results
# 1. random samples and states at different design
# 2. table for variance reduction
# 3. plot trace estimation
# 4. plot #bfgs iterations
# obtain all results as planned | [] |
navekshasood/HuBMAP---Hacking-the-Kidney | models/1-Tom/train/kaggle-hubmap-main/src/02_train/transforms.py | 018100fe4bfa5e8764b9df5a9d188e2c670ac061 | import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
ShiftScaleRotate, ElasticTransform,
GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
RandomBrightnessContrast, HueSaturationValue, IAASharpen,
RandomGamma, RandomBrightness, RandomBrightnessContrast,
GaussianBlur,CLAHE,
Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def get_transforms_train():
transform_train = Compose([
#Basic
RandomRotate90(p=1),
HorizontalFlip(p=0.5),
#Morphology
ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),
interpolation=1, border_mode=0, value=(0,0,0), p=0.5),
GaussNoise(var_limit=(0,50.0), mean=0, p=0.5),
GaussianBlur(blur_limit=(3,7), p=0.5),
#Color
RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,
brightness_by_max=True,p=0.5),
HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,
val_shift_limit=0, p=0.5),
CoarseDropout(max_holes=2,
max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,
min_holes=1,
min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,
fill_value=0, mask_fill_value=0, p=0.5),
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
])
return transform_train
def get_transforms_valid():
transform_valid = Compose([
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
] )
return transform_valid
def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):
return std*z + mean
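# Minimal usage sketch (assuming `img` is an HxWx3 uint8 array and `mask` an HxW array):
#   sample = get_transforms_train()(image=img, mask=mask)
#   img_tensor, mask_tensor = sample['image'], sample['mask']
#   img_restored = denormalize(img_tensor.numpy())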
| [((737, 749), 'get_config.get_config', 'get_config', ([], {}), '()\n', (747, 749), False, 'from get_config import get_config\n'), ((758, 789), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (766, 789), True, 'import numpy as np\n'), ((797, 828), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (805, 828), True, 'import numpy as np\n'), ((913, 932), 'albumentations.RandomRotate90', 'RandomRotate90', ([], {'p': '(1)'}), '(p=1)\n', (927, 932), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((942, 963), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (956, 963), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1002, 1142), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0)', 'scale_limit': '(-0.2, 0.2)', 'rotate_limit': '(-30, 30)', 'interpolation': '(1)', 'border_mode': '(0)', 'value': '(0, 0, 0)', 'p': '(0.5)'}), '(shift_limit=0, scale_limit=(-0.2, 0.2), rotate_limit=(-30,\n 30), interpolation=1, border_mode=0, value=(0, 0, 0), p=0.5)\n', (1018, 1142), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1170, 1216), 'albumentations.GaussNoise', 'GaussNoise', ([], {'var_limit': '(0, 50.0)', 'mean': '(0)', 'p': '(0.5)'}), '(var_limit=(0, 50.0), mean=0, p=0.5)\n', (1180, 1216), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1225, 1263), 'albumentations.GaussianBlur', 'GaussianBlur', ([], {'blur_limit': '(3, 7)', 'p': '(0.5)'}), '(blur_limit=(3, 7), p=0.5)\n', (1237, 1263), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1296, 1398), 
'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'brightness_limit': '(0.35)', 'contrast_limit': '(0.5)', 'brightness_by_max': '(True)', 'p': '(0.5)'}), '(brightness_limit=0.35, contrast_limit=0.5,\n brightness_by_max=True, p=0.5)\n', (1320, 1398), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1437, 1526), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'hue_shift_limit': '(30)', 'sat_shift_limit': '(30)', 'val_shift_limit': '(0)', 'p': '(0.5)'}), '(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=\n 0, p=0.5)\n', (1455, 1526), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1568, 1849), 'albumentations.CoarseDropout', 'CoarseDropout', ([], {'max_holes': '(2)', 'max_height': "(config['input_resolution'][0] // 4)", 'max_width': "(config['input_resolution'][1] // 4)", 'min_holes': '(1)', 'min_height': "(config['input_resolution'][0] // 16)", 'min_width': "(config['input_resolution'][1] // 16)", 'fill_value': '(0)', 'mask_fill_value': '(0)', 'p': '(0.5)'}), "(max_holes=2, max_height=config['input_resolution'][0] // 4,\n max_width=config['input_resolution'][1] // 4, min_holes=1, min_height=\n config['input_resolution'][0] // 16, min_width=config[\n 'input_resolution'][1] // 16, fill_value=0, mask_fill_value=0, p=0.5)\n", (1581, 1849), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1937, 2010), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (1946, 2010), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2039, 2049), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2047, 2049), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n'), ((2155, 2228), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (2164, 2228), 
False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2257, 2267), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2265, 2267), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n')] |
Majikat/cubspack | cubspack/geometry.py | 16aa6df0603d48d757d74837d3457a1934601d89 | # -*- coding: utf-8 -*-
from math import sqrt
class Point(object):
__slots__ = ('x', 'y', 'z')
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
return (self.x == other.x and self.y == other.y and self.z == other.z)
def __repr__(self):
return "P({}, {}, {})".format(self.x, self.y, self.z)
def distance(self, point):
"""Calculate distance to another point"""
return sqrt((self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2)
def distance_squared(self, point):
return (self.x - point.x)**2 + (self.y - point.y)**2 + (
self.z - point.z)**2
class Segment(object):
__slots__ = ('start', 'end')
def __init__(self, start, end):
"""Arguments:
start (Point): Segment start point
end (Point): Segment end point
"""
assert(isinstance(start, Point) and isinstance(end, Point))
self.start = start
self.end = end
def __eq__(self, other):
if not isinstance(other, self.__class__):
None
return self.start == other.start and self.end == other.end
def __repr__(self):
return "S({}, {})".format(self.start, self.end)
@property
def length_squared(self):
"""Faster than length and useful for some comparisons"""
return self.start.distance_squared(self.end)
@property
def length(self):
return self.start.distance(self.end)
@property
def top(self):
return max(self.start.y, self.end.y)
@property
def bottom(self):
return min(self.start.y, self.end.y)
@property
def right(self):
return max(self.start.x, self.end.x)
@property
def left(self):
return min(self.start.x, self.end.x)
@property
def ineye(self):
return max(self.start.z, self.end.z)
@property
def outeye(self):
return min(self.start.z, self.end.z)
class HSegment(Segment):
"""Horizontal Segment"""
def __init__(self, start, length):
"""Create an Horizontal segment given its left most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(HSegment, self).__init__(
start, Point(start.x + length, start.y, start.z))
@property
def length(self):
return self.end.x - self.start.x
class VSegment(Segment):
"""Vertical Segment"""
def __init__(self, start, length):
"""Create a Vertical segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
super(VSegment, self).__init__(
start, Point(start.x, start.y + length, start.z))
@property
def length(self):
return self.end.y - self.start.y
class DSegment(Segment):
"""In-Depth Segment"""
def __init__(self, start, length):
"""Create an In-Depth segment given its bottom most end point and its length.
Arguments:
- start (Point): Starting Point
- length (number): segment length
"""
assert(isinstance(start, Point) and not isinstance(length, Point))
        super(DSegment, self).__init__(
start, Point(start.x, start.y, start.z + length))
@property
def length(self):
return self.end.z - self.start.z
class Cuboid(object):
"""Basic cuboid primitive class.
    x, y, z -> Coordinates of the bottom-left-outermost corner (minimum x, y, z)
    width  -> size along x
    height -> size along y
    depth  -> size along z
"""
__slots__ = ('width', 'height', 'depth', 'x', 'y', 'z', 'rid')
def __init__(self, x, y, z, width, height, depth, rid=None):
"""Initiating the Cuboid
Args:
x (int, float):
y (int, float):
z (int, float):
width (int, float):
height (int, float):
depth (int, float):
rid (identifier object):
"""
assert(height >= 0 and width >= 0 and depth >= 0)
self.width = width
self.height = height
self.depth = depth
self.x = x
self.y = y
self.z = z
self.rid = rid
@property
def bottom(self):
"""Cuboid bottom edge y coordinate"""
return self.y
@property
def top(self):
"""Cuboid top edge y coordiante"""
return self.y + self.height
@property
def left(self):
"""Cuboid left edge x coordinate"""
return self.x
@property
def right(self):
"""Cuboid right edge x coordinate"""
return self.x + self.width
@property
def outeye(self):
"""Cuboid farther from eye edge z coordinate"""
return self.z
@property
def ineye(self):
"""Cuboid nearer from eye edge z coordinate"""
return self.z + self.depth
@property
def corner_top_l(self):
return Point(self.left, self.top, self.outeye)
@property
def corner_top_r(self):
return Point(self.right, self.top, self.outeye)
@property
def corner_bot_r(self):
return Point(self.right, self.bottom, self.outeye)
@property
def corner_bot_l(self):
return Point(self.left, self.bottom, self.outeye)
@property
def corner_top_l_out(self):
return Point(self.left, self.top, self.ineye)
@property
def corner_top_r_out(self):
return Point(self.right, self.top, self.ineye)
@property
def corner_bot_r_out(self):
return Point(self.right, self.bottom, self.ineye)
@property
def corner_bot_l_out(self):
return Point(self.left, self.bottom, self.ineye)
def __lt__(self, other):
"""Compare cuboids by volume (used for sorting)"""
return self.volume() < other.volume()
def __eq__(self, other):
"""Equal cuboids have same properties."""
if not isinstance(other, self.__class__):
return False
return (self.width == other.width and
self.height == other.height and
self.depth == other.depth and
self.x == other.x and
self.y == other.y and
self.z == other.z)
def __hash__(self):
return hash(
(self.x, self.y, self.z, self.width, self.height, self.depth))
def __iter__(self):
"""Iterate through cuboid corners"""
yield self.corner_top_l
yield self.corner_top_r
yield self.corner_bot_r
yield self.corner_bot_l
yield self.corner_top_l_out
yield self.corner_top_r_out
yield self.corner_bot_r_out
yield self.corner_bot_l_out
def __repr__(self):
return "R({}, {}, {}, {}, {}, {})".format(
self.x, self.y, self.z, self.width, self.height, self.depth)
def volume(self):
"""Cuboid volume"""
return self.width * self.height * self.depth
def move(self, x, y, z):
"""Move Cuboid to x,y,z coordinates
Arguments:
x (int, float): X coordinate
y (int, float): Y coordinate
z (int, float): Z coordinate
"""
self.x = x
self.y = y
self.z = z
def contains(self, cub):
"""Tests if another cuboid is contained by this one
Arguments:
            cub (Cuboid): The other cuboid
Returns:
bool: True if it is inside this one, False otherwise
"""
return (cub.y >= self.y and
cub.x >= self.x and
cub.z >= self.z and
cub.y + cub.height <= self.y + self.height and
cub.x + cub.width <= self.x + self.width and
cub.z + cub.depth <= self.z + self.depth)
def intersects(self, cub, edges=False):
"""Detect intersections between this cuboid and cub.
Args:
cub (Cuboid): Cuboid to test for intersections.
edges (bool): Accept edge touching cuboids as intersects or not
Returns:
bool: True if the cuboids intersect, False otherwise
"""
# Not even touching
if (self.bottom > cub.top or
self.top < cub.bottom or
self.left > cub.right or
self.right < cub.left or
self.outeye > cub.ineye or
self.ineye < cub.outeye):
return False
# Discard edge intersects
if not edges:
if (self.bottom == cub.top or
self.top == cub.bottom or
self.left == cub.right or
self.right == cub.left or
self.outeye == cub.ineye or
self.ineye == cub.outeye):
return False
# Discard corner intersects
if (self.left == cub.right and self.bottom == cub.top and
self.outeye == cub.ineye or
self.left == cub.right and cub.bottom == self.top and
self.outeye == cub.ineye or
self.left == cub.right and self.bottom == cub.top and
cub.outeye == self.ineye or
self.left == cub.right and cub.bottom == self.top and
cub.outeye == self.ineye or
cub.left == self.right and self.bottom == cub.top and
self.outeye == cub.ineye or
cub.left == self.right and cub.bottom == self.top and
self.outeye == cub.ineye or
cub.left == self.right and self.bottom == cub.top and
cub.outeye == self.ineye or
cub.left == self.right and cub.bottom == self.top and
cub.outeye == self.ineye):
return False
return True
def intersection(self, cub, edges=False):
"""Returns the cuboid resulting of the intersection of this and cub
If the cuboids are only touching by their edges, and the
argument 'edges' is True the cuboid returned will have a volume of 0.
Returns None if there is no intersection.
Arguments:
cub (Cuboid): The other cuboid.
edges (bool): If true, touching edges are considered an
intersection, and a cuboid of 0 height or width or depth will be
returned
Returns:
Cuboid: Intersection.
None: There was no intersection.
"""
if not self.intersects(cub, edges=edges):
return None
bottom = max(self.bottom, cub.bottom)
left = max(self.left, cub.left)
top = min(self.top, cub.top)
right = min(self.right, cub.right)
outeye = max(self.outeye, cub.outeye)
ineye = min(self.ineye, cub.ineye)
return Cuboid(
left, bottom, outeye,
right - left, top - bottom, ineye - outeye)
def join(self, other):
"""Try to join a cuboid to this one.
If the result is also a cuboid and the operation is successful then
this cuboid is modified to the union.
Arguments:
other (Cuboid): Cuboid to join
Returns:
bool: True when successfully joined, False otherwise
"""
if self.contains(other):
return True
if other.contains(self):
self.x = other.x
self.y = other.y
self.z = other.z
self.width = other.width
self.height = other.height
self.depth = other.depth
return True
if not self.intersects(other, edges=True):
return False
# Other cuboid is Up/Down from this
if self.left == other.left and self.width == other.width and \
                self.outeye == other.outeye and self.depth == other.depth:
y_min = min(self.bottom, other.bottom)
y_max = max(self.top, other.top)
self.y = y_min
self.height = y_max - y_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
                self.outeye == other.outeye and self.depth == other.depth:
x_min = min(self.left, other.left)
x_max = max(self.right, other.right)
self.x = x_min
self.width = x_max - x_min
return True
# Other cuboid is Right/Left from this
if self.bottom == other.bottom and self.height == other.height and \
self.left == other.left and self.width == other.width:
z_min = min(self.outeye, other.outeye)
z_max = max(self.ineye, other.ineye)
self.z = z_min
self.depth = z_max - z_min
return True
return False
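# Example (values follow the semantics above):
#   a = Cuboid(0, 0, 0, 2, 2, 2)
#   b = Cuboid(1, 1, 1, 2, 2, 2)
#   a.intersects(b)    -> True
#   a.intersection(b)  -> R(1, 1, 1, 1, 1, 1)
#   c = Cuboid(2, 0, 0, 2, 2, 2)
#   a.join(c)          -> True, and `a` grows to R(0, 0, 0, 4, 2, 2)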
| [((487, 572), 'math.sqrt', 'sqrt', (['((self.x - point.x) ** 2 + (self.y - point.y) ** 2 + (self.z - point.z) ** 2)'], {}), '((self.x - point.x) ** 2 + (self.y - point.y) ** 2 + (self.z - point.z) **\n 2)\n', (491, 572), False, 'from math import sqrt\n')] |
tahmadvand/recipe_app_api | app/recipe/tests/test_recipe_api.py | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests
from core.models import Recipe, Tag, Ingredient
from ..serializers import RecipeSerializer, RecipeDetailSerializer
import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system
from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API
RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app
# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
# generate our upload image url
# you're going to need the existing recipe ID in order to upload an image
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
# name of the end point that the default router will create
# for our viewset because we're going to have a detail action
# this is how you specify arguments with the reverse function
# you just pass in args and then you pass in a list of the
# arguments you want to add
# here we have single item
def sample_tag(user, name='Main course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 10,
'price': 5.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
    # defaults.update(params) merges any overrides passed by the caller;
    # the two asterisks then unpack the dictionary into keyword arguments
    # when calling Recipe.objects.create.
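    # e.g. sample_recipe(user, title='Curry') ends up calling
    # Recipe.objects.create(user=user, title='Curry', time_minutes=10, price=5.00)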
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
def test_required_auth(self):
"""Test the authenticaiton is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
# we're going to access them by retrieving
# all of the recipes from our database.
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
# test recipes are limited to the authenticated user.
user2 = get_user_model().objects.create_user(
'[email protected]',
'pass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
# filter our recipes by the authenticated user
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
# many=true: this is because we were returning the list view
# or we wanted to simulate the list view in our serializer
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
# in this case we just want to serialize a single object
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Test recipe',
'time_minutes': 30,
'price': 10.00,
}
res = self.client.post(RECIPES_URL, payload)
# post this payload dictionary to our recipes URL.
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# this is the standard HTTP response code for creating objects
# in an API.
recipe = Recipe.objects.get(id=res.data['id'])
# When you create an object using the Django rest framework the
# default behavior is that it will return a dictionary containing
# the created object This is how I know that if we do res.data and
# retrieve the id key this will get the id of the created object.
# Next what we're going to do is we're going to loop through each
# one of these keys and then we're going to check
# that is the correct value assigned to our recipe model.
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
# assertion for each one of these keys, check that it is
# equal to the same key in the recipe
# payload[key]: This will actually get the value of the
# key in our payload object
# getattr: that allows you to retrieve an attribute from
# an object by passing in a variable. (instead of recipe.key)
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Tag 1')
tag2 = sample_tag(user=self.user, name='Tag 2')
payload = {
'title': 'Test recipe with two tags',
'tags': [tag1.id, tag2.id],
'time_minutes': 30,
'price': 10.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
# retrieve the created recipe
tags = recipe.tags.all()
# retrieve the tags that were created with the recipe
self.assertEqual(tags.count(), 2)
# because we expect two tags to be assigned.
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
# check if the tags that we created as our sample tags are
# the same as the tags that are in our queryset.
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
payload = {
'title': 'Test recipe with ingredients',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 45,
'price': 15.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
# get the ingredients queryset
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
# test partial update and full update of an object
# there are two ways in which you can update an object using the
# API there's two different HTTP methods: put, patch
# patch: Patch is used to update the fields that are provided
# in the payload so the only fields that it will change are the
# fields that are provided and any fields that are omitted from
# the request will not be modified in the object that's being updated.
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
# make a request to change a field in our recipe.
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
# add a tag to the recipe
new_tag = sample_tag(user=self.user, name='Curry')
# add a new tag and what we're going to do is we're going
# to swap out this tag that we create here and we're going
# to replace it with a new tag
payload = {'title': 'Partially Updated sample recipe',
'tags': [new_tag.id]}
# tags will be replaced with this new tag so the existing tag that
# we created won't be assigned to it
url = detail_url(recipe.id)
# the way that you update an object using the Django rest framework
# view sets is you use the detail URL so that is the URL of the
# recipe with the ID of the recipe that we want to update.
self.client.patch(url, payload)
# make request
# We're going to retrieve an update to the recipe from the
# database and then we're going to check the fields that
# are assigned and just make sure they match what we expect.
recipe.refresh_from_db()
# refreshes the details in our recipe from the database
# typically when you create a new model and you have a
# reference to a model the details of that won't change
# unless you do refresh from dB if the values have changed
# in the database.
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
# check that the tag new tag is in the tags that we retrieved
# test full update
# put: it will replace the object that we're updating with the full
# object that is provided in the request that means if you exclude
# any fields in the payload those fields will actually be removed
# from the object that you're updating
def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Fully Updated sample recipe',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
# we will check that the tags assigned are zero now as I explained
# because when we do a HTTP put if we omit a field
# that should clear the value of that field so now our recipe
# that did have a sample tag assigned should not have any tags
# assigned
class RecipeImageUploadTests(TestCase):
# what happens at the setup of the test
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user('user', 'testpass')
self.client.force_authenticate(self.user)
# authenticate our user
self.recipe = sample_recipe(user=self.user)
# after the test runs it runs tear down
def tearDown(self):
self.recipe.image.delete()
# make sure that our file system is kept clean after our test
# removing all of the test files that we create
# delete the image if it exists in the recipe
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
# going to use the sample recipe that gets created
# it creates a named temporary file on the system at a random
# location usually in the /temp folder
# create a temporary file we're going to write an image
# to that file and then we're going to upload that file
# through the API like you would with a HTTP POST and then
# we're going to run some assertions to check that it
# uploaded correctly
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
# creates black square
img.save(ntf, format='JPEG')
ntf.seek(0)
# it's the way that Python reads files so because we've
# saved the file it will be the seeking will be done to the
# end of the file so if you try to access it then it would
# just be blank because you've already read up to the end
# of the file so use this seek function to set
# the pointer back to the beginning of the file
res = self.client.post(url, {'image': ntf}, format='multipart')
# assertions
# refreshing the database for our recipe
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
# check that the images in the response so that's the path to
# the image that should be accessible
self.assertIn('image', res.data)
# check that the path exists for the image that is saved to our model
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags': '{},{}'.format(tag1.id, tag2.id)}
)
# this will create a comma separated list string and assign
# it to the tags get parameter
# if our filtering is working
# should only return the first two recipe
# test the response:
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
# serialize the recipes and we're going to check if
# they exist in the responses returned
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
# check the return result
def test_filter_recipes_by_ingredients(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title='Posh beans on toast')
recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
# test API
res = self.client.get(
RECIPES_URL,
{'ingredients': '{},{}'.format(ingredient1.id, ingredient2.id)}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| [((792, 821), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-list"""'], {}), "('recipe:recipe-list')\n", (799, 821), False, 'from django.urls import reverse\n'), ((1171, 1226), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-upload-image"""'], {'args': '[recipe_id]'}), "('recipe:recipe-upload-image', args=[recipe_id])\n", (1178, 1226), False, 'from django.urls import reverse\n'), ((1408, 1457), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-detail"""'], {'args': '[recipe_id]'}), "('recipe:recipe-detail', args=[recipe_id])\n", (1415, 1457), False, 'from django.urls import reverse\n'), ((1853, 1893), 'core.models.Tag.objects.create', 'Tag.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (1871, 1893), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2001, 2048), 'core.models.Ingredient.objects.create', 'Ingredient.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (2026, 2048), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2278, 2322), 'core.models.Recipe.objects.create', 'Recipe.objects.create', ([], {'user': 'user'}), '(user=user, **defaults)\n', (2299, 2322), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2585, 2596), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (2594, 2596), False, 'from rest_framework.test import APIClient\n'), ((2930, 2941), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (2939, 2941), False, 'from rest_framework.test import APIClient\n'), ((4104, 4141), 'core.models.Recipe.objects.filter', 'Recipe.objects.filter', ([], {'user': 'self.user'}), '(user=self.user)\n', (4125, 4141), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((5450, 5487), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (5468, 5487), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((6921, 6958), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (6939, 6958), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((7971, 8008), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (7989, 8008), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((11775, 11786), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (11784, 11786), False, 'from rest_framework.test import APIClient\n'), ((12894, 12936), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".jpg"""'}), "(suffix='.jpg')\n", (12921, 12936), False, 'import tempfile\n'), ((12963, 12989), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(10, 10)'], {}), "('RGB', (10, 10))\n", (12972, 12989), False, 'from PIL import Image\n'), ((13996, 14034), 'os.path.exists', 'os.path.exists', (['self.recipe.image.path'], {}), '(self.recipe.image.path)\n', (14010, 14034), False, 'import os\n'), ((3434, 3454), 'core.models.Recipe.objects.all', 'Recipe.objects.all', ([], {}), '()\n', (3452, 3454), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2962, 2978), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2976, 2978), False, 'from django.contrib.auth import get_user_model\n'), ((3813, 3829), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3827, 3829), False, 'from django.contrib.auth import get_user_model\n'), ((11807, 11823), 'django.contrib.auth.get_user_model', 
'get_user_model', ([], {}), '()\n', (11821, 11823), False, 'from django.contrib.auth import get_user_model\n')] |
paju3125/LetsUpgrade-Python-B7 | Assignment 1 n 2 Day 8.py | c5767361f60f1ec405ab235af85035e2bb9a71e3 | # Assignment 1 Day 8
# write a decorator function that takes the input for
# any kind of function you want to build
def getInput(calculate_arg_fuc):
def wrap_function():
print("Enter two numbers ")
a=int(input("Enter first number = "))
b=int(input("Enter second number = "))
calculate_arg_fuc(a,b)
return wrap_function
@getInput
def addition(num1,num2):
print("Addition = ",num1+num2)
@getInput
def subtraction(num1,num2):
print("Subtraction = ",num1-num2)
@getInput
def multiplication(num1,num2):
print("Multiplication = ",num1*num2)
@getInput
def division(num1,num2):
print("Division = ",num1/num2)
addition()
subtraction()
multiplication()
division()
# Assignment 2 day 8
# you need to develop a python program to open a file in read only mode and
# try writing something to it and handle the subsequent error using Exception Handling
try:
f=open("abc.txt","r");
f.write("Heyy, i am prajval");
f.close();
except:
print("File is in read only mode...")
| [] |
migueldvb/gwcs | gwcs/coordinate_frames.py | 4eb2abdb1d9d49ee10c1edbcae0d1cec4c758c39 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
class CoordinateFrame(object):
"""
Base class for CoordinateFrames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, six.string_types):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
if axes_names is not None:
if isinstance(axes_names, six.string_types):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
if reference_position is not None:
self._reference_position = reference_position
else:
self._reference_position = None
super(CoordinateFrame, self).__init__()
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
else:
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes intheis frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
return self._reference_frame
@property
def reference_position(self):
try:
return self._reference_position
except AttributeError:
return None
def input_axes(self, start_frame=None):
"""
Computes which axes in `start_frame` contribute to each axis in the current frame.
Parameters
----------
start_frame : ~gwcs.coordinate_frames.CoordinateFrame
A frame in the WCS pipeline
The transform between start_frame and the current frame is used to compute the
mapping inputs: outputs.
"""
sep = self._separable(start_frame)
inputs = []
for ax in self.axes_order:
inputs.append(list(sep[ax].nonzero()[0]))
return inputs
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None):
naxes = 2
if reference_frame is not None:
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name)
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
        # Reorder axes if necessary.
try:
return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
except:
raise
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position)
def coordinates(self, *args):
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
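        # Merge the axis metadata (type, unit, name) of every sub-frame, placed according to each frame's axes_order.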
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
print(frame, fargs, frame.axes_order)
coo.append(frame.coordinates(*fargs))
return coo
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None):
super(Frame2D, self).__init__(2, ["SPATIAL", "SPATIAL"], axes_order, name=name,
axes_names=axes_names, unit=unit)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
| [((8880, 8897), 'numpy.isscalar', 'np.isscalar', (['args'], {}), '(args)\n', (8891, 8897), True, 'import numpy as np\n'), ((2050, 2074), 'astropy.utils.isiterable', 'astutil.isiterable', (['unit'], {}), '(unit)\n', (2068, 2074), True, 'from astropy import utils as astutil\n'), ((7717, 7783), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['*args'], {'unit': 'self.unit', 'frame': 'self._reference_frame'}), '(*args, unit=self.unit, frame=self._reference_frame)\n', (7731, 7783), True, 'from astropy import coordinates as coord\n'), ((9919, 9940), 'numpy.unique', 'np.unique', (['axes_order'], {}), '(axes_order)\n', (9928, 9940), True, 'import numpy as np\n'), ((2332, 2342), 'astropy.units.Unit', 'u.Unit', (['au'], {}), '(au)\n', (2338, 2342), True, 'from astropy import units as u\n')] |
lukpazera/modox | modox/chan_modifier.py | 4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01 |
import lx
import modo
import select
import item
from run import run
class ChannelModifierUtils(object):
@classmethod
def attachModifierToItem(cls, modifierModoItem, hostModoItem):
"""
Allows for attaching modifier to locator type item.
Attached item will show up under the locator item in item list
(you can unfold it with a little plus icons next to item name in item list).
Attached modifiers are getting deleted together with locator they are attached to.
Parameters
----------
modifierModoItem : modo.Item
Modifier item that you want to attach.
hostModoItem : modo.Item
Locator type item you want to attach modifier to.
"""
item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods')
class TransformConstraintOperation(object):
POSITION = 'pos'
ROTATION = 'rot'
SCALE = 'scl'
class CMTransformConstraint(object):
"""
This class represents Transform Constraint channel modifier.
Parameters
----------
modoItem : modo.Item
The constraint modo item.
"""
Operation = TransformConstraintOperation
@classmethod
def new(cls, assemblyItem, hostItem, name='TransformConstraint'):
"""
Adds new transform constraint to the scene.
Parameters
----------
assemblyItem : modo.Item
This is assembly item to which the constraint will be added.
Passing this item is mandatory. However, if you don't want to add constraints
to any assembly pass an item that is not a group.
This doesn't throw an error and it doesn't add constraint to any groups either.
hostItem : modo.Item
Constraint can be attached to an item such that it'll be under this item
in item list. It'll also get deleted when the host item is deleted.
name : str
Name for new constraint item.
Returns
-------
CMTransformConstraint
"""
itemSelection = select.ItemSelection()
itemSelection.clear()
run('modifier.create "cmTransformConstraint:rot" item:{%s} insert:false' % assemblyItem.id)
cnsItem = itemSelection.getOfTypeModo("cmTransformConstraint")[0]
cnsItem.name = name
ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem)
return CMTransformConstraint(cnsItem)
@property
def operation(self):
"""
Gets the type of the constraint.
Returns
-------
str
One of TransformConstraintOperation constants.
"""
return self._item.channel('operation').get()
@property
def inputChannel(self):
return self._item.channel('matrixInput')
@property
def outputChannel(self):
return self._item.channel('matrixOutput')
@property
def isRotationConstraint(self):
"""
Tests if this is rotation constraint.
Returns
-------
bool
"""
return self.operation == self.Operation.ROTATION
@property
def offset(self):
"""
Gets the constraint offset vector.
Returns
-------
modo.Vector3
"""
x = self._item.channel('offset.X').get()
y = self._item.channel('offset.Y').get()
z = self._item.channel('offset.Z').get()
return modo.Vector3(x, y, z)
@offset.setter
def offset(self, offsetVec):
"""
Sets new offset for the constraint.
Parameters
----------
offsetVec : modo.Vector3
"""
self._item.channel('offset.X').set(offsetVec[0], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Y').set(offsetVec[1], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Z').set(offsetVec[2], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
@property
def modoItem(self):
return self._item
# -------- Private methods
def __init__(self, modoItem):
if modoItem.type != 'cmTransformConstraint':
raise TypeError
self._item = modoItem | [((757, 846), 'item.ItemUtils.addForwardGraphConnections', 'item.ItemUtils.addForwardGraphConnections', (['modifierModoItem', 'hostModoItem', '"""chanMods"""'], {}), "(modifierModoItem, hostModoItem,\n 'chanMods')\n", (798, 846), False, 'import item\n'), ((2133, 2155), 'select.ItemSelection', 'select.ItemSelection', ([], {}), '()\n', (2153, 2155), False, 'import select\n'), ((2195, 2290), 'run.run', 'run', (['(\'modifier.create "cmTransformConstraint:rot" item:{%s} insert:false\' %\n assemblyItem.id)'], {}), '(\'modifier.create "cmTransformConstraint:rot" item:{%s} insert:false\' %\n assemblyItem.id)\n', (2198, 2290), False, 'from run import run\n'), ((3496, 3517), 'modo.Vector3', 'modo.Vector3', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3508, 3517), False, 'import modo\n')] |
WangCHEN9/solidity_demos | brownie_fund_me/scripts/fund_and_withdraw.py | cf28111a1e972ab9dde70f6d3fac22c897d8b660 | from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
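    # Fund the most recently deployed FundMe contract with its minimum entrance fee.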
fund_me = FundMe[-1]
account = get_account()
entrance_fee = fund_me.getEntranceFee()
print(f"entrance is {entrance_fee}")
print("funding..")
fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
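    # Withdraw the funds from the most recently deployed FundMe contract.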
fund_me = FundMe[-1]
account = get_account()
fund_me.withdraw({"from": account})
def main():
fund()
withdraw()
if __name__ == "__main__":
main()
| [((128, 141), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (139, 141), False, 'from scripts.helpful_scripts import get_account\n'), ((366, 379), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (377, 379), False, 'from scripts.helpful_scripts import get_account\n')] |
jefernathan/Python | ex019.py | 2f840a625e8d46d41ab36df07ef50ae15a03c5ab | # A teacher wants to randomly draw one of his four students to erase the board. Write a program that helps him by reading the students' names and printing the chosen one's name on the screen.
from random import choice
nome1 = input('Digite um nome: ')
nome2 = input('Digite outro nome: ')
nome3 = input('Digite mais um nome: ')
nome4 = input('Digite o último nome: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
| [((395, 407), 'random.choice', 'choice', (['nome'], {}), '(nome)\n', (401, 407), False, 'from random import choice\n')] |
liviamendes/agenda-django-project | contacts/admin.py | d602bb5e762ea477c3c97b5a475ad79036c0c93d | from django.contrib import admin
from .models import Categoria, Contact
class ContactAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'last_name', 'phone', 'email', 'creation_date', 'categoria', 'show')
list_display_links = ('id', 'name', 'last_name')
list_filter = ('categoria',)
list_per_page = 10
search_fields = ('name', 'last_name', 'phone')
list_editable = ('phone', 'show')
admin.site.register(Categoria)
admin.site.register(Contact, ContactAdmin)
| [((415, 445), 'django.contrib.admin.site.register', 'admin.site.register', (['Categoria'], {}), '(Categoria)\n', (434, 445), False, 'from django.contrib import admin\n'), ((446, 488), 'django.contrib.admin.site.register', 'admin.site.register', (['Contact', 'ContactAdmin'], {}), '(Contact, ContactAdmin)\n', (465, 488), False, 'from django.contrib import admin\n')] |
robinrobinzon/fastpic | upload_from_folder.py | 966f1aa8c6d7e98651727e7ed7f6b25970d5da11 | import datetime
import os
import shutil
import tempfile
from joblib import Parallel, delayed
from fastpic_upload import upload_file_to_fastpic
_n_jobs_for_upload = 20
_root_folders_set = (
'/path/to/folder',
)
_spoiler_for_each_file = True
def process_one_pic(result_key, pic_path, tmp_dir):
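    # Upload a single picture to fastpic and return its key together with the (url, link) pair.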
pic_url, pic_link = upload_file_to_fastpic(pic_path, tmp_dir)
print(pic_url)
return result_key, (pic_url, pic_link)
def upload_from_folder(folder_path):
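    # Walk folder_path, collect the image files and upload them in parallel threads.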
pics_to_upload = {}
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.split('.')[-1] not in ('jpg', 'jpeg', 'bmp', 'png'):
continue
file_path = os.path.join(root, file)
pics_to_upload[file] = file_path
print(pics_to_upload)
    print('Need to upload {} photos'.format(len(pics_to_upload)))
result = {}
tmp_dir = tempfile.mkdtemp()
try:
sub_results = Parallel(n_jobs=_n_jobs_for_upload, backend='threading')(
delayed(process_one_pic)(key, pics_to_upload[key], tmp_dir) for key in sorted(pics_to_upload))
for sub_result in sub_results:
result[sub_result[0]] = sub_result[1]
finally:
shutil.rmtree(tmp_dir)
return result
def print_result_to_file(result, result_file_path):
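    # Write the upload results as BBCode [spoiler]/[img] blocks ready for forum posting.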
with open(result_file_path, 'w', encoding='utf8', newline='') as codes_file:
codes_file.write('[spoiler="Скриншоты"]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
for result_key in sorted(result):
if _spoiler_for_each_file:
codes_file.write('[spoiler="{}"]'.format(result_key))
codes_file.write(os.linesep)
url, link = result[result_key]
codes_file.write('[url={}][img]{}[/img][/url]'.format(link, url))
if _spoiler_for_each_file:
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
codes_file.write(os.linesep)
codes_file.write(os.linesep)
codes_file.write('[/spoiler]')
def main():
for root_folder in _root_folders_set:
result = upload_from_folder(root_folder)
print_result_to_file(result, os.path.join(root_folder, 'result_codes.txt'))
if __name__ == '__main__':
started = datetime.datetime.now()
print(started, 'started')
main()
finished = datetime.datetime.now()
print(finished, 'all done in', finished - started)
| [((326, 367), 'fastpic_upload.upload_file_to_fastpic', 'upload_file_to_fastpic', (['pic_path', 'tmp_dir'], {}), '(pic_path, tmp_dir)\n', (348, 367), False, 'from fastpic_upload import upload_file_to_fastpic\n'), ((523, 543), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (530, 543), False, 'import os\n'), ((884, 902), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (900, 902), False, 'import tempfile\n'), ((2311, 2334), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2332, 2334), False, 'import datetime\n'), ((2391, 2414), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2412, 2414), False, 'import datetime\n'), ((1210, 1232), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (1223, 1232), False, 'import shutil\n'), ((694, 718), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (706, 718), False, 'import os\n'), ((934, 990), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '_n_jobs_for_upload', 'backend': '"""threading"""'}), "(n_jobs=_n_jobs_for_upload, backend='threading')\n", (942, 990), False, 'from joblib import Parallel, delayed\n'), ((2221, 2266), 'os.path.join', 'os.path.join', (['root_folder', '"""result_codes.txt"""'], {}), "(root_folder, 'result_codes.txt')\n", (2233, 2266), False, 'import os\n'), ((1004, 1028), 'joblib.delayed', 'delayed', (['process_one_pic'], {}), '(process_one_pic)\n', (1011, 1028), False, 'from joblib import Parallel, delayed\n')] |
TolyaTalamanov/open_model_zoo | tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py | 1697e60712df4ca72635a2080a197b9d3bc24129 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .postprocessor import PostprocessorWithSpecificTargets
from ..representation import BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction
from ..config import NumberField, ConfigError
class ClipSegmentationMask(PostprocessorWithSpecificTargets):
__provider__ = 'clip_segmentation_mask'
annotation_types = (BrainTumorSegmentationAnnotation, )
prediction_types = (BrainTumorSegmentationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'min_value': NumberField(value_type=int, min_value=0, optional=True, default=0, description="Min value"),
'max_value': NumberField(value_type=int, description="Max value")
})
return parameters
def configure(self):
self.min_value = self.get_value_from_config('min_value')
self.max_value = self.get_value_from_config('max_value')
if self.max_value < self.min_value:
raise ConfigError('max_value should be greater than min_value')
def process_image(self, annotation, prediction):
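        # Clamp both annotation and prediction masks to the configured [min_value, max_value] range.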
for target in annotation:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
for target in prediction:
target.mask = np.clip(target.mask, a_min=self.min_value, a_max=self.max_value)
return annotation, prediction
| [((1764, 1828), 'numpy.clip', 'np.clip', (['target.mask'], {'a_min': 'self.min_value', 'a_max': 'self.max_value'}), '(target.mask, a_min=self.min_value, a_max=self.max_value)\n', (1771, 1828), True, 'import numpy as np\n'), ((1890, 1954), 'numpy.clip', 'np.clip', (['target.mask'], {'a_min': 'self.min_value', 'a_max': 'self.max_value'}), '(target.mask, a_min=self.min_value, a_max=self.max_value)\n', (1897, 1954), True, 'import numpy as np\n')] |
isabella232/pynacl | tests/test_utils.py | b3f6c320569d858ba61d4bdf2ac788564528c1c9 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nacl.secret
import nacl.utils
def test_random_bytes_produces():
assert len(nacl.utils.random(16)) == 16
def test_random_bytes_produces_different_bytes():
assert nacl.utils.random(16) != nacl.utils.random(16)
def test_string_fixer():
assert str(nacl.secret.SecretBox(b"\x00" * 32)) == str(b"\x00" * 32)
def test_deterministic_random_bytes():
expected = (
b"0d8e6cc68715648926732e7ea73250cfaf2d58422083904c841a8ba"
b"33b986111f346ba50723a68ae283524a6bded09f83be6b80595856f"
b"72e25b86918e8b114bafb94bc8abedd73daab454576b7c5833eb0bf"
b"982a1bb4587a5c970ff0810ca3b791d7e12"
)
seed = (
b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d"
b"\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b"
b"\x1c\x1d\x1e\x1f"
)
assert (
nacl.utils.randombytes_deterministic(
100, seed, encoder=nacl.utils.encoding.HexEncoder
)
== expected
)
def test_deterministic_random_bytes_invalid_seed_length():
expected = "Deterministic random bytes must be generated from 32 bytes"
seed = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a"
with pytest.raises(TypeError) as e:
nacl.utils.randombytes_deterministic(100, seed)
assert expected in str(e.value)
| [((1802, 1826), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1815, 1826), False, 'import pytest\n')] |
citrok25/Codewars-1 | Solutions/6kyu/6kyu_mister_safetys_treasure.py | dc641c5079e2e8b5955eb027fd15427e5bdb2e26 | def unlock(m):
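    # Translate each letter to its phone-keypad digit (abc -> 2, def -> 3, ..., wxyz -> 9), case-insensitively.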
return m.lower().translate(
str.maketrans(
'abcdefghijklmnopqrstuvwxyz',
'22233344455566677778889999'
)
)
| [] |
Cha0sNation/RandomPython | guesstheword.py | 7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec | #! /home/cha0snation/anaconda3/bin/python
import random
def setup():
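    # Pick a random word and reset the revealed-letters list, tries counter and game flag.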
words = ["banana", "apple", "orange", "peach", "grape", "watermelon"]
output = []
word = words[random.randint(0, len(words) - 1)]
playing = True
tries = 5
return [words, output, word, tries, playing]
def check_finished(output, tries):
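    # The game is over when the player runs out of tries or every letter has been revealed.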
if tries == 0:
print("You ran out of tries")
print()
return True
count = 0
for letter in output:
if letter != "_":
count += 1
if count == len(output):
print_output(output)
print()
print()
return True
return False
def check_letter(guess, word, tries):
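    # Reveal every occurrence of the guessed letter in the word; lose one try if there is no match.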
correct = False
for index, letter in enumerate(word):
if letter == guess:
output[index] = guess
correct = True
if index == len(word) - 1:
if not correct:
print("Incorrect guess")
print()
return tries - 1
else:
return tries
def check_same(guess, output):
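    # If the letter was already revealed, warn the player and ask for a new single-character guess.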
same = False
for i in output:
if i == guess:
same = True
if same:
print("You already found that letter")
print()
print_output(output)
print()
print()
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
return guess
else:
return guess
def print_output(output):
for i in output:
print("{0} ".format(i), end="")
if __name__ == "__main__":
words, output, word, tries, playing = setup()
while playing:
print("Try to guess the word:")
if tries == 1:
print("You have {0} try left.".format(tries))
else:
print("You have {0} tries left.".format(tries))
# print("DEBUG: word is {0}".format(word))
if output == []:
for i in word:
output.append("_")
for i in range(len(output)):
print("_ ", end="")
else:
print_output(output)
print()
print()
try:
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
except (EOFError, KeyboardInterrupt):
print()
break
except ValueError:
print("Invalid guess")
break
print()
guess = check_same(guess, output)
tries = check_letter(guess, word, tries)
if check_finished(output, tries):
choice = input("Do you want to play again ? (y or n): ")
print()
if choice.lower().startswith("y"):
words, output, word, tries, playing = setup()
else:
playing = False
| [] |
svakulenk0/ArtDATIS | web_app/index.py | 29e646f7bcb931e733ee248cc973411ffb18be64 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Dec 8, 2019
.. codeauthor: svitlana vakulenko
<[email protected]>
Index docs into ES
https://qbox.io/blog/building-an-elasticsearch-index-with-python
'''
from settings import *
import glob
import re
# first N characters of the text used as the document preview
LIMIT_START = 100
txts_path = '%s/artdatis/tagging/OCRed/typed/' % DATA_PATH
text_corpus = []
def corpus_iterator():
# filter out and collect text files
for file_path in glob.glob(txts_path+'*_text.txt'):
with open(file_path, encoding="utf-8") as file:
text = file.read()
# filter duplicates
if text not in text_corpus:
text_corpus.append(text)
text = re.sub(' +', ' ', text)
start_text = text.lstrip()[:LIMIT_START]
with open(file_path.split('_text.txt')[0]+'_path.txt') as path_file:
path = path_file.read().strip().replace(DATA_PATH, '/images')
yield {
"_index": INDEX_NAME,
"_type": TYPE_NAME,
"_source": {"file_path": path, "text": text, "start_text": start_text},
}
print("Loaded %d documents"%len(text_corpus))
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
if es.indices.exists(INDEX_NAME):
print("deleting '%s' index..." % (INDEX_NAME))
res = es.indices.delete(index = INDEX_NAME)
print(" response: '%s'" % (res))
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
print("creating '%s' index..." % (INDEX_NAME))
res = es.indices.create(index = INDEX_NAME, body = request_body)
print(" response: '%s'" % (res))
# bulk index the data
print("bulk indexing...")
bulk(es, corpus_iterator())
# sanity check
res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
print("results:")
for hit in res['hits']['hits']:
print(hit["_source"])
| [((1427, 1457), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {'hosts': '[ES_HOST]'}), '(hosts=[ES_HOST])\n', (1440, 1457), False, 'from elasticsearch import Elasticsearch\n'), ((499, 534), 'glob.glob', 'glob.glob', (["(txts_path + '*_text.txt')"], {}), "(txts_path + '*_text.txt')\n", (508, 534), False, 'import glob\n'), ((757, 780), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (763, 780), False, 'import re\n')] |
PythonIsMagic/ponyup | src/tokens.py | 3b2630d573cd46d0569f713c6d4c3790688dc62d | """
A Token is a button or other object on the table that represents a position, a game state, a player state, or some other piece of info.
"""
class Token(object):
def __init__(self, name, table):
self.table = table
self.name = name
self.seat = None
| [] |
maa76/SSof-Project1920 | T05-09/program.py | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | nis=get('nis')
q1="xpto1"
q2=nis + "xpto2"
query=query1.q2
koneksi=0
q=execute(query,koneksi)
| [] |
Xiangs18/Algorithms-with-Python-Second-Edition | Chapter09/interpolation_search.py | 96844e1ae7054e099772dc691c1f41f15c2bfba5 | def nearest_mid(input_list, lower_bound_index, upper_bound_index, search_value):
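    # Estimate the likely index of search_value by linear interpolation between the boundary values.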
return lower_bound_index + (
(upper_bound_index - lower_bound_index)
// (input_list[upper_bound_index] - input_list[lower_bound_index])
) * (search_value - input_list[lower_bound_index])
def interpolation_search(ordered_list, term):
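    # Narrow the search range around the interpolated midpoint until the term is found or the range is exhausted.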
size_of_list = len(ordered_list) - 1
index_of_first_element = 0
index_of_last_element = size_of_list
while index_of_first_element <= index_of_last_element:
mid_point = nearest_mid(
ordered_list, index_of_first_element, index_of_last_element, term
)
if mid_point > index_of_last_element or mid_point < index_of_first_element:
return None
if ordered_list[mid_point] == term:
return mid_point
if term > ordered_list[mid_point]:
index_of_first_element = mid_point + 1
else:
index_of_last_element = mid_point - 1
store = [2, 4, 5, 12, 43, 54, 60, 77]
a = interpolation_search(store, 2)
print("Index position of value 2 is ", a)
| [] |
javixeneize/asvs-1 | projects/models.py | 31e9fdfd2d538c8ed1adf23fcb4f143ef28541c6 | from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.urls import reverse
class ProjectQuerySet(models.QuerySet):
def projects_per_user(self, user):
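        # Keep only the projects whose owner matches the requesting user's username.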
return self.filter(
Q(project_owner=user.username)
)
class Projects(models.Model):
project_name = models.CharField(max_length=60)
project_owner = models.CharField(default=User, max_length=60)
project_created = models.DateTimeField(auto_now_add=True)
project_description = models.CharField(max_length=255)
project_level = models.IntegerField(default=0)
objects = ProjectQuerySet.as_manager()
def __str__(self):
return str(self.pk)
| [((349, 380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (365, 380), False, 'from django.db import models\n'), ((401, 446), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'User', 'max_length': '(60)'}), '(default=User, max_length=60)\n', (417, 446), False, 'from django.db import models\n'), ((469, 508), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (489, 508), False, 'from django.db import models\n'), ((535, 567), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (551, 567), False, 'from django.db import models\n'), ((588, 618), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (607, 618), False, 'from django.db import models\n'), ((257, 287), 'django.db.models.Q', 'Q', ([], {'project_owner': 'user.username'}), '(project_owner=user.username)\n', (258, 287), False, 'from django.db.models import Q\n')] |
peaudecastor/checkov | tests/serverless/checks/aws/test_AdminPolicyDocument.py | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | import os
import unittest
from checkov.serverless.checks.function.aws.AdminPolicyDocument import check
from checkov.serverless.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAdminPolicyDocument(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
        # Used in the example_AdminPolicyDocument test files
os.environ["sneaky_var"] = "*"
test_files_dir = current_dir + "/example_AdminPolicyDocument"
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 2,
f"Passed checks: {[fc.file_path for fc in report.passed_checks]}")
self.assertEqual(summary['failed'], 6,
f"Failed checks: {[fc.file_path for fc in report.failed_checks]}")
self.assertEqual(summary['skipped'], 0,
f"Skipped checks: {[fc.file_path for fc in report.skipped_checks]}")
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| [((1148, 1163), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1161, 1163), False, 'import unittest\n'), ((293, 301), 'checkov.serverless.runner.Runner', 'Runner', ([], {}), '()\n', (299, 301), False, 'from checkov.serverless.runner import Runner\n'), ((340, 366), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (356, 366), False, 'import os\n'), ((567, 598), 'checkov.runner_filter.RunnerFilter', 'RunnerFilter', ([], {'checks': '[check.id]'}), '(checks=[check.id])\n', (579, 598), False, 'from checkov.runner_filter import RunnerFilter\n')] |
lulinsheng/macro_pack | src/macro_pack.py | 4e9d0178354bad2aa557298f44ba5d4385a72a2b | #!/usr/bin/python3
# encoding: utf-8
import os
import sys
import getopt
import logging
import shutil
import psutil
from modules.com_run import ComGenerator
from modules.web_server import ListenServer
from modules.Wlisten_server import WListenServer
from modules.payload_builder_factory import PayloadBuilderFactory
from common import utils, mp_session, help
from common.utils import MSTypes
from common.definitions import VERSION, LOGLEVEL
if sys.platform == "win32":
try:
import win32com.client #@UnresolvedImport @UnusedImport
except:
print("Error: Could not find win32com.")
sys.exit(1)
MP_TYPE="Pro"
if utils.checkModuleExist("pro_core"):
from pro_modules.utilities.dcom_run import DcomGenerator
from pro_modules.payload_builders.containers import ContainerGenerator
from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro
from pro_core import arg_mgt_pro, mp_session_pro
else:
MP_TYPE="Community"
from colorama import init
from termcolor import colored
# {PyArmor Protection Code}
# {PyArmor Plugins}
# use Colorama to make Termcolor work on Windows too
init()
WORKING_DIR = "temp"
BANNER = help.getToolPres()
def main(argv):
global MP_TYPE
logLevel = LOGLEVEL
# initialize macro_pack session object
working_directory = os.path.join(os.getcwd(), WORKING_DIR)
if MP_TYPE == "Pro":
mpSession = mp_session_pro.MpSessionPro(working_directory, VERSION, MP_TYPE)
else:
mpSession = mp_session.MpSession(working_directory, VERSION, MP_TYPE)
try:
longOptions = ["embed=", "listen=", "port=", "webdav-listen=", "generate=", "quiet", "input-file=", "encode",
"obfuscate", "obfuscate-form", "obfuscate-names", "obfuscate-declares", "obfuscate-strings",
"obfuscate-names-charset=", "obfuscate-names-minlen=", "obfuscate-names-maxlen=",
"file=","template=","listtemplates","listformats","icon=", "start-function=","uac-bypass",
"unicode-rtlo=", "dde", "print", "force-yes", "help"]
shortOptions= "e:l:w:s:f:t:G:hqmop"
# only for Pro release
if MP_TYPE == "Pro":
longOptions.extend(arg_mgt_pro.proArgsLongOptions)
shortOptions += arg_mgt_pro.proArgsShortOptions
# Only enabled on windows
if sys.platform == "win32":
longOptions.extend(["run=", "run-visible"])
opts, args = getopt.getopt(argv, shortOptions, longOptions) # @UnusedVariable
except getopt.GetoptError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(2)
for opt, arg in opts:
if opt in ("-o", "--obfuscate"):
mpSession.obfuscateForm = True
mpSession.obfuscateNames = True
mpSession.obfuscateStrings = True
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-form":
mpSession.obfuscateForm = True
elif opt=="--obfuscate-declares":
mpSession.obfuscateDeclares = True
elif opt=="--obfuscate-names":
mpSession.obfuscateNames = True
elif opt=="--obfuscate-names-charset":
try:
mpSession.obfuscatedNamesCharset = arg
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-minlen":
try:
mpSession.obfuscatedNamesMinLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMinLen < 4 or mpSession.obfuscatedNamesMinLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-names-maxlen":
try:
mpSession.obfuscatedNamesMaxLen = int(arg)
except ValueError:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if mpSession.obfuscatedNamesMaxLen < 4 or mpSession.obfuscatedNamesMaxLen > 255:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="--obfuscate-strings":
mpSession.obfuscateStrings = True
elif opt=="-s" or opt=="--start-function":
mpSession.startFunction = arg
elif opt=="-l" or opt=="--listen":
mpSession.listen = True
mpSession.listenRoot = os.path.abspath(arg)
elif opt=="--port":
mpSession.listenPort = int(arg)
mpSession.WlistenPort = int(arg)
elif opt=="--icon":
mpSession.icon = arg
elif opt=="-w" or opt=="--webdav-listen":
mpSession.Wlisten = True
mpSession.WRoot = os.path.abspath(arg)
elif opt == "-f" or opt== "--input-file":
mpSession.fileInput = arg
elif opt == "-e" or opt== "--embed":
mpSession.embeddedFilePath = os.path.abspath(arg)
elif opt=="-t" or opt=="--template":
mpSession.template = arg
elif opt == "--listtemplates":
help.printTemplatesUsage(BANNER, sys.argv[0])
sys.exit(0)
elif opt=="-q" or opt=="--quiet":
logLevel = "WARN"
elif opt=="-p" or opt=="--print":
mpSession.printFile = True
elif opt == "--dde":
if sys.platform == "win32":
mpSession.ddeMode = True
elif opt == "--run":
if sys.platform == "win32":
mpSession.runTarget = os.path.abspath(arg)
elif opt == "--run-visible":
if sys.platform == "win32":
mpSession.runVisible = True
elif opt == "--force-yes":
mpSession.forceYes = True
elif opt=="--uac-bypass":
mpSession.uacBypass = True
elif opt == "--unicode-rtlo":
mpSession.unicodeRtlo = arg
elif opt in ("-G", "--generate"):
mpSession.outputFilePath = os.path.abspath(arg)
elif opt == "--listformats":
help.printAvailableFormats(BANNER)
sys.exit(0)
elif opt=="-h" or opt=="--help":
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
else:
if MP_TYPE == "Pro":
arg_mgt_pro.processProArg(opt, arg, mpSession, BANNER)
else:
help.printUsage(BANNER, sys.argv[0])
sys.exit(0)
if logLevel == "INFO":
os.system('cls' if os.name == 'nt' else 'clear')
# Logging
logging.basicConfig(level=getattr(logging, logLevel),format="%(message)s", handlers=[utils.ColorLogFiler()])
logging.info(colored(BANNER, 'green'))
logging.info(" [+] Preparations...")
# check input args
if mpSession.fileInput is None:
# Argument not supplied, try to get file content from stdin
if not os.isatty(0): # check if something is being piped
logging.info(" [-] Waiting for piped input feed...")
mpSession.stdinContent = sys.stdin.readlines()
# Close Stdin pipe, so we can call input() later without triggering EOF
#sys.stdin.close()
if sys.platform == "win32":
sys.stdin = open("conIN$")
else:
sys.stdin = sys.__stdin__
else:
if not os.path.isfile(mpSession.fileInput):
logging.error(" [!] ERROR: Could not find %s!" % mpSession.fileInput)
sys.exit(2)
else:
logging.info(" [-] Input file path: %s" % mpSession.fileInput)
if MP_TYPE == "Pro":
if mpSession.communityMode:
logging.warning(" [!] Running in community mode (pro features not applied)")
MP_TYPE="Community"
else:
arg_mgt_pro.verify(mpSession)
# Check output file format
if mpSession.outputFilePath:
if not os.path.isdir(os.path.dirname(mpSession.outputFilePath)):
logging.error(" [!] Could not find output folder %s." % os.path.dirname(mpSession.outputFilePath))
sys.exit(2)
if mpSession.outputFileType == MSTypes.UNKNOWN:
logging.error(" [!] %s is not a supported extension. Use --listformats to view supported MacroPack formats." % os.path.splitext(mpSession.outputFilePath)[1])
sys.exit(2)
else:
logging.info(" [-] Target output format: %s" % mpSession.outputFileType)
elif not mpSession.listen and not mpSession.Wlisten and mpSession.runTarget is None and (MP_TYPE != "Pro" or mpSession.dcomTarget is None):
logging.error(" [!] You need to provide an output file! (get help using %s -h)" % os.path.basename(utils.getRunningApp()))
sys.exit(2)
if not mpSession.isTrojanMode:
# verify that output file does not already exist
if os.path.isfile(mpSession.outputFilePath):
logging.error(" [!] ERROR: Output file %s already exist!" % mpSession.outputFilePath)
sys.exit(2)
#Create temporary folder
logging.info(" [-] Temporary working dir: %s" % working_directory)
if not os.path.exists(working_directory):
os.makedirs(working_directory)
try:
# Create temporary work file.
if mpSession.ddeMode or mpSession.template or (mpSession.outputFileType not in MSTypes.VB_FORMATS+[MSTypes.VBA] and not mpSession.htaMacro):
inputFile = os.path.join(working_directory, "command.cmd")
else:
inputFile = os.path.join(working_directory, utils.randomAlpha(9)) + ".vba"
if mpSession.stdinContent is not None:
import time
time.sleep(0.4) # Needed to avoid some weird race condition
logging.info(" [-] Store std input in file...")
f = open(inputFile, 'w')
f.writelines(mpSession.stdinContent)
f.close()
else:
# Create temporary work file
if mpSession.fileInput is not None:
                # Check there are no binary chars in the input file
if utils.isBinaryString(open(mpSession.fileInput, 'rb').read(1024)):
logging.error(" [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script." % mpSession.fileInput)
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
sys.exit(2)
logging.info(" [-] Store input file...")
shutil.copy2(mpSession.fileInput, inputFile)
if os.path.isfile(inputFile):
logging.info(" [-] Temporary input file: %s" % inputFile)
# Edit outputfile name to spoof extension if unicodeRtlo option is enabled
if mpSession.unicodeRtlo:
# Reminder; mpSession.unicodeRtlo contains the extension we want to spoof, such as "jpg"
logging.info(" [+] Inject %s false extension with unicode RTLO" % mpSession.unicodeRtlo)
# Separate document path and extension
(fileName, fileExtension) = os.path.splitext(mpSession.outputFilePath)
logging.info(" [-] Extension %s " % fileExtension)
# Append unicode RTLO to file name
fileName += '\u202e'
# Append extension to spoof in reverse order
fileName += '\u200b' + mpSession.unicodeRtlo[::-1] # Prepend invisible space so filename does not end with flagged extension
# Append file extension
fileName += fileExtension
mpSession.outputFilePath = fileName
logging.info(" [-] File name modified to: %s" % mpSession.outputFilePath)
# Retrieve the right payload builder
if mpSession.outputFileType != MSTypes.UNKNOWN:
if MP_TYPE == "Pro" and not mpSession.communityMode:
payloadBuilder = PayloadBuilderFactoryPro().getPayloadBuilder(mpSession)
else:
payloadBuilder = PayloadBuilderFactory().getPayloadBuilder(mpSession)
# Build payload
if payloadBuilder is not None:
payloadBuilder.run()
if MP_TYPE == "Pro":
generator = ContainerGenerator(mpSession)
generator.run()
#run com attack
if mpSession.runTarget:
generator = ComGenerator(mpSession)
generator.run()
if MP_TYPE == "Pro":
#run dcom attack
if mpSession.dcom:
generator = DcomGenerator(mpSession)
generator.run()
# Activate Web server
if mpSession.listen:
listener = ListenServer(mpSession)
listener.run()
# Activate WebDav server
if mpSession.Wlisten:
Wlistener = WListenServer(mpSession)
Wlistener.run()
except Exception:
logging.exception(" [!] Exception caught!")
except KeyboardInterrupt:
logging.error(" [!] Keyboard interrupt caught!")
logging.info(" [+] Cleaning...")
if os.path.isdir(working_directory):
shutil.rmtree(working_directory)
logging.info(" Done!\n")
sys.exit(0)
if __name__ == '__main__':
# check if running from explorer, if yes restart from cmd line
# running_from = psutil.Process(os.getpid()).parent().parent().name()
# if running_from == 'explorer.exe':
# os.system("cmd.exe /k \"%s\"" % utils.getRunningApp())
# PyArmor Plugin: checkPlug()
main(sys.argv[1:])
| [((640, 674), 'common.utils.checkModuleExist', 'utils.checkModuleExist', (['"""pro_core"""'], {}), "('pro_core')\n", (662, 674), False, 'from common import utils, mp_session, help\n'), ((1132, 1138), 'colorama.init', 'init', ([], {}), '()\n', (1136, 1138), False, 'from colorama import init\n'), ((1173, 1191), 'common.help.getToolPres', 'help.getToolPres', ([], {}), '()\n', (1189, 1191), False, 'from common import utils, mp_session, help\n'), ((6758, 6794), 'logging.info', 'logging.info', (['""" [+] Preparations..."""'], {}), "(' [+] Preparations...')\n", (6770, 6794), False, 'import logging\n'), ((9157, 9225), 'logging.info', 'logging.info', (["(' [-] Temporary working dir: %s' % working_directory)"], {}), "(' [-] Temporary working dir: %s' % working_directory)\n", (9169, 9225), False, 'import logging\n'), ((13256, 13288), 'logging.info', 'logging.info', (['""" [+] Cleaning..."""'], {}), "(' [+] Cleaning...')\n", (13268, 13288), False, 'import logging\n'), ((13296, 13328), 'os.path.isdir', 'os.path.isdir', (['working_directory'], {}), '(working_directory)\n', (13309, 13328), False, 'import os\n'), ((13376, 13400), 'logging.info', 'logging.info', (['""" Done!\n"""'], {}), "(' Done!\\n')\n", (13388, 13400), False, 'import logging\n'), ((13407, 13418), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (13415, 13418), False, 'import sys\n'), ((1333, 1344), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1342, 1344), False, 'import os\n'), ((1404, 1468), 'pro_core.mp_session_pro.MpSessionPro', 'mp_session_pro.MpSessionPro', (['working_directory', 'VERSION', 'MP_TYPE'], {}), '(working_directory, VERSION, MP_TYPE)\n', (1431, 1468), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((1499, 1556), 'common.mp_session.MpSession', 'mp_session.MpSession', (['working_directory', 'VERSION', 'MP_TYPE'], {}), '(working_directory, VERSION, MP_TYPE)\n', (1519, 1556), False, 'from common import utils, mp_session, help\n'), ((2472, 2518), 'getopt.getopt', 'getopt.getopt', (['argv', 'shortOptions', 'longOptions'], {}), '(argv, shortOptions, longOptions)\n', (2485, 2518), False, 'import getopt\n'), ((6531, 6579), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (6540, 6579), False, 'import os\n'), ((6727, 6751), 'termcolor.colored', 'colored', (['BANNER', '"""green"""'], {}), "(BANNER, 'green')\n", (6734, 6751), False, 'from termcolor import colored\n'), ((8957, 8997), 'os.path.isfile', 'os.path.isfile', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8971, 8997), False, 'import os\n'), ((9237, 9270), 'os.path.exists', 'os.path.exists', (['working_directory'], {}), '(working_directory)\n', (9251, 9270), False, 'import os\n'), ((9280, 9310), 'os.makedirs', 'os.makedirs', (['working_directory'], {}), '(working_directory)\n', (9291, 9310), False, 'import os\n'), ((10737, 10762), 'os.path.isfile', 'os.path.isfile', (['inputFile'], {}), '(inputFile)\n', (10751, 10762), False, 'import os\n'), ((13338, 13370), 'shutil.rmtree', 'shutil.rmtree', (['working_directory'], {}), '(working_directory)\n', (13351, 13370), False, 'import shutil\n'), ((611, 622), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (619, 622), False, 'import sys\n'), ((2576, 2612), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (2591, 2612), False, 'from common import utils, mp_session, help\n'), ((2621, 2632), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2629, 2632), False, 'import 
sys\n'), ((6938, 6950), 'os.isatty', 'os.isatty', (['(0)'], {}), '(0)\n', (6947, 6950), False, 'import os\n'), ((7000, 7054), 'logging.info', 'logging.info', (['""" [-] Waiting for piped input feed..."""'], {}), "(' [-] Waiting for piped input feed...')\n", (7012, 7054), False, 'import logging\n'), ((7092, 7113), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (7111, 7113), False, 'import sys\n'), ((7423, 7458), 'os.path.isfile', 'os.path.isfile', (['mpSession.fileInput'], {}), '(mpSession.fileInput)\n', (7437, 7458), False, 'import os\n'), ((7472, 7543), 'logging.error', 'logging.error', (["(' [!] ERROR: Could not find %s!' % mpSession.fileInput)"], {}), "(' [!] ERROR: Could not find %s!' % mpSession.fileInput)\n", (7485, 7543), False, 'import logging\n'), ((7556, 7567), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7564, 7567), False, 'import sys\n'), ((7594, 7658), 'logging.info', 'logging.info', (["(' [-] Input file path: %s' % mpSession.fileInput)"], {}), "(' [-] Input file path: %s' % mpSession.fileInput)\n", (7606, 7658), False, 'import logging\n'), ((7733, 7811), 'logging.warning', 'logging.warning', (['""" [!] Running in community mode (pro features not applied)"""'], {}), "(' [!] Running in community mode (pro features not applied)')\n", (7748, 7811), False, 'import logging\n'), ((7870, 7899), 'pro_core.arg_mgt_pro.verify', 'arg_mgt_pro.verify', (['mpSession'], {}), '(mpSession)\n', (7888, 7899), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((8180, 8191), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8188, 8191), False, 'import sys\n'), ((8441, 8452), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8449, 8452), False, 'import sys\n'), ((8479, 8553), 'logging.info', 'logging.info', (["(' [-] Target output format: %s' % mpSession.outputFileType)"], {}), "(' [-] Target output format: %s' % mpSession.outputFileType)\n", (8491, 8553), False, 'import logging\n'), ((8840, 8851), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8848, 8851), False, 'import sys\n'), ((9011, 9103), 'logging.error', 'logging.error', (["(' [!] ERROR: Output file %s already exist!' % mpSession.outputFilePath)"], {}), "(' [!] ERROR: Output file %s already exist!' 
% mpSession.\n outputFilePath)\n", (9024, 9103), False, 'import logging\n'), ((9111, 9122), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (9119, 9122), False, 'import sys\n'), ((9532, 9578), 'os.path.join', 'os.path.join', (['working_directory', '"""command.cmd"""'], {}), "(working_directory, 'command.cmd')\n", (9544, 9578), False, 'import os\n'), ((9763, 9778), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (9773, 9778), False, 'import time\n'), ((9835, 9884), 'logging.info', 'logging.info', (['""" [-] Store std input in file..."""'], {}), "(' [-] Store std input in file...')\n", (9847, 9884), False, 'import logging\n'), ((10777, 10836), 'logging.info', 'logging.info', (["(' [-] Temporary input file: %s' % inputFile)"], {}), "(' [-] Temporary input file: %s' % inputFile)\n", (10789, 10836), False, 'import logging\n'), ((11094, 11187), 'logging.info', 'logging.info', (["(' [+] Inject %s false extension with unicode RTLO' % mpSession.unicodeRtlo)"], {}), "(' [+] Inject %s false extension with unicode RTLO' % mpSession\n .unicodeRtlo)\n", (11106, 11187), False, 'import logging\n'), ((11274, 11316), 'os.path.splitext', 'os.path.splitext', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (11290, 11316), False, 'import os\n'), ((11342, 11394), 'logging.info', 'logging.info', (["(' [-] Extension %s ' % fileExtension)"], {}), "(' [-] Extension %s ' % fileExtension)\n", (11354, 11394), False, 'import logging\n'), ((11808, 11883), 'logging.info', 'logging.info', (["(' [-] File name modified to: %s' % mpSession.outputFilePath)"], {}), "(' [-] File name modified to: %s' % mpSession.outputFilePath)\n", (11820, 11883), False, 'import logging\n'), ((12586, 12609), 'modules.com_run.ComGenerator', 'ComGenerator', (['mpSession'], {}), '(mpSession)\n', (12598, 12609), False, 'from modules.com_run import ComGenerator\n'), ((12896, 12919), 'modules.web_server.ListenServer', 'ListenServer', (['mpSession'], {}), '(mpSession)\n', (12908, 12919), False, 'from modules.web_server import ListenServer\n'), ((13035, 13059), 'modules.Wlisten_server.WListenServer', 'WListenServer', (['mpSession'], {}), '(mpSession)\n', (13048, 13059), False, 'from modules.Wlisten_server import WListenServer\n'), ((13119, 13162), 'logging.exception', 'logging.exception', (['""" [!] Exception caught!"""'], {}), "(' [!] Exception caught!')\n", (13136, 13162), False, 'import logging\n'), ((13201, 13249), 'logging.error', 'logging.error', (['""" [!] Keyboard interrupt caught!"""'], {}), "(' [!] 
Keyboard interrupt caught!')\n", (13214, 13249), False, 'import logging\n'), ((6684, 6705), 'common.utils.ColorLogFiler', 'utils.ColorLogFiler', ([], {}), '()\n', (6703, 6705), False, 'from common import utils, mp_session, help\n'), ((8011, 8052), 'os.path.dirname', 'os.path.dirname', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8026, 8052), False, 'import os\n'), ((10613, 10655), 'logging.info', 'logging.info', (['""" [-] Store input file..."""'], {}), "(' [-] Store input file...')\n", (10625, 10655), False, 'import logging\n'), ((10672, 10716), 'shutil.copy2', 'shutil.copy2', (['mpSession.fileInput', 'inputFile'], {}), '(mpSession.fileInput, inputFile)\n', (10684, 10716), False, 'import shutil\n'), ((12756, 12780), 'pro_modules.utilities.dcom_run.DcomGenerator', 'DcomGenerator', (['mpSession'], {}), '(mpSession)\n', (12769, 12780), False, 'from pro_modules.utilities.dcom_run import DcomGenerator\n'), ((8125, 8166), 'os.path.dirname', 'os.path.dirname', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8140, 8166), False, 'import os\n'), ((9649, 9669), 'common.utils.randomAlpha', 'utils.randomAlpha', (['(9)'], {}), '(9)\n', (9666, 9669), False, 'from common import utils, mp_session, help\n'), ((10266, 10407), 'logging.error', 'logging.error', (["(' [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script.'\n % mpSession.fileInput)"], {}), "(\n ' [!] ERROR: Invalid format for %s. Input should be text format containing your VBA script.'\n % mpSession.fileInput)\n", (10279, 10407), False, 'import logging\n'), ((10418, 10450), 'logging.info', 'logging.info', (['""" [+] Cleaning..."""'], {}), "(' [+] Cleaning...')\n", (10430, 10450), False, 'import logging\n'), ((10474, 10506), 'os.path.isdir', 'os.path.isdir', (['working_directory'], {}), '(working_directory)\n', (10487, 10506), False, 'import os\n'), ((10585, 10596), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (10593, 10596), False, 'import sys\n'), ((12439, 12468), 'pro_modules.payload_builders.containers.ContainerGenerator', 'ContainerGenerator', (['mpSession'], {}), '(mpSession)\n', (12457, 12468), False, 'from pro_modules.payload_builders.containers import ContainerGenerator\n'), ((8382, 8424), 'os.path.splitext', 'os.path.splitext', (['mpSession.outputFilePath'], {}), '(mpSession.outputFilePath)\n', (8398, 8424), False, 'import os\n'), ((8808, 8829), 'common.utils.getRunningApp', 'utils.getRunningApp', ([], {}), '()\n', (8827, 8829), False, 'from common import utils, mp_session, help\n'), ((10532, 10564), 'shutil.rmtree', 'shutil.rmtree', (['working_directory'], {}), '(working_directory)\n', (10545, 10564), False, 'import shutil\n'), ((12102, 12128), 'pro_core.payload_builder_factory_pro.PayloadBuilderFactoryPro', 'PayloadBuilderFactoryPro', ([], {}), '()\n', (12126, 12128), False, 'from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro\n'), ((12209, 12232), 'modules.payload_builder_factory.PayloadBuilderFactory', 'PayloadBuilderFactory', ([], {}), '()\n', (12230, 12232), False, 'from modules.payload_builder_factory import PayloadBuilderFactory\n'), ((3305, 3341), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3320, 3341), False, 'from common import utils, mp_session, help\n'), ((3358, 3369), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3366, 3369), False, 'import sys\n'), ((3713, 3749), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, 
sys.argv[0])\n', (3728, 3749), False, 'from common import utils, mp_session, help\n'), ((3766, 3777), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3774, 3777), False, 'import sys\n'), ((3539, 3575), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3554, 3575), False, 'from common import utils, mp_session, help\n'), ((3592, 3603), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3600, 3603), False, 'import sys\n'), ((4121, 4157), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (4136, 4157), False, 'from common import utils, mp_session, help\n'), ((4174, 4185), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4182, 4185), False, 'import sys\n'), ((3947, 3983), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (3962, 3983), False, 'from common import utils, mp_session, help\n'), ((4000, 4011), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4008, 4011), False, 'import sys\n'), ((4482, 4502), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (4497, 4502), False, 'import os\n'), ((4799, 4819), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (4814, 4819), False, 'import os\n'), ((4994, 5014), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (5009, 5014), False, 'import os\n'), ((5148, 5193), 'common.help.printTemplatesUsage', 'help.printTemplatesUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (5172, 5193), False, 'from common import utils, mp_session, help\n'), ((5206, 5217), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5214, 5217), False, 'import sys\n'), ((5588, 5608), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (5603, 5608), False, 'import os\n'), ((6035, 6055), 'os.path.abspath', 'os.path.abspath', (['arg'], {}), '(arg)\n', (6050, 6055), False, 'import os\n'), ((6105, 6139), 'common.help.printAvailableFormats', 'help.printAvailableFormats', (['BANNER'], {}), '(BANNER)\n', (6131, 6139), False, 'from common import utils, mp_session, help\n'), ((6152, 6163), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6160, 6163), False, 'import sys\n'), ((6217, 6253), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (6232, 6253), False, 'from common import utils, mp_session, help\n'), ((6266, 6277), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6274, 6277), False, 'import sys\n'), ((6341, 6395), 'pro_core.arg_mgt_pro.processProArg', 'arg_mgt_pro.processProArg', (['opt', 'arg', 'mpSession', 'BANNER'], {}), '(opt, arg, mpSession, BANNER)\n', (6366, 6395), False, 'from pro_core import arg_mgt_pro, mp_session_pro\n'), ((6430, 6466), 'common.help.printUsage', 'help.printUsage', (['BANNER', 'sys.argv[0]'], {}), '(BANNER, sys.argv[0])\n', (6445, 6466), False, 'from common import utils, mp_session, help\n'), ((6483, 6494), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6491, 6494), False, 'import sys\n')] |
binhmuc/faced | faced/const.py | cbc18f552da9c53628d61d56de7dfda451a6e25f | import os
MODELS_PATH = os.path.join(os.path.dirname(__file__), "models")
YOLO_SIZE = 288
YOLO_TARGET = 9
CORRECTOR_SIZE = 50
| [((40, 65), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (55, 65), False, 'import os\n')] |
bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis | etl/load/elasticsearch.py | 973dc444eac6d1cc80c020dd8b9a4656f70eeafb | # Load json bulk files into elasticsearch
import json
import os
import time
import traceback
import elasticsearch
from etl.common.store import list_entity_files
from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template
class ElasticSearchException(Exception):
pass
# Init Elasticsearch and test connection
def init_es_client(url, logger):
es_client = elasticsearch.Elasticsearch([url])
try:
info = es_client.info()
logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url))
except elasticsearch.exceptions.ConnectionError as e:
logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url))
raise e
return es_client
def check_error(response):
if response.get('errors'):
raise ElasticSearchException(response)
def create_index(es_client, index_name, logger):
logger.debug('Creating index "{}"...'.format(index_name))
check_error(es_client.indices.create(index_name))
def delete_index(es_client, index_name, logger):
logger.debug('Deleting index "{}"...'.format(index_name))
check_error(es_client.indices.delete(index_name))
def create_template(es_client, es_config, document_type, base_index_name, logger):
template_name = 'template_elixir_' + base_index_name
template_pattern = base_index_name + '-d*'
mapping = es_config['document-mappings'].get(document_type+"_mapping")
if not mapping:
return
logger.debug('Creating template "{}" on pattern "{}"...'.format(template_name, template_pattern))
template_body = {'template': template_pattern, 'mappings': mapping}
if 'index-settings' in es_config:
template_body['settings'] = es_config['index-settings']
check_error(es_client.indices.put_template(name=template_name, body=template_body))
def bulk_index(es_client, index_name, file_path, logger):
file_name = os.path.basename(file_path)
logger.debug('Bulk indexing file "{}" in index "{}"...'.format(file_name, index_name))
with open(file_path, 'r') as file:
check_error(es_client.bulk(index=index_name, body=file.read(), timeout='2000ms'))
def create_alias(es_client, alias_name, base_index_name, logger):
logger.debug('Creating alias "{}" for index "{}"'.format(alias_name, base_index_name))
check_error(es_client.indices.put_alias(alias_name, base_index_name))
def get_indices(es_client, base_index_name):
indices = es_client.cat.indices(base_index_name + '-d*', params={'h': 'index'})
index_names = list(map(lambda i: i['index'], indices))
index_names.sort(reverse=True)
return index_names
def load_source(source, config, source_bulk_dir, log_dir):
"""
Full Elasticsearch documents indexing
"""
source_name = source['schema:identifier']
action = 'load-elasticsearch-' + source_name
log_file = get_file_path([log_dir, action], ext='.log', recreate=True)
logger = create_logger(source_name, log_file, config['options']['verbose'])
load_config = config['load-elasticsearch']
es_client = init_es_client(load_config['url'], logger)
logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url']))
try:
if not os.path.exists(source_bulk_dir):
raise FileNotFoundError(
'No such file or directory: \'{}\'.\n'
'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation'
' before trying to launch the transformation process.'
.format(source_bulk_dir))
bulk_files = list(list_entity_files(source_bulk_dir))
all_document_types = set(map(first, bulk_files))
document_types = load_config.get('document-types') or all_document_types
document_types = document_types.intersection(all_document_types)
index_by_document = dict()
logger.info("Preparing index with template mapping...")
timestamp = int(time.time())
for document_type in document_types:
base_index_name = replace_template(
load_config['index-template'],
{'source': source['schema:identifier'], 'documentType': document_type}
).lower()
create_template(es_client, load_config, document_type, base_index_name, logger)
index_name = base_index_name + '-d' + str(timestamp)
create_index(es_client, index_name, logger)
index_by_document[document_type] = base_index_name, index_name
logger.info("Bulk indexing...")
for document_type, file_path in bulk_files:
if document_type in index_by_document:
base_index_name, index_name = index_by_document[document_type]
bulk_index(es_client, index_name, file_path, logger)
logger.info("Creating index aliases and deleting old indices...")
for document_type, (base_index_name, index_name) in index_by_document.items():
create_alias(es_client, index_name, base_index_name, logger)
new_index, *old_indices = get_indices(es_client, base_index_name)
for old_index in old_indices[1:]:
delete_index(es_client, old_index, logger)
logger.info("SUCCEEDED Loading {}.".format(source_name))
except Exception as e:
logger.debug(traceback.format_exc())
logger.debug(getattr(e, 'long_message', ''))
logger.info("FAILED Loading {} Elasticsearch documents.\n"
"=> Check the logs ({}) for more details."
.format(source_name, log_file))
def main(config):
log_dir = config['log-dir']
bulk_dir = os.path.join(config['data-dir'], 'json-bulk')
if not os.path.exists(bulk_dir):
raise Exception('No json bulk folder found in ' + bulk_dir)
sources = config['sources']
for (source_name, source) in sources.items():
source_bulk_dir = get_folder_path([bulk_dir, source_name])
load_source(source, config, source_bulk_dir, log_dir)
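# Illustrative shape of the config expected by main() (a sketch inferred from the
# lookups above; the exact placeholder syntax of 'index-template' depends on
# etl.common.utils.replace_template, and the values here are only examples):
#   config = {
#       'log-dir': '/path/to/logs',
#       'data-dir': '/path/to/data',          # must contain a 'json-bulk' folder
#       'options': {'verbose': True},
#       'sources': {'mysource': {'schema:identifier': 'mysource'}},
#       'load-elasticsearch': {
#           'url': 'http://localhost:9200',
#           'index-template': ...,             # expanded with 'source' and 'documentType'
#           'document-types': None,            # optional filter on document types
#           'document-mappings': {...},        # per-type '<type>_mapping' entries
#       },
#   }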
| [((407, 441), 'elasticsearch.Elasticsearch', 'elasticsearch.Elasticsearch', (['[url]'], {}), '([url])\n', (434, 441), False, 'import elasticsearch\n'), ((2003, 2030), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (2019, 2030), False, 'import os\n'), ((2961, 3020), 'etl.common.utils.get_file_path', 'get_file_path', (['[log_dir, action]'], {'ext': '""".log"""', 'recreate': '(True)'}), "([log_dir, action], ext='.log', recreate=True)\n", (2974, 3020), False, 'from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template\n'), ((3034, 3100), 'etl.common.utils.create_logger', 'create_logger', (['source_name', 'log_file', "config['options']['verbose']"], {}), "(source_name, log_file, config['options']['verbose'])\n", (3047, 3100), False, 'from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template\n'), ((5785, 5830), 'os.path.join', 'os.path.join', (["config['data-dir']", '"""json-bulk"""'], {}), "(config['data-dir'], 'json-bulk')\n", (5797, 5830), False, 'import os\n'), ((5842, 5866), 'os.path.exists', 'os.path.exists', (['bulk_dir'], {}), '(bulk_dir)\n', (5856, 5866), False, 'import os\n'), ((6045, 6085), 'etl.common.utils.get_folder_path', 'get_folder_path', (['[bulk_dir, source_name]'], {}), '([bulk_dir, source_name])\n', (6060, 6085), False, 'from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template\n'), ((3336, 3367), 'os.path.exists', 'os.path.exists', (['source_bulk_dir'], {}), '(source_bulk_dir)\n', (3350, 3367), False, 'import os\n'), ((3712, 3746), 'etl.common.store.list_entity_files', 'list_entity_files', (['source_bulk_dir'], {}), '(source_bulk_dir)\n', (3729, 3746), False, 'from etl.common.store import list_entity_files\n'), ((4084, 4095), 'time.time', 'time.time', ([], {}), '()\n', (4093, 4095), False, 'import time\n'), ((5459, 5481), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5479, 5481), False, 'import traceback\n'), ((4172, 4296), 'etl.common.utils.replace_template', 'replace_template', (["load_config['index-template']", "{'source': source['schema:identifier'], 'documentType': document_type}"], {}), "(load_config['index-template'], {'source': source[\n 'schema:identifier'], 'documentType': document_type})\n", (4188, 4296), False, 'from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template\n')] |
redfrexx/geoplot | geoplot/crs.py | 8231baab0e286f1dec870dd5e8c6c8218e5b5da7 | """
This module defines the ``geoplot`` coordinate reference system classes, wrappers on
``cartopy.crs`` objects meant to be used as parameters to the ``projection`` parameter of all
front-end ``geoplot`` outputs. For the list of Cartopy CRS objects this module derives from,
refer to http://scitools.org.uk/cartopy/docs/latest/crs/projections.html.
"""
import cartopy.crs as ccrs
import geopandas as gpd
class Base:
# TODO: RotatedPole
"""
Generate instances of ``cartopy.crs``.*name* where *name* matches the instance's class name.
    Methods
    -------
`load` : Return a Cartopy CRS initialized with defaults from the `centerings` dictionary,
overridden by initialization parameters.
`_as_mpl_axes` : Return the result of calling cartopy's ``_as_mpl_axes`` for `self.load`
called with empty `df` and `centerings`.
"""
def __init__(self, **kwargs):
"""Save parameters that initialize Cartopy CRSs."""
self.args = kwargs
def load(self, df, centerings):
"""
A meta-method which abstracts the internals of individual projections' load procedures.
Parameters
----------
df : GeoDataFrame
The GeoDataFrame which has been passed as input to the plotter at the top level.
This data is needed to calculate reasonable centering variables in cases in which the
user does not already provide them; which is, incidentally, the reason behind all of
this funny twice-instantiation loading in the first place.
centerings: dict
A dictionary containing names and centering methods. Certain projections have certain
centering parameters whilst others lack them. For example, the geospatial projection
            contains both ``central_longitude`` and ``central_latitude`` instance parameters, which
together control the center of the plot, while the North Pole Stereo projection has
only a ``central_longitude`` instance parameter, implying that latitude is fixed (as
indeed it is, as this projection is centered on the North Pole!).
A top-level centerings method is provided in each of the ``geoplot`` top-level plot
functions; each of the projection wrapper classes defined here in turn selects the
            functions from this list relevant to this particular instance and passes them to
the ``_generic_load`` method here.
We then in turn execute these functions to get defaults for our ``df`` and pass them
off to our output ``cartopy.crs`` instance.
Returns
-------
crs : ``cartopy.crs`` object instance
Returns a ``cartopy.crs`` object instance whose appropriate instance variables have
been set to reasonable defaults wherever not already provided by the user.
"""
return getattr(ccrs, self.__class__.__name__)(**{**centerings, **self.args})
def _as_mpl_axes(self):
"""
When ``matplotlib`` is provided a projection via a ``projection`` keyword argument, it
expects to get something with a callable ``as_mpl_axes`` method. The precise details of
what this method does, exactly, are not important: it suffices to know that every
``cartopy`` coordinate reference system object has one.
When we pass a ``geoplot.crs`` crs object to a ``geoplot`` function, the loading and
centering of the data occurs automatically (using the function defined immediately above).
Since we control what ``geoplot`` does at execution, we gracefully integrate this two-step
procedure into the function body.
But there are also use cases outside of our control in which we are forced to pass a
``geoplot.crs`` object without having first called ``load``: most prominently, when
creating a plot containing subplots, the "overall" projection must be pre-loaded. It's
possible to get around this by using ``cartopy.crs`` objects instead, but this is
inelegant. This method is a better way: when a ``geoplot.crs`` object called by
``matplotlib``, it silently swaps itself out for a vanilla version of its ``cartopy.crs``
mirror, and calls that function's ``_as_mpl_axes`` instead.
Parameters
----------
proj : geoplot.crs projection instance
The instance in question (self, in the method body).
Returns
-------
Mutates into a ``cartopy.crs`` object and returns the result of executing ``_as_mpl_axes``
on that object instead.
"""
proj = self.load(gpd.GeoDataFrame(), dict())
return proj._as_mpl_axes()
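# Illustrative usage (not part of the library): instances of these classes are meant to be
# passed to the ``projection`` argument of geoplot's plotting functions, e.g.
#   import geoplot as gplt, geoplot.crs as gcrs
#   gplt.polyplot(df, projection=gcrs.Orthographic())
# matplotlib then calls ``_as_mpl_axes()`` on the instance, which swaps in the corresponding
# ``cartopy.crs`` object loaded with sensible centering defaults.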
class Filtering(Base):
"""CRS that `load`s with `centering` restricted to keys in `self.filter_`."""
def load(self, df, centerings):
"""Call `load` method with `centerings` filtered to keys in `self.filter_`."""
return super().load(
df,
{key: value
for key, value in centerings.items()
if key in self.filter_}
)
class LongitudeCentering(Filtering):
"""Form a CRS that centers by longitude."""
filter_ = {'central_longitude'}
class LatitudeCentering(Filtering):
"""For a CRS that centers by latitude."""
filter_ = {'central_latitude'}
PlateCarree,\
LambertCylindrical,\
Mercator,\
Miller,\
Mollweide,\
Robinson,\
Sinusoidal,\
InterruptedGoodeHomolosine,\
Geostationary,\
NorthPolarStereo,\
SouthPolarStereo = tuple(
type(name, (LongitudeCentering,), {})
for name in ('PlateCarree',
'LambertCylindrical',
'Mercator',
'Miller',
'Mollweide',
'Robinson',
'Sinusoidal',
'InterruptedGoodeHomolosine',
'Geostationary',
'NorthPolarStereo',
'SouthPolarStereo')
)
Gnomonic = type('Gnomonic', (LatitudeCentering,), {})
AlbersEqualArea,\
AzimuthalEquidistant,\
LambertConformal,\
Orthographic,\
Stereographic,\
TransverseMercator,\
LambertAzimuthalEqualArea,\
UTM,\
OSGB,\
EuroPP,\
OSNI = tuple(
type(name, (Base,), {})
for name in ('AlbersEqualArea',
'AzimuthalEquidistant',
'LambertConformal',
'Orthographic',
'Stereographic',
'TransverseMercator',
'LambertAzimuthalEqualArea',
'UTM',
'OSGB',
'EuroPP',
'OSNI')
)
| [((4688, 4706), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (4704, 4706), True, 'import geopandas as gpd\n')] |
cderwin/maps | api/views/stores/att_handler.py | 0146260935a749679396022b6d2b1d90b6df2539 | from .default_handler import StoresHandler
class ATTStoresHandler(StoresHandler):
def handle_request(self, **kwargs):
kwargs.update({'provider': 'att'})
return super(ATTStoresHandler, self).handle_request(**kwargs)
def get_url(self, **kwargs):
lat = float(kwargs.get('lat'))
lon = float(kwargs.get('lon'))
sw_corner = "{0},{1}".format(lat - 1, lon - 1)
ne_corner = "{0},{1}".format(lat + 1, lon + 1)
return self.config[kwargs['provider']]['url'].format(lat=lat, lon=lon, sw_corner=sw_corner, ne_corner=ne_corner)
| [] |
lucasjlgc/Aulas-de-Python- | pythonProject/MUNDO 2/Desafio 54.py | 6aaed1c660487a680e9c449210600ccdfa326612 | # Read the birth year of 7 people and show how many have already come of age and how many have not
maiores = 0
menores = 0
for c in range(1, 8):
    p = int(input('Qual o ano de seu nascimento? '))
    a = 2021 - p
    if a >= 18:
        maiores += 1
        print('A pessoa numero {} já é maior de idade'.format(c))
    else:
        menores += 1
        print('A pessoa numero {} não é maior de idade!'.format(c))
print('No total, {} pessoas já são maiores de idade e {} ainda não são.'.format(maiores, menores))
| [] |
zjg540066169/tmoga | tmoga/utils/SDE.py | a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Provide function to calculate SDE distance
@auth: Jungang Zou
@date: 2021/05/05
"""
def SDE(front, values1, values2):
shifted_dict = {}
for i in front:
shifted_dict[i] = [(values1[i], values2[i])]
        shifted_list = []
        for j in front:
            if i == j:
                continue
            else:
                shifted_list.append((min(values1[i], values1[j]), min(values2[i], values2[j])))
        shifted_dict[i].append(shifted_list)
return shifted_dict
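# Worked example (illustrative): for front = [0, 1], values1 = [1.0, 3.0], values2 = [4.0, 2.0]
# the function returns
#   {0: [(1.0, 4.0), [(1.0, 2.0)]],
#    1: [(3.0, 2.0), [(1.0, 2.0)]]}
# i.e. each individual keeps its own objective pair plus the list of coordinate-wise minima
# against every other member of the front (its shifted positions for SDE).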
| [] |
pscly/shua_shouji | a1.py | 1c03056c8f5db4a3a1222b2d31fdf44c3ab07cf6 | # -*- encoding=utf8 -*-
__author__ = "pscly"
from airtest.core.api import *
from airtest.cli.parser import cli_setup
# from douyin import *
if not cli_setup():
auto_setup(__file__, logdir=True, devices=[
"android://127.0.0.1:5037/decc8da3?cap_method=MINICAP_STREAM&&ori_method=MINICAPORI&&touch_method=MINITOUCH",
])
# script content
print("start...")
print("冲冲冲!")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
wake() # wake up the phone
start_app("com.ss.android.ugc.aweme.lite")
hua = 0
滑动方向 = 0
while 1:
hua += 1
滑动方向 += 1
if hua == 10:
touch(Template(r"tpl1607564875731.png", record_pos=(-0.404, -0.67), resolution=(1079, 2340)))
sleep(5)
swipe((484, 1711),(531,709))
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
# generate html report
# from airtest.report.report import simple_report
# simple_report(__file__, logpath=True)
| [((149, 160), 'airtest.cli.parser.cli_setup', 'cli_setup', ([], {}), '()\n', (158, 160), False, 'from airtest.cli.parser import cli_setup\n')] |
bmdepesa/validation-tests | tests/v3_validation/cattlevalidationtest/core/test_logs_api.py | 23e7ab95ce76744483a0657f790b42a88a93436d | from common_fixtures import * # NOQA
import websocket as ws
import pytest
def get_logs(client):
hosts = client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
in_log = random_str()
cmd = '/bin/bash -c "echo {}; sleep 2"'.format(in_log)
c = client.create_container(image=TEST_IMAGE_UUID, command=cmd)
c = client.wait_success(c)
logs = c.logs()
return logs, in_log, c
def test_logs_token(client):
logs, in_log, c = get_logs(client)
conn = ws.create_connection(logs.url + '?token='+logs.token)
result = conn.recv()
assert result is not None
assert in_log in result
delete_all(client, [c])
def test_logs_no_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url)
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
def test_host_api_garbage_token(client):
logs, _, c = get_logs(client)
with pytest.raises(Exception) as excinfo:
ws.create_connection(logs.url+'?token=random.garbage.token')
assert 'Handshake status 401' in str(excinfo.value)
delete_all(client, [c])
| [((500, 555), 'websocket.create_connection', 'ws.create_connection', (["(logs.url + '?token=' + logs.token)"], {}), "(logs.url + '?token=' + logs.token)\n", (520, 555), True, 'import websocket as ws\n'), ((743, 767), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (756, 767), False, 'import pytest\n'), ((792, 822), 'websocket.create_connection', 'ws.create_connection', (['logs.url'], {}), '(logs.url)\n', (812, 822), True, 'import websocket as ws\n'), ((993, 1017), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1006, 1017), False, 'import pytest\n'), ((1038, 1100), 'websocket.create_connection', 'ws.create_connection', (["(logs.url + '?token=random.garbage.token')"], {}), "(logs.url + '?token=random.garbage.token')\n", (1058, 1100), True, 'import websocket as ws\n')] |
VITA-Group/Peek-a-Boo | models/psg_seed_resnet.py | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | '''ResNet using PSG in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.masked_psg_seed_conv import PredictiveSeedConv2d
from masked_layers import layers
# Fixed
NUM_BITS = 32
NUM_BITS_WEIGHT = 32
NUM_BITS_GRAD = None
BIPRECISION = False
PREDICTIVE_FORWARD = False
WRITER = None
WRITER_PREFIX_COUNTER = 0
# Tunable
PREDICTIVE_BACKWARD = True
MSB_BITS = 4
MSB_BITS_WEIGHT = 4
MSB_BITS_GRAD = 8
THRESHOLD = 0.0
SPARSIFY = False
SIGN = True
def conv1x1(in_planes, out_planes, stride=1, input_signed=True, predictive_forward=True, writer_prefix=""):
"1x1 convolution with no padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def conv3x3(in_planes, out_planes, stride=1, input_signed=False, predictive_forward=True, writer_prefix=""):
"3x3 convolution with padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv1 = conv1x1(in_planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = conv3x3(planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
# self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.conv3 = conv1x1(planes, self.expansion*planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_planes=64, num_classes=10, init_method='standard'):
super(ResNet, self).__init__()
self.in_planes = in_planes
self.conv1 = conv3x3(3, self.in_planes, stride=1, input_signed=True, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(self.in_planes)
if self.in_planes == 64:
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
#self.linear = layers.Linear(512*block.expansion, num_classes)
elif self.in_planes == 16:
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.layer4 = None
self.linear = nn.Linear(64, num_classes)
self.reset_conv_parameters(init_method)
print('conv weights reset to {}'.format(init_method))
def reset_parameters(self, module, init_method="kaiming_uniform") -> None:
if init_method == "kaiming_constant_signed":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "kaiming_constant_unsigned":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = torch.ones_like(module.weight.data) * std
elif init_method == "kaiming_normal":
nn.init.kaiming_normal_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_uniform":
nn.init.kaiming_uniform_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_laplace":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
scale = gain / math.sqrt(2.0 * fan)
with torch.no_grad():
new_weight = np.random.laplace(loc=0.0, scale=scale, size=module.weight.shape)
module.weight.data = module.weight.data.new_tensor(torch.from_numpy(new_weight).clone().detach())
elif init_method == "xavier_normal":
nn.init.xavier_normal_(module.weight)
elif init_method == "xavier_constant":
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(module.weight)
std = math.sqrt(2.0 / float(fan_in + fan_out))
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "standard":
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
else:
raise ValueError(f"{init_method} is not an initialization option!")
def reset_conv_parameters(self, init_method="standard") -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
self.reset_parameters(m, init_method)
def get_bop_params(self):
bop_params = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
bop_params += list(m.parameters())
return bop_params
def get_non_bop_params(self):
non_bop_params = []
for m in self.modules():
if isinstance(m, (nn.Linear, nn.BatchNorm2d,)):
non_bop_params += list(m.parameters())
return non_bop_params
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if self.layer4 is not None:
out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PsgSeedResNet20(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,3,3], in_planes=16, num_classes=num_classes, init_method=init_method)
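# Example usage (sketch): build the CIFAR-style 20-layer PSG model and run a dummy forward
# pass; the msb_* arguments control the reduced-precision prediction path.
#   model = PsgSeedResNet20(num_classes=10, init_method='kaiming_normal',
#                           msb_bits=4, msb_bits_weight=4, msb_bits_grad=8)
#   out = model(torch.randn(2, 3, 32, 32))   # -> shape (2, 10)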
def PsgSeedResNet18(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet34(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet50(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet101(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,23,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet152(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,8,36,3], num_classes=num_classes, init_method=init_method)
def test():
    net = PsgSeedResNet18()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
| [((971, 1487), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (991, 1487), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((1738, 2254), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (1758, 2254), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((2575, 2597), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2589, 2597), True, 'import torch.nn as nn\n'), ((2738, 2760), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2752, 2760), True, 'import torch.nn as nn\n'), ((2786, 2801), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2799, 2801), True, 'import torch.nn as nn\n'), ((3379, 3390), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (3385, 3390), True, 'import torch.nn.functional as F\n'), ((3778, 3800), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3792, 3800), True, 'import torch.nn as nn\n'), ((4048, 4070), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (4062, 4070), True, 'import torch.nn as nn\n'), ((4317, 4356), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4331, 4356), True, 'import torch.nn as nn\n'), ((4380, 4395), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (4393, 4395), 
True, 'import torch.nn as nn\n'), ((5021, 5032), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (5027, 5032), True, 'import torch.nn.functional as F\n'), ((5393, 5423), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.in_planes'], {}), '(self.in_planes)\n', (5407, 5423), True, 'import torch.nn as nn\n'), ((9475, 9497), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9488, 9497), True, 'import torch.nn as nn\n'), ((13663, 13688), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(32)', '(32)'], {}), '(1, 3, 32, 32)\n', (13674, 13688), False, 'import torch\n'), ((5895, 5940), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (5904, 5940), True, 'import torch.nn as nn\n'), ((6632, 6687), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6662, 6687), True, 'import torch.nn as nn\n'), ((6707, 6737), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (6729, 6737), True, 'import torch.nn as nn\n'), ((3168, 3207), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (3182, 3207), True, 'import torch.nn as nn\n'), ((4762, 4801), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4776, 4801), True, 'import torch.nn as nn\n'), ((6343, 6369), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'num_classes'], {}), '(64, num_classes)\n', (6352, 6369), True, 'import torch.nn as nn\n'), ((6763, 6777), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (6772, 6777), False, 'import math\n'), ((6795, 6810), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6808, 6810), False, 'import torch\n'), ((6956, 7011), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6986, 7011), True, 'import torch.nn as nn\n'), ((7031, 7061), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7053, 7061), True, 'import torch.nn as nn\n'), ((7087, 7101), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (7096, 7101), False, 'import math\n'), ((7119, 7134), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7132, 7134), False, 'import torch\n'), ((7273, 7347), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7296, 7347), True, 'import torch.nn as nn\n'), ((7173, 7208), 'torch.ones_like', 'torch.ones_like', (['module.weight.data'], {}), '(module.weight.data)\n', (7188, 7208), False, 'import torch\n'), ((7407, 7482), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7431, 7482), True, 'import torch.nn as nn\n'), ((7548, 7603), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (7578, 7603), True, 'import torch.nn as nn\n'), ((7623, 7653), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7645, 7653), True, 'import torch.nn as nn\n'), ((7681, 7701), 'math.sqrt', 'math.sqrt', (['(2.0 * fan)'], {}), '(2.0 * 
fan)\n', (7690, 7701), False, 'import math\n'), ((7719, 7734), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7732, 7734), False, 'import torch\n'), ((7765, 7830), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': 'scale', 'size': 'module.weight.shape'}), '(loc=0.0, scale=scale, size=module.weight.shape)\n', (7782, 7830), True, 'import numpy as np\n'), ((8002, 8039), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['module.weight'], {}), '(module.weight)\n', (8024, 8039), True, 'import torch.nn as nn\n'), ((8117, 8169), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['module.weight'], {}), '(module.weight)\n', (8154, 8169), True, 'import torch.nn as nn\n'), ((8246, 8261), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8259, 8261), False, 'import torch\n'), ((8426, 8438), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (8435, 8438), False, 'import math\n'), ((7898, 7926), 'torch.from_numpy', 'torch.from_numpy', (['new_weight'], {}), '(new_weight)\n', (7914, 7926), False, 'import torch\n')] |
jsnlp/snorkel-tutorials | drybell/drybell_lfs_spark.py | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | from pyspark.sql import Row
from snorkel.labeling.lf import labeling_function
from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function
from snorkel.preprocess import preprocessor
from drybell_lfs import load_celebrity_knowledge_base
ABSTAIN = -1
NEGATIVE = 0
POSITIVE = 1
@preprocessor()
def combine_text(x):
return Row(title=x.title, body=x.body, article=f"{x.title} {x.body}")
@spark_nlp_labeling_function(text_field="article", pre=[combine_text])
def article_mentions_person(x):
for ent in x.doc.ents:
if ent.label_ == "PERSON":
return ABSTAIN
return NEGATIVE
@spark_nlp_labeling_function(
text_field="article",
pre=[combine_text],
resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()),
)
def person_in_db(x, celebrity_knowledge_base):
for ent in x.doc.ents:
if ent.label_ == "PERSON" and ent.text.lower() in celebrity_knowledge_base:
return POSITIVE
return ABSTAIN
@labeling_function()
def body_contains_fortune(x):
return POSITIVE if "fortune" in x.body else ABSTAIN
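# Illustrative application of these labeling functions (sketch; the applier import path is an
# assumption about the installed snorkel version):
#   from snorkel.labeling.apply.spark import SparkLFApplier
#   lfs = [article_mentions_person, person_in_db, body_contains_fortune]
#   applier = SparkLFApplier(lfs)
#   L_train = applier.apply(articles_rdd)   # label matrix with one column per LF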
| [((290, 304), 'snorkel.preprocess.preprocessor', 'preprocessor', ([], {}), '()\n', (302, 304), False, 'from snorkel.preprocess import preprocessor\n'), ((403, 472), 'snorkel.labeling.lf.nlp_spark.spark_nlp_labeling_function', 'spark_nlp_labeling_function', ([], {'text_field': '"""article"""', 'pre': '[combine_text]'}), "(text_field='article', pre=[combine_text])\n", (430, 472), False, 'from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function\n'), ((984, 1003), 'snorkel.labeling.lf.labeling_function', 'labeling_function', ([], {}), '()\n', (1001, 1003), False, 'from snorkel.labeling.lf import labeling_function\n'), ((337, 399), 'pyspark.sql.Row', 'Row', ([], {'title': 'x.title', 'body': 'x.body', 'article': 'f"""{x.title} {x.body}"""'}), "(title=x.title, body=x.body, article=f'{x.title} {x.body}')\n", (340, 399), False, 'from pyspark.sql import Row\n'), ((740, 771), 'drybell_lfs.load_celebrity_knowledge_base', 'load_celebrity_knowledge_base', ([], {}), '()\n', (769, 771), False, 'from drybell_lfs import load_celebrity_knowledge_base\n')] |
dongleecsu/DREAMPlace | dreamplace/ops/dct/discrete_spectral_transform.py | 86b56521a3eacfb5cadff935631302bf6986a689 | ##
# @file discrete_spectral_transform.py
# @author Yibo Lin
# @date Jun 2018
#
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
import pdb
""" Discrete spectral transformation leveraging fast fourier transform engine.
The math here mainly uses Prosthaphaeresis properties.
The trigonometric identities exploited by prosthaphaeresis relate products of trigonometric functions to sums.
sin(a) sin(b) = 1/2 * (cos(a-b) - cos(a+b))
cos(a) cos(b) = 1/2 * (cos(a-b) + cos(a+b))
sin(a) cos(b) = 1/2 * (sin(a+b) + sin(a-b))
    cos(a) sin(b) = 1/2 * (sin(a+b) - sin(a-b))
A 2D FFT performs
y_{u, v} = \sum_i \sum_j x_{i, j} exp(-j*2*pi*u*i/M) exp(-j*2*pi*v*j/N)
= \sum_i \sum_j x_{i, j} exp(-j*2*pi*(u*i/M + v*j/N))
= \sum_i \sum_j x_{i, j} (cos(-2*pi*(u*i/M + v*j/N)) + j sin(-2*pi*(u*i/M + v*j/N))).
By mapping the original image from (i, j) to (i, N-j), we can have (u*i/M - v*j/N) inside exp.
This will enable us to derive various cos/sin transformation by computing FFT twice.
"""
def get_expk(N, dtype, device):
""" Compute 2*exp(-1j*pi*u/(2N)), but not exactly the same.
The actual return is 2*cos(pi*u/(2N)), 2*sin(pi*u/(2N)).
This will make later multiplication easier.
"""
pik_by_2N = torch.arange(N, dtype=dtype, device=device)
pik_by_2N.mul_(np.pi/(2*N))
# cos, sin
# I use sin because the real part requires subtraction
# this will be easier for multiplication
expk = torch.stack([pik_by_2N.cos(), pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_expkp1(N, dtype, device):
""" Compute 2*exp(-1j*pi*(u+1)/(2N)), but not exactly the same.
The actual return is 2*cos(pi*(u+1)/(2N)), 2*sin(pi*(u+1)/(2N))
"""
neg_pik_by_2N = torch.arange(1, N+1, dtype=dtype, device=device)
neg_pik_by_2N.mul_(np.pi/(2*N))
    # cos, sin
    # we keep both components because the imag part is needed later
# this will be easier for multiplication
expk = torch.stack([neg_pik_by_2N.cos(), neg_pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_exact_expk(N, dtype, device):
# Compute exp(-j*pi*u/(2N)) = cos(pi*u/(2N)) - j * sin(pi*u/(2N))
pik_by_2N = torch.arange(N, dtype=dtype, device=device)
pik_by_2N.mul_(np.pi/(2*N))
# cos, -sin
expk = torch.stack([pik_by_2N.cos(), -pik_by_2N.sin()], dim=-1)
return expk.contiguous()
def get_perm(N, dtype, device):
""" Compute permutation to generate following array
0, 2, 4, ..., 2*(N//2)-2, 2*(N//2)-1, 2*(N//2)-3, ..., 3, 1
"""
perm = torch.zeros(N, dtype=dtype, device=device)
perm[0:(N-1)//2+1] = torch.arange(0, N, 2, dtype=dtype, device=device)
perm[(N-1)//2+1:] = torch.arange(2*(N//2)-1, 0, -2, dtype=dtype, device=device)
return perm
def dct_2N(x, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
    Implements the 2N padding trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
def dct_N(x, perm=None, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
    Implements the N permuting trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. permute x such that [a, b, c, d, e, f] becomes [a, c, e, f, d, b]
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if perm is None:
perm = get_perm(N, dtype=torch.int64, device=x.device)
if x.ndimension() <= 1:
x_reorder = x.view([1, N])
else:
x_reorder = x.clone()
# switch from row-major to column-major for speedup
x_reorder.transpose_(dim0=-2, dim1=-1)
#x_reorder = x_reorder[..., perm, :]
x_reorder = x_reorder.index_select(dim=-2, index=perm)
# switch back
x_reorder.transpose_(dim0=-2, dim1=-1)
y = torch.rfft(x_reorder, signal_ndim=1, normalized=False, onesided=False)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
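# dct_2N and dct_N compute the same transform via different tricks, so their outputs should
# agree (illustrative check, assuming a torch build that still provides torch.rfft):
#   x = torch.rand(4, 8, dtype=torch.float64)
#   assert torch.allclose(dct_2N(x), dct_N(x), atol=1e-10)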
def idct_2N(x, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
    Implements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 0:N]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def idct_N(x, expk=None):
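    """ Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
    N-point variant of idct_2N; the 0.5 scaling below is chosen so that its output matches idct_2N.
    """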
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
size = list(x.size())
size.append(2)
x_reorder = torch.zeros(size, dtype=x.dtype, device=x.device)
x_reorder[..., 0] = x
x_reorder[..., 1:, 1] = x.flip([x.ndimension()-1])[..., :N-1].mul_(-1)
x_reorder[..., 0] = x.mul(expk[..., 0]).sub_(x_reorder[..., 1].mul(expk[..., 1]))
x_reorder[..., 1].mul_(expk[..., 0])
x_reorder[..., 1].add_(x.mul(expk[..., 1]))
# this is to match idct_2N
# normal way should multiply 0.25
x_reorder.mul_(0.5)
y = torch.ifft(x_reorder, signal_ndim=1, normalized=False)
y.mul_(N)
z = torch.empty_like(x)
z[..., 0:N:2] = y[..., :(N+1)//2, 0]
z[..., 1:N:2] = y[..., (N+1)//2:, 0].flip([x.ndimension()-1])
return z
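# As noted in the scaling comment above, idct_N is meant to match idct_2N
# (illustrative check):
#   x = torch.rand(4, 8, dtype=torch.float64)
#   assert torch.allclose(idct_2N(x), idct_N(x), atol=1e-10)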
def dst(x, expkp1=None):
""" Batch Discrete Sine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i sin(pi*(2i+1)*(u+1)/(2N)),
    Implements the 2N padding trick to solve DST with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
    3. Multiply by 2*exp(-1j*pi*(u+1)/(2N))
    4. Extract the imaginary part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 1:N+1, :]
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# get imag part
y = y[..., 1].mul(expkp1[:, 0]) - y[..., 0].mul(expkp1[:, 1])
return y
def idst(x, expkp1=None):
""" Batch Inverse Discrete Sine Transformation without normalization to coefficients.
    Compute the sine counterpart of idct_2N (the inverse of the dst above, up to scaling),
    Implements the 2N padding trick to solve IDST with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expkp1)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 1:N+1]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def idxt(x, cos_or_sin_flag, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
Impelements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
@param x batch 1D tensor for conversion
    @param cos_or_sin_flag 0 for cosine transformation and 1 for sine transformation
@param expk 2*exp(j*pi*k/(2N))
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
# Must use IFFT here
y = torch.ifft(x_pad, signal_ndim=1, normalized=False)[..., 0:N, cos_or_sin_flag]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def dct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return dct_2N(dct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def dct2_N(x, perm0=None, expk0=None, perm1=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param perm0 with length M
@param expk0 with length M
@param perm1 with length N
@param expk1 with length N
"""
return dct_N(dct_N(x.transpose(dim0=-2, dim1=-1), perm=perm0, expk=expk0).transpose_(dim0=-2, dim1=-1), perm=perm1, expk=expk1)
def idct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_2N(idct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def idct2_N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_N(idct_N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def dst2(x, expkp1_0=None, expkp1_1=None):
""" Batch 2D Discrete Sine Transformation without normalization to coefficients.
Compute 1D DST twice.
@param x batch tensor, the 2D part is MxN
@param expkp1_0 with length M
@param expkp1_1 with length N
"""
return dst(dst(x.transpose(dim0=-2, dim1=-1), expkp1_0).transpose_(dim0=-2, dim1=-1), expkp1_1)
def idcct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idsct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} sin(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DST and then 1D DCT.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idcst2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) sin(pi/N*q*(v+0.5))
Compute 1D DCT and then 1D DST.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 1, expk_1)
def idxst_idct(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
Compute idxst(idct(x))
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idxt(idct_N(x, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
def idct_idxst(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
Compute idct(idxst(x)).
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idct_N(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), expk_0).transpose_(dim0=-2, dim1=-1)
| [((1268, 1311), 'torch.arange', 'torch.arange', (['N'], {'dtype': 'dtype', 'device': 'device'}), '(N, dtype=dtype, device=device)\n', (1280, 1311), False, 'import torch\n'), ((1777, 1827), 'torch.arange', 'torch.arange', (['(1)', '(N + 1)'], {'dtype': 'dtype', 'device': 'device'}), '(1, N + 1, dtype=dtype, device=device)\n', (1789, 1827), False, 'import torch\n'), ((2227, 2270), 'torch.arange', 'torch.arange', (['N'], {'dtype': 'dtype', 'device': 'device'}), '(N, dtype=dtype, device=device)\n', (2239, 2270), False, 'import torch\n'), ((2589, 2631), 'torch.zeros', 'torch.zeros', (['N'], {'dtype': 'dtype', 'device': 'device'}), '(N, dtype=dtype, device=device)\n', (2600, 2631), False, 'import torch\n'), ((2657, 2706), 'torch.arange', 'torch.arange', (['(0)', 'N', '(2)'], {'dtype': 'dtype', 'device': 'device'}), '(0, N, 2, dtype=dtype, device=device)\n', (2669, 2706), False, 'import torch\n'), ((2731, 2796), 'torch.arange', 'torch.arange', (['(2 * (N // 2) - 1)', '(0)', '(-2)'], {'dtype': 'dtype', 'device': 'device'}), '(2 * (N // 2) - 1, 0, -2, dtype=dtype, device=device)\n', (2743, 2796), False, 'import torch\n'), ((3330, 3361), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, N)', '"""constant"""', '(0)'], {}), "(x, (0, N), 'constant', 0)\n", (3335, 3361), True, 'import torch.nn.functional as F\n'), ((5849, 5890), 'torch.nn.functional.pad', 'F.pad', (['x_pad', '(0, 0, 0, N)', '"""constant"""', '(0)'], {}), "(x_pad, (0, 0, 0, N), 'constant', 0)\n", (5854, 5890), True, 'import torch.nn.functional as F\n'), ((6413, 6462), 'torch.zeros', 'torch.zeros', (['size'], {'dtype': 'x.dtype', 'device': 'x.device'}), '(size, dtype=x.dtype, device=x.device)\n', (6424, 6462), False, 'import torch\n'), ((6842, 6896), 'torch.ifft', 'torch.ifft', (['x_reorder'], {'signal_ndim': '(1)', 'normalized': '(False)'}), '(x_reorder, signal_ndim=1, normalized=False)\n', (6852, 6896), False, 'import torch\n'), ((6920, 6939), 'torch.empty_like', 'torch.empty_like', (['x'], {}), '(x)\n', (6936, 6939), False, 'import torch\n'), ((7584, 7615), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, N)', '"""constant"""', '(0)'], {}), "(x, (0, N), 'constant', 0)\n", (7589, 7615), True, 'import torch.nn.functional as F\n'), ((8743, 8784), 'torch.nn.functional.pad', 'F.pad', (['x_pad', '(0, 0, 0, N)', '"""constant"""', '(0)'], {}), "(x_pad, (0, 0, 0, N), 'constant', 0)\n", (8748, 8784), True, 'import torch.nn.functional as F\n'), ((10049, 10090), 'torch.nn.functional.pad', 'F.pad', (['x_pad', '(0, 0, 0, N)', '"""constant"""', '(0)'], {}), "(x_pad, (0, 0, 0, N), 'constant', 0)\n", (10054, 10090), True, 'import torch.nn.functional as F\n'), ((3462, 3527), 'torch.rfft', 'torch.rfft', (['x_pad'], {'signal_ndim': '(1)', 'normalized': '(False)', 'onesided': '(True)'}), '(x_pad, signal_ndim=1, normalized=False, onesided=True)\n', (3472, 3527), False, 'import torch\n'), ((4779, 4849), 'torch.rfft', 'torch.rfft', (['x_reorder'], {'signal_ndim': '(1)', 'normalized': '(False)', 'onesided': '(False)'}), '(x_reorder, signal_ndim=1, normalized=False, onesided=False)\n', (4789, 4849), False, 'import torch\n'), ((6047, 6140), 'torch.irfft', 'torch.irfft', (['x_pad'], {'signal_ndim': '(1)', 'normalized': '(False)', 'onesided': '(False)', 'signal_sizes': '[2 * N]'}), '(x_pad, signal_ndim=1, normalized=False, onesided=False,\n signal_sizes=[2 * N])\n', (6058, 6140), False, 'import torch\n'), ((7716, 7781), 'torch.rfft', 'torch.rfft', (['x_pad'], {'signal_ndim': '(1)', 'normalized': '(False)', 'onesided': '(True)'}), '(x_pad, signal_ndim=1, 
normalized=False, onesided=True)\n', (7726, 7781), False, 'import torch\n'), ((8941, 9034), 'torch.irfft', 'torch.irfft', (['x_pad'], {'signal_ndim': '(1)', 'normalized': '(False)', 'onesided': '(False)', 'signal_sizes': '[2 * N]'}), '(x_pad, signal_ndim=1, normalized=False, onesided=False,\n signal_sizes=[2 * N])\n', (8952, 9034), False, 'import torch\n'), ((10272, 10322), 'torch.ifft', 'torch.ifft', (['x_pad'], {'signal_ndim': '(1)', 'normalized': '(False)'}), '(x_pad, signal_ndim=1, normalized=False)\n', (10282, 10322), False, 'import torch\n')] |
vkuznet/h2o | py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py | e08f7014f228cbaecfb21f57379970e6a3ac0756 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_glm
import h2o_util
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[<col1>])',
]
DO_SUMMARY = False
DO_COMPARE_SUM = False
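# The flags above gate optional verification further down: DO_SUMMARY runs h2o Summary on the
# parsed key, and DO_COMPARE_SUM recomputes per-column sums via Exec (zeroList/exprList) and
# compares them with the sums accumulated while generating the synthetic dataset.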
def write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution):
# we can do all sorts of methods off the r object
r = random.Random(SEEDPERFILE)
def addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict):
# colNumber should not be 0, because the output will be there
## val = r.uniform(MIN,MAX)
val = r.triangular(valMin,valMax,0)
valFormatted = h2o_util.fp_format(val, sel)
# force it to be zero in this range. so we don't print zeroes for svm!
if (val > valMin/2) and (val < valMax/2):
return None
else:
rowData.append(str(colNumber) + ":" + valFormatted) # f should always return string
if colNumber in synColSumDict:
synColSumDict[colNumber] += val # sum of column (dict)
else:
synColSumDict[colNumber] = val # sum of column (dict)
return val
valMin = -1e2
valMax = 1e2
classMin = -36
classMax = 36
dsf = open(csvPathname, "w+")
synColSumDict = {0: 0} # guaranteed to have col 0 for output
# even though we try to get a max colCount with random, we might fall short
# track what max we really got
colNumberMax = 0
for i in range(rowCount):
rowData = []
d = random.randint(0,2)
if d==0:
if distribution == 'sparse':
# only one value per row!
# is it okay to specify col 0 in svm? where does the output data go? (col 0)
colNumber = random.randint(1, colCount)
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
# did we add a val?
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
else:
# some number of values per row.. 50% or so?
for colNumber in range(1, colCount+1):
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
# always need an output class, even if no cols are non-zero
# space is the only valid separator
# add the output (col 0)
# random integer for class
val = random.randint(classMin,classMax)
rowData.insert(0, val)
synColSumDict[0] += val # sum of column (dict)
rowDataCsv = " ".join(map(str,rowData))
# FIX! vary the eol ?
# write every generated row (row skipping is not done in this test)
dsf.write(rowDataCsv + "\n")
dsf.close()
return (colNumberMax, synColSumDict)
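# Standard h2o test harness: setUpClass builds a small cloud (2 JVMs locally), tearDownClass
# shuts it down, and tearDown scans the sandbox logs for errors after each test.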
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_fp_formats_libsvm_2(self):
# h2b.browseTheCloud()
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 10000, 'cA', 300, 'sparse50'),
(100, 10000, 'cB', 300, 'sparse'),
# (100, 40000, 'cC', 300, 'sparse50'),
# (100, 40000, 'cD', 300, 'sparse'),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs, distribution) in tryList:
NUM_CASES = h2o_util.fp_format()
for sel in [random.randint(0,NUM_CASES-1)]: # len(caseList)
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = "syn_%s_%s_%s_%s.csv" % (SEEDPERFILE, sel, rowCount, colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
# dict of col sums for comparison to exec col sums below
(colNumberMax, synColSumDict) = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution)
selKey2 = hex_key + "_" + str(sel)
print "This dataset requires telling h2o parse it's a libsvm..doesn't detect automatically"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=selKey2,
timeoutSecs=timeoutSecs, doSummary=False, parser_type='SVMLight')
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
print "\n" + csvFilename
# SUMMARY****************************************
# gives us some reporting on missing values, constant values,
# to see if we have x specified well
# figures out everything from parseResult['destination_key']
# needs y to avoid output column (which can be index or name)
# assume all the configs have the same y..just check with the first one
goodX = h2o_glm.goodXFromColumnInfo(y=0,
key=parseResult['destination_key'], timeoutSecs=300, noPrint=True)
if DO_SUMMARY:
summaryResult = h2o_cmd.runSummary(key=selKey2, max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
self.assertEqual(colNumberMax+1, num_cols, msg="generated %s cols (including output). parsed to %s cols" % (colNumberMax+1, num_cols))
# Exec (column sums)*************************************************
if DO_COMPARE_SUM:
h2e.exec_zero_list(zeroList)
colResultList = h2e.exec_expr_list_across_cols(None, exprList, selKey2, maxCol=colNumberMax+1,
timeoutSecs=timeoutSecs)
print "\n*************"
print "colResultList", colResultList
print "*************"
self.assertEqual(rowCount, num_rows, msg="generated %s rows, parsed to %s rows" % (rowCount, num_rows))
# need to fix this for compare to expected
# we should be able to keep the list of fp sums per col above
# when we generate the dataset
### print "\nsynColSumDict:", synColSumDict
for k,v in synColSumDict.iteritems():
if DO_COMPARE_SUM:
# k should be integers that match the number of cols
self.assertTrue(k>=0 and k<len(colResultList))
compare = colResultList[k]
print "\nComparing col sums:", v, compare
# Even though we're comparing floating point sums, the operations probably should have
# been done in same order, so maybe the comparison can be exact (or not!)
self.assertAlmostEqual(v, compare, places=0,
msg='%0.6f col sum is not equal to expected %0.6f' % (v, compare))
synMean = (v + 0.0)/rowCount
# enums don't have mean, but we're not enums
mean = float(inspect['cols'][k]['mean'])
# our fp formats in the syn generation sometimes only have two places?
self.assertAlmostEqual(mean, synMean, places=0,
msg='col %s mean %0.6f is not equal to generated mean %0.6f' % (k, mean, synMean))
num_missing_values = inspect['cols'][k]['num_missing_values']
self.assertEqual(0, num_missing_values,
msg='col %s num_missing_values %d should be 0' % (k, num_missing_values))
if __name__ == '__main__':
h2o.unit_main()
| [] |
SA-22C-smoothswing/spectrum-protect-sppmon | python/influx/database_tables.py | 8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6 | """Provides all database and table structures used for the influx database.
Classes:
Datatype
Database
Table
RetentionPolicy
"""
from __future__ import annotations
from enum import Enum, unique
import re
import json
from typing import Any, Dict, List, Set, Tuple, Union
import influx.influx_queries as Queries
from utils.execption_utils import ExceptionUtils
from utils.influx_utils import InfluxUtils
from utils.spp_utils import SppUtils
@unique
class Datatype(Enum):
"""
This enum differentiates between the different Influx-Types.
By declaring the type SPPMon will automatically insert the data in the right format.
The order of the types within the enum is important: bool is an int, but an int is not a bool.
Important: only use `TIMESTAMP` for epoch timestamps, *NOT* for durations or counts.
`TIMESTAMP` values are automatically converted into second format.
Note: The assigned value type is just a helper and not of much use.
Methods:
get_auto_datatype - get Datatype enum by value type analysis
"""
NONE = type(None)
"""Undeclared, only use as a placeholder."""
STRING = str
"""Special symbols and \" will be escaped."""
BOOL = bool
"""Any boolean, be aware it is a subtype of int.
TODO Untested, saves as Boolean within Influx.
"""
INT = int
"""Appends a 'i' at end of number to declare. Fails if the data is mixed with any other type."""
FLOAT = float
"""Unchanged value. Default Influx numeric data type. Mixing with ints works."""
TIMESTAMP = type(int)
"""Automatic transform a timestamp into seconds. Important: Only use for Epoch timestamps, not duration or counter.
Caution: Type is just a placeholder, do not set to int - causing problems!
"""
@staticmethod
def get_auto_datatype(value: Any) -> Datatype:
"""get Datatype enum by value typ analysis. Usage should be avoided.
Only use if no datatype is declared. It skips time-type and fails if ints are mixed with floats.
If no type is detected emits a warning and returns `NONE`.
Arguments:
value {Union[str, float, int, bool, None]} -- Value to be analyzed
Returns:
Datatype -- type of value or `NONE`.
"""
for enum in Datatype:
if(enum is Datatype.TIMESTAMP):
continue
if(isinstance(value, enum.value)):
return enum
ExceptionUtils.error_message(f"No auto type found for {value}")
return Datatype.NONE
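# Illustrative behaviour of the enum order above (not part of the original module):
# get_auto_datatype(True) -> BOOL, get_auto_datatype(3) -> INT, get_auto_datatype(3.5) -> FLOAT,
# get_auto_datatype("x") -> STRING; an unsupported type logs an error and yields NONE.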
class RetentionPolicy:
"""Represents a influxdb retention policy.
By this policy it is declared afer which ammount of time a dataset is deleted from the DB.
Attributes
name - name of RP
database - associated database
duration - time until the data is purged
replication - How often the data is replicated
shard_duration - Size of memory-groups
default - whether this is the default RP
Methods
to_dict - creates a dict out of the values
"""
@property
def name(self) -> str:
"""name of the Retention Policy"""
return self.__name
@property
def database(self) -> Database:
"""associated database"""
return self.__database
@property
def duration(self) -> str:
"""time until the data is purged"""
return self.__duration
@property
def replication(self) -> int:
"""How often the date is replicated. We only have 1 db instance so replication is always 1"""
return self.__replication
@property
def shard_duration(self) -> str:
"""Size of memory-groups. Default time is 0s, then the db decides what to take"""
return self.__shard_duration
@property
def default(self) -> bool:
""" whether this is the default RP"""
return self.__default
def __init__(self, name: str, database: Database, duration: str,
replication: int = 1, shard_duration: str = "0s",
default: bool = False) -> None:
if(not name):
raise ValueError("need retention policy name for creation")
if(not database):
raise ValueError("need retention policy database for creation")
if(not duration):
raise ValueError("need retention policy duration for creation")
if(not replication):
raise ValueError("need retention policy replication factor for creation")
if(not shard_duration):
raise ValueError("need retention policy shard duration for creation")
if(default is None):
raise ValueError("need retention policy default setting for creation")
self.__name = name
self.__database = database
self.__replication = replication
self.__shard_duration = shard_duration
self.__default = default
try:
# str due usage of method
self.__duration: str = InfluxUtils.transform_time_literal(duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"duration for retention policy {name} is not in the correct time format")
try:
# str due usage of method
self.__shard_duration: str = InfluxUtils.transform_time_literal(shard_duration, single_vals=False)
except ValueError as error:
ExceptionUtils.exception_info(error)
raise ValueError(f"shard duration for retention policy {name} is not in the correct time format")
def to_dict(self) -> Dict[str, Union[str, int, bool]]:
"""Used to create a dict out of the values, able to compare to influxdb-created dict"""
return {
'name': self.name,
'duration': self.duration,
'shardGroupDuration': self.__shard_duration,
'replicaN': self.__replication,
'default': self.default
}
def __str__(self) -> str:
return f"{self.database.name}.{self.name}"
def __repr__(self) -> str:
return f"Retention Policy: {self.name}"
def __eq__(self, o: object) -> bool:
if(isinstance(o, RetentionPolicy)):
return o.to_dict() == self.to_dict()
return False
def __hash__(self) -> int:
return hash(json.dumps(self.to_dict(), sort_keys=True))
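# Illustrative usage (not from the original module; assumes an existing Database instance `db`):
# rp = RetentionPolicy(name="autogen", database=db, duration="90d", default=True)
# str(rp) -> "<db name>.autogen"; rp.to_dict() mirrors the policy dict that influxdb reports.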
class Table:
"""Represents a measurement in influx. Contains pre-defined tag and field definitions.
Attributes
name - name of table
fields - dict of field name with datatype
tags - tags as list of str
time_key - key name of the timestamp field
retention_policy - retention policy associated with this table
database - table is declared within this database
Methods
split_by_table_def - Split the given dict into a pre-defined set of tags, fields and a timestamp.
"""
@property
def fields(self) -> Dict[str, Datatype]:
"""fields of the table, name is key, value is datatype"""
return self.__fields
@property
def tags(self) -> List[str]:
"""tags of the table, datatype always string"""
return self.__tags
@property
def time_key(self) -> str:
"""name of the timestamp key"""
return self.__time_key
@property
def name(self) -> str:
"""name of the table"""
return self.__name
@property
def retention_policy(self) -> RetentionPolicy:
"""retention policy associated with this table"""
return self.__retention_policy
@property
def database(self) -> Database:
"""table is declared within this database"""
return self.__database
__bad_measurement_characters: List[str] = [' ', ',']
"""those chars need to be escaped within a measurement/table name"""
def __init__(self, database: Database, name: str, fields: Dict[str, Datatype] = None,
tags: List[str] = None, time_key: str = 'time', retention_policy: RetentionPolicy = None) -> None:
if(not database):
raise ValueError("need database to create table")
if(not name):
raise ValueError("need str name to create table")
if(not time_key):
raise ValueError("time key cannot be None")
if(not fields):
fields = {}
if(not tags):
tags = []
if(not retention_policy):
retention_policy = next(filter(lambda rp: rp.default, database.retention_policies))
self.__database: Database = database
self.__fields: Dict[str, Datatype] = fields
self.__tags: List[str] = tags
self.__time_key: str = time_key
self.__retention_policy = retention_policy
# escape not allowed characters in Measurement
for bad_character in self.__bad_measurement_characters:
if(re.search(bad_character, name)):
name = name.replace(bad_character, '\\%c'% bad_character)
self.__name: str = name
def __str__(self) -> str:
return f"{self.database.name}.{self.retention_policy.name}.{self.name}"
def __repr__(self) -> str:
return f"Table: {self.name}"
def split_by_table_def(self, mydict: Dict[str, Any]) -> Tuple[
Dict[str, Any], Dict[str, Any], Union[str, int, None]]:
"""Split the given dict into a pre-defined set of tags, fields and a timestamp.
None-Values and empty strings are ignored.
If there are no fields declared, it will split by a default pattern.
Undeclared columns will produce a warning.
This function uses the tag/field and timestamp definition declared within this table.
Arguments:
self {Table} -- Table with predefined set of tags and fields
mydict {Dict[str, Any]} -- dict with colums as keys. None-Values are ignored
Raises:
ValueError: If no dict is given or not of type dict.
Returns:
(Dict[str, Any], Dict[str, Any], int) -- Tuple of: tags, fields, timestamp
"""
if(not mydict):
raise ValueError("need at least one value in dict to split")
# if table is not defined use default split
if(not self.fields):
return InfluxUtils.default_split(mydict=mydict)
# fill dicts
# table.fields is a dict, we only need the keys
fields: Dict[str, Any] = dict.fromkeys(self.fields.keys(), None)
tags: Dict[str, Any] = dict.fromkeys(self.tags, None)
# what field should be recorded as time
time_stamp_field = self.time_key
# helper variable to only overwrite if it is not the time_stamp_field
time_overwrite_allowed = True
# actual timestamp saved
time_stamp: Union[str, int, None] = None
for (key, value) in mydict.items():
# Ignore empty entries
if(value is None or (isinstance(value, str) and not value)):
continue
# Check timestamp value if it matches any of predefined time names
if(key in time_stamp_field or key in InfluxUtils.time_key_names):
# sppmonCTS has lowest priority, only set if otherwise None
if(time_stamp is None and key == SppUtils.capture_time_key):
time_stamp = value
# time_stamp_field is highest priority. Do not overwrite it.
elif(key is time_stamp_field):
time_overwrite_allowed: bool = False
time_stamp = value
# if time_stamp_field is not used yet, overwrite sppmonCaptureTime or others
elif(time_overwrite_allowed):
time_stamp = value
# if no overwrite allowed, continue and drop field
else:
continue
# Otherwise check for Keys or Fields
if(key in fields):
fields[key] = value
elif(key in tags):
tags[key] = value
elif(key in InfluxUtils.time_key_names or key in time_stamp_field):
continue
else:
ExceptionUtils.error_message(f"Not all columns for table {self.name} are declared: {key}")
# before key+"MISSING" : Removed to avoid death-circle on repeated queries.
fields[key] = value
return (tags, fields, time_stamp)
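# Illustrative split (not part of the original module): for a Table declared with
# fields={"duration": Datatype.INT} and tags=["site"], calling
# split_by_table_def({"site": "a", "duration": 5, "time": 1600000000})
# returns ({"site": "a"}, {"duration": 5}, 1600000000).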
class Database:
"""
Represents an instance of the influx database. Define all table definitions within the init method.
Attributes
name - name of the database
tables - tables with predefined tags & fields
retention_policies - Set of all provided Retention Policies
continuous_queries - Set of all provided Continuous Queries
Methods
__getitem__ - [] access on the tables via name. Creates empty table if missing.
"""
@property
def tables(self) -> Dict[str, Table]:
"""Dict with table definitions to look up"""
return self.__tables
@property
def retention_policies(self) -> Set[RetentionPolicy]:
"""Set of all provided Retention Policies"""
return self.__retention_policies
@property
def continuous_queries(self) -> Set[Queries.ContinuousQuery]:
"""Set of all provided Continuous Queries"""
return self.__continuous_queries
@property
def name(self) -> str:
"""name of the database, also used as reference"""
return self.__name
def __getitem__(self, table_name: str) -> Table:
"""Aquire a instance of a predefined table, returns a empty table if it was not defined. []-Access.
Arguments:
table_name {str} -- name of the table you want to acquire
Returns:
Table -- Instance of a predefined table, otherwise new empty table
"""
return self.tables.get(table_name, Table(self, table_name))
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'Database: {self.name}'
def __init__(self, name: str):
self.__name: str = name
self.__tables: Dict[str, Table] = {}
self.__retention_policies: Set[RetentionPolicy] = set()
self.__continuous_queries: Set[Queries.ContinuousQuery] = set()
| [((2449, 2512), 'utils.execption_utils.ExceptionUtils.error_message', 'ExceptionUtils.error_message', (['f"""No auto type found for {value}"""'], {}), "(f'No auto type found for {value}')\n", (2477, 2512), False, 'from utils.execption_utils import ExceptionUtils\n'), ((4991, 5054), 'utils.influx_utils.InfluxUtils.transform_time_literal', 'InfluxUtils.transform_time_literal', (['duration'], {'single_vals': '(False)'}), '(duration, single_vals=False)\n', (5025, 5054), False, 'from utils.influx_utils import InfluxUtils\n'), ((5336, 5405), 'utils.influx_utils.InfluxUtils.transform_time_literal', 'InfluxUtils.transform_time_literal', (['shard_duration'], {'single_vals': '(False)'}), '(shard_duration, single_vals=False)\n', (5370, 5405), False, 'from utils.influx_utils import InfluxUtils\n'), ((8980, 9010), 're.search', 're.search', (['bad_character', 'name'], {}), '(bad_character, name)\n', (8989, 9010), False, 'import re\n'), ((10376, 10416), 'utils.influx_utils.InfluxUtils.default_split', 'InfluxUtils.default_split', ([], {'mydict': 'mydict'}), '(mydict=mydict)\n', (10401, 10416), False, 'from utils.influx_utils import InfluxUtils\n'), ((5103, 5139), 'utils.execption_utils.ExceptionUtils.exception_info', 'ExceptionUtils.exception_info', (['error'], {}), '(error)\n', (5132, 5139), False, 'from utils.execption_utils import ExceptionUtils\n'), ((5454, 5490), 'utils.execption_utils.ExceptionUtils.exception_info', 'ExceptionUtils.exception_info', (['error'], {}), '(error)\n', (5483, 5490), False, 'from utils.execption_utils import ExceptionUtils\n'), ((12290, 12385), 'utils.execption_utils.ExceptionUtils.error_message', 'ExceptionUtils.error_message', (['f"""Not all columns for table {self.name} are declared: {key}"""'], {}), "(\n f'Not all columns for table {self.name} are declared: {key}')\n", (12318, 12385), False, 'from utils.execption_utils import ExceptionUtils\n')] |
calendar42/SleekXMPP--XEP-0080- | examples/rpc_server_side.py | d7bd5fd29f26a5d7de872a49ff63a353b8043e49 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Dann Martens
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, \
ANY_ALL
import threading
class Thermostat(Endpoint):
def FQN(self):
return 'thermostat'
def __init(self, initial_temperature):
self._temperature = initial_temperature
self._event = threading.Event()
@remote
def set_temperature(self, temperature):
print("Setting temperature to %s" % temperature)
self._temperature = temperature
@remote
def get_temperature(self):
return self._temperature
@remote(False)
def release(self):
self._event.set()
def wait_for_release(self):
self._event.wait()
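# The demo below registers the Thermostat endpoint on an XMPP session and blocks until a
# remote caller invokes release(); substitute real JID credentials for the placeholders.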
def main():
session = Remote.new_session('[email protected]/rpc', '*****')
thermostat = session.new_handler(ANY_ALL, Thermostat, 18)
thermostat.wait_for_release()
session.close()
if __name__ == '__main__':
main()
| [((743, 756), 'sleekxmpp.plugins.xep_0009.remote.remote', 'remote', (['(False)'], {}), '(False)\n', (749, 756), False, 'from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, ANY_ALL\n'), ((917, 966), 'sleekxmpp.plugins.xep_0009.remote.Remote.new_session', 'Remote.new_session', (['"""[email protected]/rpc"""', '"""*****"""'], {}), "('[email protected]/rpc', '*****')\n", (935, 966), False, 'from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, ANY_ALL\n'), ((472, 489), 'threading.Event', 'threading.Event', ([], {}), '()\n', (487, 489), False, 'import threading\n')] |
wuhuikai/DeepDrone | lib/TelloAPI.py | f4700178a7568fa9e308f34d0223e28635eb7660 | import cv2
import time
import socket
import threading
class Response(object):
def __init__(self):
pass
def recv(self, data):
pass
def pop(self):
pass
def empty(self):
pass
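# The Response subclasses below parse the two UDP feeds: Command stores the latest command
# reply as a UTF-8 string, while State turns the ';'-separated key:value telemetry into a
# dict of floats.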
class Command(Response):
def __init__(self):
super(Command, self).__init__()
self.response = None
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = data.decode('utf-8')
def pop(self):
with self.lock:
response, self.response = self.response, None
return response
def empty(self):
with self.lock:
return self.response is None
class State(Response):
def __init__(self):
super(State, self).__init__()
self.response = {}
self.lock = threading.RLock()
def recv(self, data):
with self.lock:
self.response = {item.split(':')[0]:float(item.split(':')[1]) for item in data.decode('utf-8').split(';') if ':' in item}
def pop(self):
return self.response
def empty(self):
return False
class Client(object):
def __init__(self, local_port, buffer_size, daemon, response):
self.response = response
self.buffer_size = buffer_size
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind(('', local_port))
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = daemon
self.receive_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
def _receive_thread(self):
"""Listens for responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response.recv(self.socket.recv(self.buffer_size))
except Exception as e:
print(e)
break
def empty(self):
return self.response.empty()
def pop(self):
return self.response.pop()
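# Client pairs a UDP socket with a daemon listener thread that keeps feeding the supplied
# Response object; Tello uses one Client for command replies (local_port) and another for
# the state feed on port 8890.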
class Video(object):
def __init__(self, daemon=True):
self.video = cv2.VideoCapture('udp://@0.0.0.0:11111')
if not self.video.isOpened():
raise RuntimeError('Failed to connect to Tello')
self.frame = None
self.lock = threading.RLock()
self.thread = threading.Thread(target=self._update_thread)
self.thread.daemon = daemon
self.thread.start()
def __del__(self):
self.video.release()
def _update_thread(self):
while True:
ok, frame = self.video.read()
if ok:
with self.lock:
self.frame = frame
def empty(self):
with self.lock:
return self.frame is None
def pop(self):
with self.lock:
frame, self.frame = self.frame, None
return frame
class Tello(object):
def __init__(self, local_port=9999, command_timeout=0.35, state=True, video=True):
"""Connects to Tello in command mode.
Args:
local_port (int): port of local machine for receiving command response.
command_timeout (float): seconds to wait for a response of command.
state (bool): receive state from Tello?
video (bool): receive video from Tello?
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode or open the video stream.
"""
self.command_timeout = command_timeout
self.response_client = Client(local_port, 1024, True, Command())
self.state_client = Client(8890, 1024, True, State()) if state else None
self.tello_address = ('192.168.10.1', 8889)
self.enter_command_mode()
self.video_client = None
if video:
self.open_video_stream()
self.video_client = Video(True)
def send_command(self, command, with_return=True):
"""Sends a command to the Tello and waits for a response.
If self.command_timeout is exceeded before a response is received,
a RuntimeError exception is raised.
Args:
command (str): Command to send.
Returns:
str: Response from Tello.
Raises:
RuntimeError: If no response is received within self.timeout seconds.
"""
self.response_client.pop()
self.response_client.socket.sendto(command.encode('utf-8'), self.tello_address)
if not with_return:
return
st = time.time()
while self.response_client.empty():
if time.time() - st >= self.command_timeout:
raise RuntimeError('No response to command')
return self.response_client.pop()
def state(self):
return self.state_client.pop() if self.state_client else None
def read_frame(self):
if self.video_client is None:
raise RuntimeError('Video is not available')
while self.video_client.empty():
pass
return self.video_client.pop()
def enter_command_mode(self):
if self.send_command('command') != 'ok':
raise RuntimeError('Tello rejected the attempt to enter command mode')
def take_off(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('takeoff')
def land(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('land')
def open_video_stream(self):
if self.send_command('streamon') != 'ok':
raise RuntimeError('Tello rejected to open the video stream')
def close_video_stream(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('streamoff')
def emergency_shutdown(self):
"""
return: 'ok' or 'error'
"""
return self.send_command('emergency')
def move_up(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('up {}'.format(x), with_return)
def move_down(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('down {}'.format(x), with_return)
def move_left(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('left {}'.format(x), with_return)
def move_right(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('right {}'.format(x), with_return)
def move_forward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('forward {}'.format(x), with_return)
def move_backward(self, x, with_return=False):
"""
param x: int, [20, 500]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('back {}'.format(x), with_return)
def rotate_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('cw {}'.format(x), with_return)
def rotate_counter_clockwise(self, x, with_return=False):
"""
param x: int, [1, 3600]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('ccw {}'.format(x), with_return)
def flip_left(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip l', with_return)
def flip_right(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip r', with_return)
def flip_forward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip f', with_return)
def flip_backward(self, with_return=False):
"""
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('flip b', with_return)
def goto(self, x, y, z, speed, with_return=False):
"""
param x: int, [20, 500]
param y: int, [20, 500]
param z: int, [20, 500]
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('go {} {} {} {}'.format(x, y, z, speed), with_return)
def goto_curve(self, x1, y1, z1, x2, y2, z2, speed, with_return=False):
"""fly a curve defined by (0, 0, 0), (x1, y1, z1), (x2, y2, z2) with speed
param x1, x2: int, [-500, 500]
param y1, y2: int, [-500, 500]
param z1, z2: int, [-500, 500]
param speed: int, [10-60]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed), with_return)
def set_speed(self, speed, with_return=False):
"""
param speed: int, [10-100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('speed {}'.format(speed), with_return)
def set_remote_controller_command(self, left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity, with_return=False):
"""
param left_right_velocity: int, [-100, 100]
param forward_backward_velocity: int, [-100, 100]
param up_down_velocity: int, [-100, 100]
param rotate_velocity: int, [-100, 100]
param with_return: bool
return: 'ok' or 'error'
"""
return self.send_command('rc {} {} {} {}'.format(left_right_velocity, forward_backward_velocity, up_down_velocity, rotate_velocity), with_return)
def get(self, command, split=False):
"""
param command
param split: bool, multiple values?
return: int or list(int)
"""
result = self.send_command(command)
if split:
return [int(x) for x in result.split(' ')]
else:
return int(result)
def get_speed(self):
"""
return: int, [10, 100]
"""
return self.get('speed?')
def get_battery(self):
"""
return: int, [0, 100]
"""
return self.get('battery?')
def get_flight_time(self):
"""
return: int
"""
return self.get('time?')
def get_relative_height(self):
"""
return: int, [10, 3000]
"""
return self.get('height?')
def get_temperature(self):
"""
return: int, [0, 90]
"""
return self.get('temp?')
def get_imu_pose(self):
"""[pitch, roll, yaw]
return: list(int), [[-89, 89], [-179, 179], [-179, 179]]
"""
return self.get('attitude?', split=True)
def get_absolute_height(self):
"""
return: int
"""
return self.get('baro?')
def get_imu_acceleration(self):
"""
return: list(int)
"""
return self.get('acceleration?', split=True)
def get_tof_height(self):
"""
return: int, [10, 400]; 6553: out of bounds
"""
return self.get('tof?')
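# Illustrative usage (a sketch, assuming the host is already joined to the Tello's Wi-Fi network):
# tello = Tello(video=False)
# tello.take_off()
# tello.move_forward(50)
# print(tello.get_battery())
# tello.land()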
| [((365, 382), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (380, 382), False, 'import threading\n'), ((830, 847), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (845, 847), False, 'import threading\n'), ((1311, 1359), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1324, 1359), False, 'import socket\n'), ((1434, 1479), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._receive_thread'}), '(target=self._receive_thread)\n', (1450, 1479), False, 'import threading\n'), ((2212, 2252), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""udp://@0.0.0.0:11111"""'], {}), "('udp://@0.0.0.0:11111')\n", (2228, 2252), False, 'import cv2\n'), ((2398, 2415), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (2413, 2415), False, 'import threading\n'), ((2438, 2482), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._update_thread'}), '(target=self._update_thread)\n', (2454, 2482), False, 'import threading\n'), ((4630, 4641), 'time.time', 'time.time', ([], {}), '()\n', (4639, 4641), False, 'import time\n'), ((4701, 4712), 'time.time', 'time.time', ([], {}), '()\n', (4710, 4712), False, 'import time\n')] |
mjuenema/python-terrascript | terrascript/resource/sematext.py | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | # terrascript/resource/sematext.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:36 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.resource.sematext
#
# instead of
#
# >>> import terrascript.resource.sematext.sematext
#
# This is only available for 'official' and 'partner' providers.
from terrascript.resource.sematext.sematext import *
| [] |
lithium0003/Image2UTF8-Transformer | eval_encoder.py | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | #!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meiryo', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
import net
class SimpleEncodeDecoder:
def __init__(self):
self.save_dir = './result/step1/'
self.result_dir = './result/plot/'
os.makedirs(self.result_dir, exist_ok=True)
checkpoint_dir = self.save_dir
self.max_epoch = 300
self.steps_per_epoch = 1000
self.batch_size = 64
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5)
self.optimizer = tf.keras.optimizers.Adam(lr)
self.encoder = net.FeatureBlock()
self.encoder.summary()
self.decoder = net.SimpleDecoderBlock()
self.decoder.summary()
inputs = {
'image': tf.keras.Input(shape=(128,128,3)),
}
feature_out = self.encoder(inputs)
outputs = self.decoder(feature_out)
self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder')
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
model=self.model)
last = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(last)
self.manager = tf.train.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=2)
if last is not None:
self.init_epoch = int(os.path.basename(last).split('-')[1])
print('loaded %d epoch'%self.init_epoch)
else:
self.init_epoch = 0
self.model.summary()
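# __init__ above wires the encoder and decoder into one Keras model and restores the latest
# checkpoint from ./result/step1/, recovering the epoch number from the checkpoint file name
# so the evaluation output can be labelled with it.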
def eval(self):
self.data = net.FontData()
print("Plot: ", self.init_epoch + 1)
acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1))
print('acc', acc)
@tf.function
def eval_substep(self, inputs):
input_data = {
'image': inputs['input'],
}
feature = self.encoder(input_data)
outputs = self.decoder(feature)
target_id = inputs['index']
target_id1 = inputs['idx1']
target_id2 = inputs['idx2']
pred_id1 = tf.nn.softmax(outputs['id1'], -1)
pred_id2 = tf.nn.softmax(outputs['id2'], -1)
return {
'feature': feature,
'pred_id1': pred_id1,
'pred_id2': pred_id2,
'target_id': target_id,
'target_id1': target_id1,
'target_id2': target_id2,
}
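# eval_substep performs a single forward pass and returns the softmax id predictions next to
# the ground-truth indices; make_plot below aggregates these into an accuracy report and a
# UMAP projection of the encoder features.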
def make_plot(self, test_ds, epoch):
result = []
labels = []
with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt:
correct_count = 0
failed_count = 0
with tqdm.tqdm(total=len(self.data.test_keys)) as pbar:
for inputs in test_ds:
pred = self.eval_substep(inputs)
result += [pred['feature']]
labels += [pred['target_id']]
for i in range(pred['target_id1'].shape[0]):
txt.write('---\n')
target = pred['target_id'][i].numpy()
txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1]))
predid1 = np.argmax(pred['pred_id1'][i])
predid2 = np.argmax(pred['pred_id2'][i])
predid = predid1 * 100 + predid2
if predid == 0:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
elif predid > self.data.id_count + 1:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
else:
txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
if target == predid:
txt.write('Correct!\n')
correct_count += 1
else:
txt.write('Failed!\n')
failed_count += 1
pbar.update(1)
acc = correct_count / (correct_count + failed_count)
txt.write('==============\n')
txt.write('Correct = %d\n'%correct_count)
txt.write('Failed = %d\n'%failed_count)
txt.write('accuracy = %f\n'%acc)
result = np.concatenate(result)
labels = np.concatenate(labels)
print('run UMAP')
X_reduced = umap.UMAP(metric='cosine').fit_transform(result)
fig, ax = plt.subplots(figsize=(50, 50))
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=labels, cmap=plt.get_cmap('hsv'))
print('plot UMAP')
for i, label in enumerate(labels):
ax.annotate(self.data.glyphs[label-1], (X_reduced[i,0], X_reduced[i,1]))
plt.savefig(os.path.join(self.result_dir,'test_result-%d.png'%epoch), dpi=300)
plt.close('all')
return acc
def eval():
encoder = SimpleEncodeDecoder()
encoder.eval()
if __name__ == '__main__':
eval()
| [((67, 105), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (98, 105), True, 'import tensorflow as tf\n'), ((355, 376), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (369, 376), False, 'import matplotlib\n'), ((115, 182), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (155, 182), True, 'import tensorflow as tf\n'), ((820, 863), 'os.makedirs', 'os.makedirs', (['self.result_dir'], {'exist_ok': '(True)'}), '(self.result_dir, exist_ok=True)\n', (831, 863), False, 'import os, time, csv\n'), ((1012, 1080), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.001)', '(100000.0)', '(0.5)'], {}), '(0.001, 100000.0, 0.5)\n', (1058, 1080), True, 'import tensorflow as tf\n'), ((1100, 1128), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr'], {}), '(lr)\n', (1124, 1128), True, 'import tensorflow as tf\n'), ((1153, 1171), 'net.FeatureBlock', 'net.FeatureBlock', ([], {}), '()\n', (1169, 1171), False, 'import net\n'), ((1226, 1250), 'net.SimpleDecoderBlock', 'net.SimpleDecoderBlock', ([], {}), '()\n', (1248, 1250), False, 'import net\n'), ((1475, 1534), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""SimpleEncodeDecoder"""'}), "(inputs, outputs, name='SimpleEncodeDecoder')\n", (1489, 1534), True, 'import tensorflow as tf\n'), ((1556, 1619), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'self.optimizer', 'model': 'self.model'}), '(optimizer=self.optimizer, model=self.model)\n', (1575, 1619), True, 'import tensorflow as tf\n'), ((1664, 1706), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1690, 1706), True, 'import tensorflow as tf\n'), ((1763, 1842), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'checkpoint_dir', 'max_to_keep': '(2)'}), '(checkpoint, directory=checkpoint_dir, max_to_keep=2)\n', (1789, 1842), True, 'import tensorflow as tf\n'), ((2131, 2145), 'net.FontData', 'net.FontData', ([], {}), '()\n', (2143, 2145), False, 'import net\n'), ((2642, 2675), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id1']", '(-1)'], {}), "(outputs['id1'], -1)\n", (2655, 2675), True, 'import tensorflow as tf\n'), ((2695, 2728), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id2']", '(-1)'], {}), "(outputs['id2'], -1)\n", (2708, 2728), True, 'import tensorflow as tf\n'), ((5079, 5101), 'numpy.concatenate', 'np.concatenate', (['result'], {}), '(result)\n', (5093, 5101), True, 'import numpy as np\n'), ((5119, 5141), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5133, 5141), True, 'import numpy as np\n'), ((5256, 5286), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(50, 50)'}), '(figsize=(50, 50))\n', (5268, 5286), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5652), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5645, 5652), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1357), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(128, 128, 3)'}), '(shape=(128, 128, 3))\n', (1336, 1357), True, 'import tensorflow as tf\n'), ((5561, 5620), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.png' % epoch)"], {}), "(self.result_dir, 
'test_result-%d.png' % epoch)\n", (5573, 5620), False, 'import os, time, csv\n'), ((3068, 3127), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.txt' % epoch)"], {}), "(self.result_dir, 'test_result-%d.txt' % epoch)\n", (3080, 3127), False, 'import os, time, csv\n'), ((5189, 5215), 'umap.UMAP', 'umap.UMAP', ([], {'metric': '"""cosine"""'}), "(metric='cosine')\n", (5198, 5215), False, 'import umap\n'), ((5355, 5374), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hsv"""'], {}), "('hsv')\n", (5367, 5374), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3784), 'numpy.argmax', 'np.argmax', (["pred['pred_id1'][i]"], {}), "(pred['pred_id1'][i])\n", (3763, 3784), True, 'import numpy as np\n'), ((3819, 3849), 'numpy.argmax', 'np.argmax', (["pred['pred_id2'][i]"], {}), "(pred['pred_id2'][i])\n", (3828, 3849), True, 'import numpy as np\n'), ((1923, 1945), 'os.path.basename', 'os.path.basename', (['last'], {}), '(last)\n', (1939, 1945), False, 'import os, time, csv\n')] |
SimonZsx/clipper | clipper_admin/clipper_admin/clipper_admin.py | 457088be2ebe68c68b94d90389d1308e35b4c844 | from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
from google.protobuf.json_format import MessageToDict
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
import grpc
from .rpc import model_pb2_grpc
from .rpc import model_pb2
from .rpc import prediction_pb2_grpc
from .rpc import prediction_pb2
from .rpc import management_pb2
from .rpc import management_pb2_grpc
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
from . import graph_parser
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper" # Used Internally for Test; Not Windows Compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
# logging.basicConfig(
# format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
# datefmt='%y-%m-%d:%H:%M:%S',
# level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = r"[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
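# Illustrative examples (not from the original source): "resnet-50" with version "3" passes
# validation, while "ResNet_50" raises ClipperException because DNS-1123 subdomains only allow
# lower-case alphanumerics, '-' and '.'.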
class ClipperConnection(object):
def __init__(self, container_manager):
self.connected = False
self.cm = container_manager
#############TEST################
self.runtime_dag = ""
self.lock = False
#################################
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cm.cluster_identifier
})
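# A connection starts out unconnected; start_clipper() or connect() must succeed before the
# build/deploy methods below can be used without raising UnconnectedException.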
def start_clipper(self,
mgmt_frontend_image='{}/management_frontend:{}'.format(
__registry__, __version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES):
try:
self.cm.start_clipper(mgmt_frontend_image)
# while True:
# try:
# query_frontend_url = "http://{host}/metrics".format(
# host=self.cm.get_query_addr())
# mgmt_frontend_url = "http://{host}/admin/ping".format(
# host=self.cm.get_admin_addr())
# for name, url in [('query frontend', query_frontend_url),
# ('management frontend', mgmt_frontend_url)]:
# r = requests.get(url, timeout=5)
# if r.status_code != requests.codes.ok:
# raise RequestException(
# "{name} end point {url} health check failed".format(name=name, url=url))
# break
# except RequestException as e:
# self.logger.info("Clipper still initializing: \n {}".format(e))
# time.sleep(1)
self.logger.info("Clipper is running")
self.connected = True
except ClipperException as e:
self.logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
self.logger.info(
"Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def build_and_deploy_DAG(self,
name,
version,
dag_description,
labels):
if not self.connected:
raise UnconnectedException()
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{cluster}-{name}:{version}".format(
cluster=self.cm.cluster_identifier, name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
self.logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
if 'stream' in b and b['stream'] != '\n': #log build steps only
self.logger.info(b['stream'].rstrip())
self.logger.info("Pushing model Docker image to {}".format(image))
for line in docker_client.images.push(repository=image, stream=True):
self.logger.debug(line)
return image
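# build_model streams a tar build context (the model data plus a generated Dockerfile based on
# base_image) to the local Docker daemon, tags the image per cluster/name/version, and pushes
# it so other hosts can pull it.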
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
# self.register_model(
# name,
# version,
# input_type,
# image=image,
# labels=labels,
# batch_size=batch_size)
self.logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def connect_host(self, host_ip, host_port):
self.cm.connect_host(host_ip, "2375")
def add_model(self,
model_name,
model_version,
image,
input_type="string",
output_type="string",
stateful=False):
modelinfo = management_pb2.ModelInfo(modelname=model_name,
modelversion=model_version,
image=image,
inputtype=input_type,
outputtype=output_type,
stateful=stateful).SerializeToString()
self.cm.grpc_client("zsxhku/grpcclient", "--addmodel %s %s %s "%("localhost","33333", modelinfo))
return
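# deploy_DAG below walks the parsed DAG nodes, starts one model container plus one proxy per
# node (and a backup pair for stateful nodes), exchanges addresses between them via the gRPC
# client container, launches a frontend, and registers the expanded runtime DAG with the
# admin daemon.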
def deploy_DAG(self, name, version, dag_description=None, runtime=""):
if not self.connected:
raise UnconnectedException()
# model_info = self.get_all_models()
dag_description_ = dag_description
#self.logger.info("dag_description: %s"%(dag_description_))
#if(dag_description==None):
# dag_description_=self.get_dag_description()
nodes_list = graph_parser.get_all_nodes(dag_description_)
container_info = []
proxy_info = []
backup_info = []
count = 1
for model_info in nodes_list:
model_name,model_version,model_image = graph_parser.get_name_version(model_info)
container_name, container_id, host = self.cm.add_replica(model_name, model_version, "22222", model_image, runtime=runtime)
self.logger.info("Started %s with container %s:%s (HOST:%s)"%(model_name, container_name, container_id, host))
container_ip = self.cm.get_container_ip(host, container_id)
proxy_name, proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", container_name, container_ip, host)
## get the ip of the instances
proxy_ip = self.cm.get_container_ip(host, proxy_id)
proxy_info.append([proxy_name,proxy_id,proxy_ip])
container_info.append([container_name, container_id, container_ip])
if graph_parser.is_stateful(model_info):
backup_name, backup_id, backup_host = self.cm.add_replica(model_name, model_version, "22222", model_image)
self.logger.info("[Backup] Started %s with container %s:%s (HOST:%s)"%(model_name, backup_name, backup_id, backup_host))
backup_ip = self.cm.get_container_ip(backup_host, backup_id)
backup_proxy_name, backup_proxy_id = self.cm.set_proxy("mxschen/ai-proxy:latest", backup_name, backup_ip, backup_host)
backup_proxy_ip= self.cm.get_container_ip(backup_host, backup_proxy_id)
backup_info.append([backup_name, backup_id, backup_ip, backup_proxy_name, backup_proxy_id, backup_proxy_ip])
else:
backup_info.append([])
#self.cm.check_container_status(host, container_id, 0.3, 20)
#self.cm.check_container_status(host, proxy_id, 0.3, 20)
#time.sleep(25)
#self.logger.info("proxy_ip:%s"%(proxy_ip))
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(proxy_ip, "22223", container_name, count, container_ip, "22222" ))
self.logger.info('[DEPLOYMENT] Finished setting model info to proxy')
if(graph_parser.is_stateful(model_info)):
self.cm.grpc_client("zsxhku/grpcclient", "--setmodel %s %s %s %s %s %s"%(backup_info[-1][-1], "22223", backup_info[-1][0], count, backup_info[-1][2], "22222" ))
self.logger.info('[DEPLOYMENT][Backup] Finished setting model info to proxy')
count += 1
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(container_ip, "22222", proxy_name, "22223"))
# self.logger.info('[DEPLOYMENT] Finished setting proxy info to model')
# if(graph_parser.is_stateful(model_info)):
# self.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(backup_info[-1][2], "22222", backup_info[-1][3], "22223"))
# self.logger.info('[DEPLOYMENT][Backup] Finished setting proxy info to model')
runtime_dag_id = name+version+str(1)
## Starting frontend
frontend_name, frontend_container_id = self.cm.add_frontend("localhost", "mxschen/frontend",runtime_dag_id, proxy_info[0][2], "22223", max_workers=2048)
frontend_ip = self.cm.get_container_ip("localhost", frontend_container_id)
frontend_info = [frontend_name, frontend_container_id, frontend_ip]
self.logger.info("[DEPLOYMENT] ################ Started Frontend #################")
#expand the dag description with the model/proxy instances info
expanded_dag = graph_parser.expand_dag(dag_description_, name, version, container_info, proxy_info, backup_info, frontend_info)
self.runtime_dag = expanded_dag
# TODO: need to modularize
self.cm.grpc_client("zsxhku/grpcclient", "--addruntimedag %s %s %s %s %s %s %s"%('1', name, version, 'old' , self.cm.admin_ip, self.cm.admin_port, expanded_dag))
self.logger.info("Added new runtime DAG to admin daemon\n%s"%(expanded_dag))
#tells the proxy runtime dag info
for tup in proxy_info:
proxy_name = tup[0]
proxy_id = tup[1]
proxy_ip = tup[2]
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(proxy_ip, "22223", expanded_dag))
self.logger.info('[DEPLOYMENT] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=proxy_name))
#tells the backups runtime dag info
for tup in backup_info:
if tup:
self.cm.grpc_client("zsxhku/grpcclient", "--setdag %s %s %s"%(tup[-1], "22223", expanded_dag))
self.logger.info('[DEPLOYMENT][Backup] Finished setting DAG for proxy {proxy_name} '.format(proxy_name=tup[-1]))
return
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
model_versions_dict : dict(str, list(str))
            For each entry in the dict, the key is a model name and the value is a list of model
            versions. All replicas for each version of each model will be stopped.
        Raises
        ------
        :py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
"""
# if not self.connected:
# raise UnconnectedException()
# self.cm.stop_models(model_versions_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
# if not self.connected:
# raise UnconnectedException()
# model_info = self.get_all_models(verbose=True)
# model_dict = {}
# for m in model_info:
# if m["model_name"] in model_names and not m["is_current_version"]:
# if m["model_name"] in model_dict:
# model_dict[m["model_name"]].append(m["model_version"])
# else:
# model_dict[m["model_name"]] = [m["model_version"]]
# self.cm.stop_models(model_dict)
# pp = pprint.PrettyPrinter(indent=4)
# self.logger.info(
# "Stopped all containers for these models and versions:\n{}".format(
# pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers even if the
Clipper management frontend or Redis has crashed. It can also be called without calling
``connect`` first.
"""
self.cm.stop_all_model_containers()
self.logger.info("Stopped all Clipper model containers")
def stop_all(self, graceful=True):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
        If graceful=False, Clipper will issue a Docker kill if it's in Docker mode. This parameter
        has no effect in Kubernetes.
"""
self.cm.stop_all(graceful=graceful)
self.logger.info(
"Stopped all Clipper cluster and all model containers")
| [((1180, 1302), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-8s %(message)s"""', 'datefmt': '"""%y-%m-%d:%H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d:%H:%M:%S', level=logging.INFO)\n", (1199, 1302), False, 'import logging\n'), ((1489, 1516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1506, 1516), False, 'import logging\n'), ((1590, 1618), 're.compile', 're.compile', (['deploy_regex_str'], {}), '(deploy_regex_str)\n', (1600, 1618), False, 'import re\n'), ((6449, 6502), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+b"""', 'suffix': '"""tar"""'}), "(mode='w+b', suffix='tar')\n", (6476, 6502), False, 'import tempfile\n'), ((8593, 8610), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (8608, 8610), False, 'import docker\n'), ((6597, 6644), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'context_file', 'mode': '"""w"""'}), "(fileobj=context_file, mode='w')\n", (6612, 6644), False, 'import tarfile\n'), ((7235, 7264), 'tarfile.TarInfo', 'tarfile.TarInfo', (['"""Dockerfile"""'], {}), "('Dockerfile')\n", (7250, 7264), False, 'import tarfile\n'), ((7875, 7904), 'tarfile.TarInfo', 'tarfile.TarInfo', (['"""Dockerfile"""'], {}), "('Dockerfile')\n", (7890, 7904), False, 'import tarfile\n')] |
VaniSHadow/tpGenerator | graph.py | 2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0 | import random
import numpy
import copy
class Graph:
"""n表示图中点的个数,m表示图中边的个数"""
def __init__(self, n, m, edge_weight=1, directed=True, connected='weak', loop=False, weighted=False, trim=True):
"""
n 图中点的个数
m 图中边的个数
edge_weight 边的权值上限
directed 有向性
connected 连通性
loop 有环性
weighted 带权性
trim True:点编号从1开始 False:点编号从0开始
"""
self.directed = directed
self.weighted = weighted
self.connected = connected
self.loop = loop
        self.trim = trim
        self.edge_weight = edge_weight
        if directed==True and connected=='weak' and loop==False:  # weakly connected directed acyclic graph
self.n = n
self.m = m
self.matr = numpy.zeros((n, n))
self.topo = list(range(n))
random.shuffle(self.topo)
self.RandomGenerTopoEdges(m-(n-1))
weak_connected = self.CheckWeakConnectivity()
if weak_connected:
self.RandomGenerTopoEdges(n-1)
else:
count = 0
for i in range(n-1):
if self.matr[self.topo[i]][self.topo[i+1]]!=1:
self.matr[self.topo[i]][self.topo[i+1]]=1
count = count+1
self.RandomGenerTopoEdges(n-1-count)
self.edges = list()
for i in range(n):
for j in range(n):
if self.matr[i][j]==1:
e = (i, j)
self.edges.append(e)
"""检查图的弱连通性"""
def CheckWeakConnectivity(self):
temp = copy.deepcopy(self.matr)
for i in range(self.n):
for j in range(self.n):
if temp[i][j]==1:
temp[j][i]=1
elif temp[j][i]==1:
temp[i][j]=1
for i in range(self.n-1):
if i==0:
result = temp.dot(temp)
else:
result = result.dot(temp)
for i in range(self.n):
for j in range(self.n):
if result[i][j]==0 and i!=j:
return False
return True
"""在图中随机生成edge_num条边"""
def RandomGenerTopoEdges(self, edge_num):
for i in range(edge_num):
mid = random.randint(1, self.n-2)
st = random.randint(0, mid)
end = random.randint(mid+1, self.n-1)
while self.matr[self.topo[st]][self.topo[end]] != 0:
mid = random.randint(1, self.n-2)
st = random.randint(0, mid)
end = random.randint(mid+1, self.n-1)
self.matr[self.topo[st]][self.topo[end]] = 1
"""以字符串返回第i条边的信息"""
def GetEdge(self, i):
        if self.trim:  # vertices numbered from 1
if self.weighted == False:
return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1)
else:
                return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1) + " " + str(random.randint(1, self.edge_weight))
        else:  # vertices numbered from 0
if self.weighted == False:
return str(self.edges[i][0]) + " " + str(self.edges[i][1])
else:
                return str(self.edges[i][0]) + " " + str(self.edges[i][1]) + " " + str(random.randint(1, self.edge_weight))
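# A minimal usage sketch, not part of the original module: the vertex and edge
# counts below are arbitrary example values. It builds a weakly connected DAG
# with the defaults of the Graph class above and prints every edge via GetEdge.
if __name__ == '__main__':
    g = Graph(n=6, m=8)
    for i in range(g.m):
        # Each line is "u v"; numbering is 1-based because trim defaults to True.
        print(g.GetEdge(i))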
| [] |
AlexSkrn/csv2googlesheets | csv2googlesheets/to_google_sheets.py | 71656dcc6827b1c58ffe80bc55aa6f1ee816f216 | """This module provides a console interface to convert CSV to Google Sheets."""
from csv2googlesheets.gapi_authorization import auth_with_google
from csv2googlesheets.gapi_create_sheet import create_sheet
from csv2googlesheets.gapi_write_to_sheet import write_to_sheet
from csv2googlesheets.parse_file import build_spreadsheet_title
from csv2googlesheets.parse_file import parse_file
from csv2googlesheets.parse_cli_args import parse_cli_args
def main():
"""Control the flow of operations to write data from csv to G Sheets."""
cli_args = parse_cli_args()
values = parse_file(path=cli_args.csv)
spreadsheet_title = build_spreadsheet_title(cli_args.csv)
google_service = auth_with_google(path_creds=cli_args.credentials_json)
spreadsheet_id = create_sheet(google_service, spreadsheet_title)
write_to_sheet(
google_service,
sheet_id=spreadsheet_id,
values=values,
)
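# A minimal programmatic sketch, not part of the original module: it drives the
# same helpers as main() without going through the CLI. The file paths are
# example values only.
def example_run(csv_path='data.csv', creds_path='credentials.json'):
    """Write the rows of csv_path to a new Google Sheet titled after the file."""
    values = parse_file(path=csv_path)
    spreadsheet_title = build_spreadsheet_title(csv_path)
    google_service = auth_with_google(path_creds=creds_path)
    spreadsheet_id = create_sheet(google_service, spreadsheet_title)
    write_to_sheet(
        google_service,
        sheet_id=spreadsheet_id,
        values=values,
    )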
if __name__ == '__main__':
main()
| [((551, 567), 'csv2googlesheets.parse_cli_args.parse_cli_args', 'parse_cli_args', ([], {}), '()\n', (565, 567), False, 'from csv2googlesheets.parse_cli_args import parse_cli_args\n'), ((581, 610), 'csv2googlesheets.parse_file.parse_file', 'parse_file', ([], {'path': 'cli_args.csv'}), '(path=cli_args.csv)\n', (591, 610), False, 'from csv2googlesheets.parse_file import parse_file\n'), ((635, 672), 'csv2googlesheets.parse_file.build_spreadsheet_title', 'build_spreadsheet_title', (['cli_args.csv'], {}), '(cli_args.csv)\n', (658, 672), False, 'from csv2googlesheets.parse_file import build_spreadsheet_title\n'), ((695, 749), 'csv2googlesheets.gapi_authorization.auth_with_google', 'auth_with_google', ([], {'path_creds': 'cli_args.credentials_json'}), '(path_creds=cli_args.credentials_json)\n', (711, 749), False, 'from csv2googlesheets.gapi_authorization import auth_with_google\n'), ((771, 818), 'csv2googlesheets.gapi_create_sheet.create_sheet', 'create_sheet', (['google_service', 'spreadsheet_title'], {}), '(google_service, spreadsheet_title)\n', (783, 818), False, 'from csv2googlesheets.gapi_create_sheet import create_sheet\n'), ((824, 894), 'csv2googlesheets.gapi_write_to_sheet.write_to_sheet', 'write_to_sheet', (['google_service'], {'sheet_id': 'spreadsheet_id', 'values': 'values'}), '(google_service, sheet_id=spreadsheet_id, values=values)\n', (838, 894), False, 'from csv2googlesheets.gapi_write_to_sheet import write_to_sheet\n')] |
nfco/netforce | netforce_account/netforce_account/migrations/credit_remain_cur.py | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | from netforce.model import get_model
from netforce import migration
from netforce import database
class Migration(migration.Migration):
_name="account.credit_remain_cur"
_version="2.5.0"
def migrate(self):
db=database.get_connection()
db.execute("UPDATE account_invoice SET amount_credit_remain_cur=amount_credit_remain WHERE amount_credit_remain_cur IS NULL AND amount_credit_remain IS NOT NULL")
Migration.register()
| [((231, 256), 'netforce.database.get_connection', 'database.get_connection', ([], {}), '()\n', (254, 256), False, 'from netforce import database\n')] |
chevah/compat | chevah/compat/testing/testcase.py | d22e5f551a628f8a1652c9f2eea306e17930cb8f | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
TestCase used for Chevah project.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six import text_type
from six.moves import range
import contextlib
import inspect
import threading
import os
import platform
import socket
import sys
import time
from bunch import Bunch
from mock import patch, Mock
from nose import SkipTest
try:
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import (
_SocketWaker, _UnixWaker, _SIGCHLDWaker
)
from twisted.python.failure import Failure
except ImportError:
# Twisted support is optional.
_SocketWaker = None
_UnixWaker = None
_SIGCHLDWaker = None
from chevah.compat import (
DefaultAvatar,
LocalFilesystem,
process_capabilities,
system_users,
SuperAvatar,
)
from chevah.compat.administration import os_administration
from chevah.compat.testing.assertion import AssertionMixin
from chevah.compat.testing.mockup import mk
from chevah.compat.testing.constant import (
TEST_NAME_MARKER,
)
from chevah.compat.testing.filesystem import LocalTestFilesystem
# For Python below 2.7 we use the separate unittest2 module.
# It comes by default in Python 2.7.
if sys.version_info[0:2] < (2, 7):
from unittest2 import TestCase
# Shut up you linter.
TestCase
else:
from unittest import TestCase
try:
# Import reactor last in case some other modules are changing the reactor.
from twisted.internet import reactor
except ImportError:
reactor = None
def _get_hostname():
"""
Return hostname as resolved by default DNS resolver.
"""
return socket.gethostname()
class TwistedTestCase(TestCase):
"""
Test case for Twisted specific code.
    Provides support for running deferreds and for starting/stopping the
    reactor during tests.
"""
    # Number of seconds to wait for a deferred to have a result.
    DEFERRED_TIMEOUT = 1
    # List of names of delayed calls which do not need to be waited for
    # when running the reactor.
EXCEPTED_DELAYED_CALLS = []
EXCEPTED_READERS = [
_UnixWaker,
_SocketWaker,
_SIGCHLDWaker,
]
# Scheduled event to stop waiting for a deferred.
_reactor_timeout_call = None
def setUp(self):
super(TwistedTestCase, self).setUp()
self._timeout_reached = False
self._reactor_timeout_failure = None
@property
def _caller_success_member(self):
"""
        Retrieve the 'success' member from the test case.
"""
success = None
for i in range(2, 6):
try:
success = inspect.stack()[i][0].f_locals['success']
break
except KeyError:
success = None
if success is None:
raise AssertionError('Failed to find "success" attribute.')
return success
def tearDown(self):
try:
if self._caller_success_member:
# Check for a clean reactor at shutdown, only if test
# passed.
self.assertIsNone(self._reactor_timeout_failure)
self._assertReactorIsClean()
finally:
self._cleanReactor()
super(TwistedTestCase, self).tearDown()
def _reactorQueueToString(self):
"""
Return a string representation of all delayed calls from reactor
queue.
"""
result = []
for delayed in reactor.getDelayedCalls(): # noqa:cover
result.append(text_type(delayed.func))
return '\n'.join(result)
def _threadPoolQueue(self):
"""
        Return the current tasks of the thread pool, or [] when the threadpool
        does not exist.
This should only be called at cleanup as it removes elements from
the Twisted thread queue, which will never be called.
"""
if not reactor.threadpool:
return []
result = []
while len(reactor.threadpool._team._pending):
result.append(reactor.threadpool._team._pending.pop())
return result
def _threadPoolThreads(self):
"""
        Return the current threads from the pool, or an empty list when the
        threadpool does not exist.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.threads
def _threadPoolWorking(self):
"""
        Return the working threads from the pool, or an empty list when the
        threadpool does not exist or has no job.
"""
if not reactor.threadpool:
return []
else:
return reactor.threadpool.working
@classmethod
def _cleanReactor(cls):
"""
Remove all delayed calls, readers and writers from the reactor.
        This is only for cleanup purposes and should not be used by normal
tests.
"""
if not reactor:
return
try:
reactor.removeAll()
except (RuntimeError, KeyError):
# FIXME:863:
            # When running threaded tests the reactor is touched from the test
            # case itself, which runs in one thread, and from the fixtures/cleanup
            # code which is executed from another thread.
            # removeAll might fail since it detects that the internal state
            # is changed from another source.
pass
reactor.threadCallQueue = []
for delayed_call in reactor.getDelayedCalls():
try:
delayed_call.cancel()
except (ValueError, AttributeError):
# AlreadyCancelled and AlreadyCalled are ValueError.
# Might be canceled from the separate thread.
# AttributeError can occur when we do multi-threading.
pass
def _raiseReactorTimeoutError(self, timeout):
"""
        Signal a timeout error while executing the reactor.
"""
self._timeout_reached = True
failure = AssertionError(
'Reactor took more than %.2f seconds to execute.' % timeout)
self._reactor_timeout_failure = failure
def _initiateTestReactor(self, timeout):
"""
Do the steps required to initiate a reactor for testing.
"""
self._timeout_reached = False
# Set up timeout.
self._reactor_timeout_call = reactor.callLater(
timeout, self._raiseReactorTimeoutError, timeout)
# Don't start the reactor if it is already started.
# This can happen if we prevent stop in a previous run.
if reactor._started:
return
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.startRunning()
def _iterateTestReactor(self, debug=False):
"""
Iterate the reactor.
"""
reactor.runUntilCurrent()
if debug: # noqa:cover
            # When debug is enabled, iterate using a small delay in steps
            # to get much better debug output.
# Otherwise the debug messages will flood the output.
print (
u'delayed: %s\n'
u'threads: %s\n'
u'writers: %s\n'
u'readers: %s\n'
u'threadpool size: %s\n'
u'threadpool threads: %s\n'
u'threadpool working: %s\n'
u'\n' % (
self._reactorQueueToString(),
reactor.threadCallQueue,
reactor.getWriters(),
reactor.getReaders(),
reactor.getThreadPool().q.qsize(),
self._threadPoolThreads(),
self._threadPoolWorking(),
)
)
t2 = reactor.timeout()
            # For testing we want to force the reactor to wake at an
# interval of at most 1 second.
if t2 is None or t2 > 1:
t2 = 0.1
t = reactor.running and t2
reactor.doIteration(t)
else:
# FIXME:4428:
            # When not executed in debug mode, some tests will fail as they
# will not spin the reactor.
# To not slow down all the tests, we run with a very small value.
reactor.doIteration(0.000001)
def _shutdownTestReactor(self, prevent_stop=False):
"""
Called at the end of a test reactor run.
When prevent_stop=True, the reactor will not be stopped.
"""
if not self._timeout_reached:
# Everything fine, disable timeout.
if (
self._reactor_timeout_call and
not self._reactor_timeout_call.cancelled
):
self._reactor_timeout_call.cancel()
if prevent_stop:
# Don't continue with stop procedure.
return
# Let the reactor know that we want to stop reactor.
reactor.stop()
# Let the reactor run one more time to execute the stop code.
reactor.iterate()
# Set flag to fake a clean reactor.
reactor._startedBefore = False
reactor._started = False
reactor._justStopped = False
reactor.running = False
# Start running has consumed the startup events, so we need
# to restore them.
reactor.addSystemEventTrigger(
'during', 'startup', reactor._reallyStartRunning)
def _assertReactorIsClean(self):
"""
Check that the reactor has no delayed calls, readers or writers.
This should only be called at teardown.
"""
if reactor is None:
return
def raise_failure(location, reason):
raise AssertionError(
'Reactor is not clean. %s: %s' % (location, reason))
if reactor._started: # noqa:cover
# Reactor was not stopped, so stop it before raising the error.
self._shutdownTestReactor()
raise AssertionError('Reactor was not stopped.')
# Look at threads queue.
if len(reactor.threadCallQueue) > 0:
raise_failure('queued threads', reactor.threadCallQueue)
if reactor.threadpool and len(reactor.threadpool.working) > 0:
raise_failure('active threads', reactor.threadCallQueue)
pool_queue = self._threadPoolQueue()
if pool_queue:
raise_failure('threadpoool queue', pool_queue)
if self._threadPoolWorking():
raise_failure('threadpoool working', self._threadPoolWorking())
if self._threadPoolThreads():
raise_failure('threadpoool threads', self._threadPoolThreads())
if len(reactor.getWriters()) > 0: # noqa:cover
raise_failure('writers', text_type(reactor.getWriters()))
for reader in reactor.getReaders():
excepted = False
for reader_type in self.EXCEPTED_READERS:
if isinstance(reader, reader_type):
excepted = True
break
if not excepted: # noqa:cover
raise_failure('readers', text_type(reactor.getReaders()))
for delayed_call in reactor.getDelayedCalls():
if delayed_call.active():
delayed_str = self._getDelayedCallName(delayed_call)
if delayed_str in self.EXCEPTED_DELAYED_CALLS:
continue
raise_failure('delayed calls', delayed_str)
def _runDeferred(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
        This is a low level method. In most tests you would like to use
`getDeferredFailure` or `getDeferredResult`.
Run the deferred in the reactor loop.
Starts the reactor, waits for deferred execution,
raises error in timeout, stops the reactor.
This will do recursive calls, in case the original deferred returns
another deferred.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
self._runDeferred(deferred)
self.assertIsNotFailure(deferred)
self.assertEqual('something', deferred.result)
"""
if not isinstance(deferred, Deferred):
raise AssertionError('This is not a deferred.')
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
try:
self._initiateTestReactor(timeout=timeout)
self._executeDeferred(deferred, timeout, debug=debug)
finally:
self._shutdownTestReactor(
prevent_stop=prevent_stop)
def _executeDeferred(self, deferred, timeout, debug):
"""
Does the actual deferred execution.
"""
if not deferred.called:
deferred_done = False
while not deferred_done:
self._iterateTestReactor(debug=debug)
deferred_done = deferred.called
if self._timeout_reached:
raise AssertionError(
'Deferred took more than %d to execute.' % timeout)
# Check executing all deferred from chained callbacks.
result = deferred.result
while isinstance(result, Deferred):
self._executeDeferred(result, timeout=timeout, debug=debug)
result = deferred.result
def executeReactor(self, timeout=None, debug=False, run_once=False):
"""
Run reactor until no more delayed calls, readers or
writers or threads are in the queues.
Set run_once=True to only run the reactor once. This is useful if
        you have a persistent deferred which will be removed only at the end
        of the test.
Only use this for very high level integration code, where you don't
        have the chance to get a "root" deferred.
In most tests you would like to use one of the
`getDeferredFailure` or `getDeferredResult`.
Usage::
protocol = mk.makeFTPProtocol()
transport = mk.makeStringTransportProtocol()
protocol.makeConnection(transport)
transport.protocol = protocol
protocol.lineReceived('FEAT')
self.executeReactor()
result = transport.value()
self.assertStartsWith('211-Features:\n', result)
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
# Set it to True to enter the first loop.
have_callbacks = True
while have_callbacks and not self._timeout_reached:
self._iterateTestReactor(debug=debug)
have_callbacks = False
# Check for active jobs in thread pool.
if reactor.threadpool:
if (
reactor.threadpool.working or
(reactor.threadpool.q.qsize() > 0)
):
time.sleep(0.01)
have_callbacks = True
continue
# Look at delayed calls.
for delayed in reactor.getDelayedCalls():
# We skip our own timeout call.
if delayed is self._reactor_timeout_call:
continue
if not delayed.func:
# Was already called.
continue
delayed_str = self._getDelayedCallName(delayed)
is_exception = False
for excepted_callback in self.EXCEPTED_DELAYED_CALLS:
if excepted_callback in delayed_str:
is_exception = True
if not is_exception:
# No need to look for other delayed calls.
have_callbacks = True
break
# No need to look for other things as we already know that we need
# to wait at least for delayed calls.
if have_callbacks:
continue
if run_once:
if have_callbacks:
raise AssertionError(
'Reactor queue still contains delayed deferred.\n'
'%s' % (self._reactorQueueToString()))
break
# Look at writers buffers:
if len(reactor.getWriters()) > 0:
have_callbacks = True
continue
for reader in reactor.getReaders():
have_callbacks = True
for excepted_reader in self.EXCEPTED_READERS:
if isinstance(reader, excepted_reader):
have_callbacks = False
break
if have_callbacks:
break
if have_callbacks:
continue
# Look at threads queue and active thread.
if len(reactor.threadCallQueue) > 0:
have_callbacks = True
continue
if reactor.threadpool and len(reactor.threadpool.working) > 0:
have_callbacks = True
continue
self._shutdownTestReactor()
def executeDelayedCalls(self, timeout=None, debug=False):
"""
Run the reactor until no more delayed calls are scheduled.
This will wait for delayed calls to be executed and will not stop
the reactor.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
delayed_calls = reactor.getDelayedCalls()
try:
delayed_calls.remove(self._reactor_timeout_call)
except ValueError: # noqa:cover
# Timeout might be no longer be there.
pass
if not delayed_calls:
break
self._shutdownTestReactor(prevent_stop=True)
if self._reactor_timeout_failure is not None:
self._reactor_timeout_failure = None
# We stop the reactor on failures.
self._shutdownTestReactor()
raise AssertionError(
'executeDelayedCalls took more than %s' % (timeout,))
def executeReactorUntil(
self, callable, timeout=None, debug=False, prevent_stop=True):
"""
Run the reactor until callable returns `True`.
"""
if timeout is None:
timeout = self.DEFERRED_TIMEOUT
self._initiateTestReactor(timeout=timeout)
while not self._timeout_reached:
self._iterateTestReactor(debug=debug)
if callable(reactor):
break
self._shutdownTestReactor(prevent_stop=prevent_stop)
def iterateReactor(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor without stopping it.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0), timeout=timeout, debug=debug)
def iterateReactorWithStop(self, count=1, timeout=None, debug=False):
"""
Iterate the reactor and stop it at the end.
"""
iterations = [False] * (count - 1)
iterations.append(True)
self.executeReactorUntil(
lambda _: iterations.pop(0),
timeout=timeout,
debug=debug,
prevent_stop=False,
)
def iterateReactorForSeconds(self, duration=1, debug=False):
"""
        Iterate the reactor for `duration` seconds.
"""
start = time.time()
self.executeReactorUntil(
lambda _: time.time() - start > duration,
timeout=duration + 0.1,
debug=debug,
prevent_stop=False,
)
def _getDelayedCallName(self, delayed_call):
"""
Return a string representation of the delayed call.
"""
raw_name = text_type(delayed_call.func)
raw_name = raw_name.replace('<function ', '')
raw_name = raw_name.replace('<bound method ', '')
return raw_name.split(' ', 1)[0]
def getDeferredFailure(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the failure.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
failure = self.getDeferredFailure(deferred)
self.assertFailureType(AuthenticationError, failure)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsFailure(deferred)
failure = deferred.result
self.ignoreFailure(deferred)
return failure
def successResultOf(self, deferred):
"""
Return the current success result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a success result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback returned a
non-L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result or has
a failure result.
@return: The result of C{deferred}.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13.0.
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Success result expected on %r, found no result instead" % (
deferred,))
elif isinstance(result[0], Failure):
self.fail(
"Success result expected on %r, "
"found failure result instead:\n%s" % (
deferred, result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def failureResultOf(self, deferred, *expectedExceptionTypes):
"""
Return the current failure result of C{deferred} or raise
C{self.failException}.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>} which
has a failure result. This means
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} or
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called on it and it has reached the end of its callback chain
and the last callback or errback raised an exception or returned a
L{failure.Failure}.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@param expectedExceptionTypes: Exception types to expect - if
            provided, and the exception wrapped by the failure result is
not one of the types provided, then this test will fail.
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has no result, has a
success result, or has an unexpected failure result.
@return: The failure result of C{deferred}.
@rtype: L{failure.Failure}
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
deferred.addBoth(result.append)
if not result:
self.fail(
"Failure result expected on %r, found no result instead" % (
deferred,))
elif not isinstance(result[0], Failure):
self.fail(
"Failure result expected on %r, "
"found success result (%r) instead" % (deferred, result[0]))
elif (expectedExceptionTypes and
not result[0].check(*expectedExceptionTypes)):
expectedString = " or ".join([
'.'.join((t.__module__, t.__name__)) for t in
expectedExceptionTypes])
self.fail(
"Failure of type (%s) expected on %r, "
"found type %r instead: %s" % (
expectedString, deferred, result[0].type,
result[0].getBriefTraceback().decode(
'utf-8', errors='replace')))
else:
return result[0]
def assertNoResult(self, deferred):
"""
Assert that C{deferred} does not have a result at this point.
If the assertion succeeds, then the result of C{deferred} is left
unchanged. Otherwise, any L{failure.Failure} result is swallowed.
@param deferred: A L{Deferred<twisted.internet.defer.Deferred>}
without a result. This means that neither
L{Deferred.callback<twisted.internet.defer.Deferred.callback>} nor
L{Deferred.errback<twisted.internet.defer.Deferred.errback>} has
been called, or that the
L{Deferred<twisted.internet.defer.Deferred>} is waiting on another
L{Deferred<twisted.internet.defer.Deferred>} for a result.
@type deferred: L{Deferred<twisted.internet.defer.Deferred>}
@raise SynchronousTestCase.failureException: If the
L{Deferred<twisted.internet.defer.Deferred>} has a result.
"""
# FIXME:1370:
# Remove / re-route this code after upgrading to Twisted 13
result = []
def cb(res):
result.append(res)
return res
deferred.addBoth(cb)
if result:
# If there is already a failure, the self.fail below will
# report it, so swallow it in the deferred
deferred.addErrback(lambda _: None)
self.fail(
"No result expected on %r, found %r instead" % (
deferred, result[0]))
def getDeferredResult(
self, deferred, timeout=None, debug=False, prevent_stop=False):
"""
Run the deferred and return the result.
Usage::
checker = mk.credentialsChecker()
credentials = mk.credentials()
deferred = checker.requestAvatarId(credentials)
result = self.getDeferredResult(deferred)
self.assertEqual('something', result)
"""
self._runDeferred(
deferred,
timeout=timeout,
debug=debug,
prevent_stop=prevent_stop,
)
self.assertIsNotFailure(deferred)
return deferred.result
def assertWasCalled(self, deferred):
"""
Check that deferred was called.
"""
if not deferred.called:
raise AssertionError('This deferred was not called yet.')
def ignoreFailure(self, deferred):
"""
Ignore the current failure on the deferred.
        It transforms a failure into result `None` so that the failure
will not be raised at reactor shutdown for not being handled.
"""
deferred.addErrback(lambda failure: None)
def assertIsFailure(self, deferred):
"""
Check that deferred is a failure.
"""
if not isinstance(deferred.result, Failure):
raise AssertionError('Deferred is not a failure.')
def assertIsNotFailure(self, deferred):
"""
Raise assertion error if deferred is a Failure.
The failed deferred is handled by this method, to avoid propagating
the error into the reactor.
"""
self.assertWasCalled(deferred)
if isinstance(deferred.result, Failure):
error = deferred.result
self.ignoreFailure(deferred)
raise AssertionError(
'Deferred contains a failure: %s' % (error))
def _get_os_version():
"""
On non-Linux this is just the os_name.
    On Linux it is the distribution name and the version.
    On Windows it is `nt` followed by the major and minor NT version.
It is not the marketing name.
We only support the Windows NT family.
See: https://en.wikipedia.org/wiki/Windows_NT#Releases
On OSX it returns `osx` followed by the version.
It is not the version of the underlying Darwin OS.
See: https://en.wikipedia.org/wiki/MacOS#Release_history
"""
if os.name == 'nt':
parts = platform.version().split('.')
return 'nt-%s.%s' % (parts[0], parts[1])
# We are now in Unix zone.
os_name = os.uname()[0].lower()
if os_name == 'darwin':
parts = platform.mac_ver()[0].split('.')
return 'osx-%s.%s' % (parts[0], parts[1])
if os_name == 'sunos':
parts = platform.release().split('.')
return 'solaris-%s' % (parts[1],)
if os_name == 'aix': # noqa:cover
return 'aix-%s.%s' % (platform.version(), platform.release())
if os_name != 'linux':
return process_capabilities.os_name
# We delay the import as it will call lsb_release.
import ld
distro_name = ld.id()
if distro_name == 'arch':
# Arch has no version.
return 'arch'
if distro_name in ['centos', 'ol']:
# Normalize all RHEL variants.
distro_name = 'rhel'
distro_version = ld.version().split('.', 1)[0]
return '%s-%s' % (distro_name, distro_version)
def _get_cpu_type():
"""
Return the CPU type as used in the brink.sh script.
"""
base = platform.processor()
if base == 'aarch64':
return 'arm64'
if base == 'x86_64':
return 'x64'
return base
_CI_NAMES = Bunch(
LOCAL='local',
GITHUB='github-actions',
TRAVIS='travis',
BUILDBOT='buildbot',
UNKNOWN='unknown-ci',
AZURE='azure-pipelines',
)
def _get_ci_name():
"""
Return the name of the CI on which the tests are currently executed.
"""
if os.environ.get('BUILDBOT', '').lower() == 'true':
return _CI_NAMES.BUILDBOT
if os.environ.get('GITHUB_ACTIONS', '').lower() == 'true':
return _CI_NAMES.GITHUB
if os.environ.get('TRAVIS', '').lower() == 'true':
return _CI_NAMES.TRAVIS
if os.environ.get('INFRASTRUCTURE', '') == 'AZUREPIPELINES':
return _CI_NAMES.AZURE
if os.environ.get('CI', '').lower() == 'true':
return _CI_NAMES.UNKNOWN
return _CI_NAMES.LOCAL
class ChevahTestCase(TwistedTestCase, AssertionMixin):
"""
Test case for Chevah tests.
Checks that temporary folder is clean at exit.
"""
os_name = process_capabilities.os_name
os_family = process_capabilities.os_family
os_version = _get_os_version()
cpu_type = process_capabilities.cpu_type
ci_name = _get_ci_name()
CI = _CI_NAMES
TEST_LANGUAGE = os.getenv('TEST_LANG', 'EN')
# List of partial thread names to ignore during the tearDown.
# No need for the full thread name
excepted_threads = [
'MainThread',
'threaded_reactor',
'GlobalPool-WorkerHandler',
'GlobalPool-TaskHandler',
'GlobalPool-ResultHandler',
'PoolThread-twisted.internet.reactor',
]
# We assume that hostname does not change during test and this
# should save a few DNS queries.
hostname = _get_hostname()
Bunch = Bunch
Mock = Mock
#: Obsolete. Please use self.patch and self.patchObject.
Patch = patch
_environ_user = None
_drop_user = '-'
def setUp(self):
super(ChevahTestCase, self).setUp()
self.__cleanup__ = []
self._cleanup_stack = []
self._teardown_errors = []
self.test_segments = None
def tearDown(self):
self.callCleanup()
self._checkTemporaryFiles()
threads = threading.enumerate()
if len(threads) > 1:
for thread in threads:
thread_name = thread.getName()
if self._isExceptedThread(thread_name):
continue
self._teardown_errors.append(AssertionError(
'There are still active threads, '
'beside the main thread: %s - %s' % (
thread_name, threads)))
super(ChevahTestCase, self).tearDown()
errors, self._teardown_errors = self._teardown_errors, None
if errors:
raise AssertionError('Cleanup errors: %r' % (errors,))
def _isExceptedThread(self, name):
"""
        Return `True` if it is OK for the thread to exist after the test is done.
"""
for exception in self.excepted_threads:
if name in exception:
return True
if exception in name:
return True
return False
def addCleanup(self, function, *args, **kwargs):
"""
Overwrite unit-test behaviour to run cleanup method before tearDown.
"""
self.__cleanup__.append((function, args, kwargs))
def callCleanup(self):
"""
Call all cleanup methods.
If a cleanup fails, the next cleanups will continue to be called and
the first failure is raised.
"""
for function, args, kwargs in reversed(self.__cleanup__):
try:
function(*args, **kwargs)
except Exception as error: # noqa:cover
self._teardown_errors.append(error)
self.__cleanup__ = []
def enterCleanup(self):
"""
Called when start using stacked cleanups.
"""
self._cleanup_stack.append(self.__cleanup__)
self.__cleanup__ = []
def exitCleanup(self):
"""
To be called at the end of a stacked cleanup.
"""
self.callCleanup()
self.__cleanup__ = self._cleanup_stack.pop()
@contextlib.contextmanager
def stackedCleanup(self):
"""
Context manager for stacked cleanups.
"""
try:
self.enterCleanup()
yield
finally:
self.exitCleanup()
def _checkTemporaryFiles(self):
"""
Check that no temporary files or folders are present.
"""
# FIXME:922:
# Move all filesystem checks into a specialized class
if self.test_segments:
if mk.fs.isFolder(self.test_segments):
mk.fs.deleteFolder(
self.test_segments, recursive=True)
else:
mk.fs.deleteFile(self.test_segments)
checks = [
self.assertTempIsClean,
self.assertWorkingFolderIsClean,
]
errors = []
for check in checks:
try:
check()
except AssertionError as error:
errors.append(error.message)
if errors: # noqa:cover
self._teardown_errors.append(AssertionError(
'There are temporary files or folders left over.\n %s' % (
'\n'.join(errors))))
def shortDescription(self): # noqa:cover
"""
The short description for the test.
bla.bla.tests. is removed.
The format is customized for Chevah Nose runner.
This is only called when we run with -v or we show the error.
"""
class_name = text_type(self.__class__)[8:-2]
class_name = class_name.replace('.Test', ':Test')
tests_start = class_name.find('.tests.') + 7
class_name = class_name[tests_start:]
return "%s - %s.%s" % (
self._testMethodName,
class_name,
self._testMethodName)
def assertRaises(self, exception_class, callback=None, *args, **kwargs):
"""
Wrapper around the stdlib call to allow non-context usage.
"""
super_assertRaises = super(ChevahTestCase, self).assertRaises
if callback is None:
return super_assertRaises(exception_class)
with super_assertRaises(exception_class) as context:
callback(*args, **kwargs)
return context.exception
def assertSequenceEqual(self, first, second, msg, seq_type):
super(ChevahTestCase, self).assertSequenceEqual(
first, second, msg, seq_type)
for first_element, second_element in zip(first, second):
self.assertEqual(first_element, second_element)
def assertDictEqual(self, first, second, msg):
super(ChevahTestCase, self).assertDictEqual(first, second, msg)
first_keys = sorted(first.keys())
second_keys = sorted(second.keys())
first_values = [first[key] for key in first_keys]
second_values = [second[key] for key in second_keys]
self.assertSequenceEqual(first_keys, second_keys, msg, list)
self.assertSequenceEqual(first_values, second_values, msg, list)
def assertSetEqual(self, first, second, msg):
super(ChevahTestCase, self).assertSetEqual(first, second, msg)
first_elements = sorted(first)
second_elements = sorted(second)
self.assertSequenceEqual(first_elements, second_elements, msg, list)
def _baseAssertEqual(self, first, second, msg=None):
"""
Update to stdlib to make sure we don't compare str with unicode.
"""
if (
isinstance(first, text_type) and
not isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is unicode while second is str for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
if (
not isinstance(first, text_type) and
isinstance(second, text_type)
): # noqa:cover
if not msg:
msg = u'First is str while second is unicode for "%s".' % (
first,)
raise AssertionError(msg.encode('utf-8'))
return super(ChevahTestCase, self)._baseAssertEqual(
first, second, msg=msg)
@staticmethod
def getHostname():
"""
Return the hostname of the current system.
"""
return _get_hostname()
@classmethod
def initialize(cls, drop_user):
"""
Initialize the testing environment.
"""
cls._drop_user = drop_user
os.environ['DROP_USER'] = drop_user
if 'LOGNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['LOGNAME']
if 'USER' in os.environ and 'USERNAME' not in os.environ:
os.environ['USERNAME'] = os.environ['USER']
if 'USERNAME' in os.environ and 'USER' not in os.environ:
os.environ['USER'] = os.environ['USERNAME']
cls._environ_user = os.environ['USER']
cls.cleanTemporaryFolder()
@classmethod
def dropPrivileges(cls):
'''Drop privileges to normal users.'''
if cls._drop_user == '-':
return
os.environ['USERNAME'] = cls._drop_user
os.environ['USER'] = cls._drop_user
# Test suite should be started as root and we drop effective user
# privileges.
system_users.dropPrivileges(username=cls._drop_user)
@staticmethod
def skipTest(message=''):
'''Return a SkipTest exception.'''
return SkipTest(message)
@property
def _caller_success_member(self):
'''Retrieve the 'success' member from the test case.'''
success_state = None
# We search starting with second stack, since first stack is the
# current stack and we don't care about it.
for level in inspect.stack()[1:]:
try:
success_state = level[0].f_locals['success']
break
except KeyError:
success_state = None
if success_state is None:
raise AssertionError('Failed to find "success" attribute.')
return success_state
@staticmethod
def patch(*args, **kwargs):
"""
Helper for generic patching.
"""
return patch(*args, **kwargs)
@staticmethod
def patchObject(*args, **kwargs):
"""
Helper for patching objects.
"""
return patch.object(*args, **kwargs)
def now(self):
"""
Return current Unix timestamp.
"""
return time.time()
@classmethod
def cleanTemporaryFolder(cls):
"""
Clean all test files from temporary folder.
Return a list of members which were removed.
"""
return cls._cleanFolder(mk.fs.temp_segments)
@classmethod
def cleanWorkingFolder(cls):
path = mk.fs.getAbsoluteRealPath('.')
segments = mk.fs.getSegmentsFromRealPath(path)
return cls._cleanFolder(segments, only_marked=True)
@classmethod
def _cleanFolder(cls, folder_segments, only_marked=False):
"""
Clean all test files from folder_segments.
Return a list of members which were removed.
"""
if not mk.fs.exists(folder_segments):
return []
# In case we are running the test suite as super user,
# we use super filesystem for cleaning.
if cls._environ_user == cls._drop_user:
temp_avatar = SuperAvatar()
else:
temp_avatar = DefaultAvatar()
temp_filesystem = LocalFilesystem(avatar=temp_avatar)
temp_members = []
for member in (temp_filesystem.getFolderContent(folder_segments)):
if only_marked and member.find(TEST_NAME_MARKER) == -1:
continue
temp_members.append(member)
segments = folder_segments[:]
segments.append(member)
if temp_filesystem.isFolder(segments):
temp_filesystem.deleteFolder(segments, recursive=True)
else:
temp_filesystem.deleteFile(segments)
return temp_members
@classmethod
def getPeakMemoryUsage(cls):
"""
        Return maximum memory usage in kilobytes.
"""
if cls.os_family == 'posix':
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
elif cls.os_family == 'nt':
from wmi import WMI
local_wmi = WMI('.')
query = (
u'SELECT PeakWorkingSetSize '
u'FROM Win32_Process '
u'WHERE Handle=%d' % os.getpid())
result = local_wmi.query(query.encode('utf-8'))
peak_working_set_size = int(result[0].PeakWorkingSetSize)
# FIXME:2099:
# Windows XP reports value in bytes, instead of Kilobytes.
return int(peak_working_set_size)
else:
raise AssertionError('OS not supported.')
def folderInTemp(self, *args, **kwargs):
"""
Create a folder in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.folderInTemp(*args, **kwargs)
def fileInTemp(self, *args, **kwargs):
"""
Create a file in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.fileInTemp(*args, **kwargs)
def assertIn(self, target, source):
"""
Overwrite stdlib to swap the arguments.
"""
if source not in target:
message = u'%s not in %s.' % (repr(source), repr(target))
raise AssertionError(message.encode('utf-8'))
def assertIsInstance(self, expected_type, value, msg=None):
"""
Raise an exception if `value` is not an instance of `expected_type`
"""
# In Python 2.7 isInstance is already defined, but with swapped
# arguments.
if not inspect.isclass(expected_type):
expected_type, value = value, expected_type
if not isinstance(value, expected_type):
raise AssertionError(
"Expecting type %s, but got %s. %s" % (
expected_type, type(value), msg))
def tempPath(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet.
"""
return mk.fs.makePathInTemp(prefix=prefix, suffix=suffix)
def tempPathCleanup(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet but which
will be automatically removed.
"""
return mk.fs.pathInTemp(
cleanup=self.addCleanup, prefix=prefix, suffix=suffix)
def tempFile(self, content='', prefix='', suffix='', cleanup=True):
"""
Return (path, segments) for a new file created in temp which is
auto cleaned.
"""
segments = mk.fs.createFileInTemp(prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
if cleanup:
self.addCleanup(mk.fs.deleteFile, segments)
try:
opened_file = mk.fs.openFileForWriting(segments)
opened_file.write(content)
finally:
opened_file.close()
return (path, segments)
def tempFolder(self, name=None, prefix='', suffix=''):
"""
Create a new temp folder and return its path and segments, which is
auto cleaned.
"""
segments = mk.fs.createFolderInTemp(
foldername=name, prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
self.addCleanup(mk.fs.deleteFolder, segments, recursive=True)
return (path, segments)
class FileSystemTestCase(ChevahTestCase):
"""
Common test case for all file-system tests using a real OS account.
"""
@classmethod
def setUpClass(cls):
# FIXME:924:
# Disabled when we can not find the home folder path.
if not process_capabilities.get_home_folder:
raise cls.skipTest()
super(FileSystemTestCase, cls).setUpClass()
cls.os_user = cls.setUpTestUser()
home_folder_path = system_users.getHomeFolder(
username=cls.os_user.name, token=cls.os_user.token)
cls.avatar = mk.makeFilesystemOSAvatar(
name=cls.os_user.name,
home_folder_path=home_folder_path,
token=cls.os_user.token,
)
cls.filesystem = LocalFilesystem(avatar=cls.avatar)
@classmethod
def tearDownClass(cls):
if not cls.os_user.windows_create_local_profile:
os_administration.deleteHomeFolder(cls.os_user)
os_administration.deleteUser(cls.os_user)
super(FileSystemTestCase, cls).tearDownClass()
@classmethod
def setUpTestUser(cls):
"""
Set-up OS user for file system testing.
"""
from chevah.compat.testing import TEST_ACCOUNT_GROUP
user = mk.makeTestUser(home_group=TEST_ACCOUNT_GROUP)
os_administration.addUser(user)
return user
def setUp(self):
super(FileSystemTestCase, self).setUp()
# Initialized only to clean the home folder.
test_filesystem = LocalTestFilesystem(avatar=self.avatar)
test_filesystem.cleanHomeFolder()
class OSAccountFileSystemTestCase(FileSystemTestCase):
"""
Test case for tests that need a dedicated local OS account present.
"""
#: User will be created before running the test case and removed on
#: teardown.
CREATE_TEST_USER = None
@classmethod
def setUpTestUser(cls):
"""
Add `CREATE_TEST_USER` to local OS.
"""
os_administration.addUser(cls.CREATE_TEST_USER)
return cls.CREATE_TEST_USER
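# A minimal usage sketch, not part of the original module: an example test case
# built on ChevahTestCase, combining the temporary-file helper with the deferred
# helpers defined above. The class name, test names and values are examples only.
class ExampleChevahTestCase(ChevahTestCase):
    """
    Example only: exercises tempFile and getDeferredResult.
    """

    def test_temp_file(self):
        # tempFile creates the file and registers its removal via addCleanup.
        path, segments = self.tempFile(content='some data')
        self.assertTrue(mk.fs.exists(segments))

    def test_deferred_result(self):
        # getDeferredResult spins the test reactor until the deferred has fired.
        deferred = Deferred()
        deferred.callback('something')
        result = self.getDeferredResult(deferred)
        self.assertEqual('something', result)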
| [((30389, 30524), 'bunch.Bunch', 'Bunch', ([], {'LOCAL': '"""local"""', 'GITHUB': '"""github-actions"""', 'TRAVIS': '"""travis"""', 'BUILDBOT': '"""buildbot"""', 'UNKNOWN': '"""unknown-ci"""', 'AZURE': '"""azure-pipelines"""'}), "(LOCAL='local', GITHUB='github-actions', TRAVIS='travis', BUILDBOT=\n 'buildbot', UNKNOWN='unknown-ci', AZURE='azure-pipelines')\n", (30394, 30524), False, 'from bunch import Bunch\n'), ((1767, 1787), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1785, 1787), False, 'import socket\n'), ((29831, 29838), 'ld.id', 'ld.id', ([], {}), '()\n', (29836, 29838), False, 'import ld\n'), ((30241, 30261), 'platform.processor', 'platform.processor', ([], {}), '()\n', (30259, 30261), False, 'import platform\n'), ((31543, 31571), 'os.getenv', 'os.getenv', (['"""TEST_LANG"""', '"""EN"""'], {}), "('TEST_LANG', 'EN')\n", (31552, 31571), False, 'import os\n'), ((2740, 2751), 'six.moves.range', 'range', (['(2)', '(6)'], {}), '(2, 6)\n', (2745, 2751), False, 'from six.moves import range\n'), ((3622, 3647), 'twisted.internet.reactor.getDelayedCalls', 'reactor.getDelayedCalls', ([], {}), '()\n', (3645, 3647), False, 'from twisted.internet import reactor\n'), ((5605, 5630), 'twisted.internet.reactor.getDelayedCalls', 'reactor.getDelayedCalls', ([], {}), '()\n', (5628, 5630), False, 'from twisted.internet import reactor\n'), ((6524, 6591), 'twisted.internet.reactor.callLater', 'reactor.callLater', (['timeout', 'self._raiseReactorTimeoutError', 'timeout'], {}), '(timeout, self._raiseReactorTimeoutError, timeout)\n', (6541, 6591), False, 'from twisted.internet import reactor\n'), ((6896, 6918), 'twisted.internet.reactor.startRunning', 'reactor.startRunning', ([], {}), '()\n', (6916, 6918), False, 'from twisted.internet import reactor\n'), ((7029, 7054), 'twisted.internet.reactor.runUntilCurrent', 'reactor.runUntilCurrent', ([], {}), '()\n', (7052, 7054), False, 'from twisted.internet import reactor\n'), ((9159, 9173), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (9171, 9173), False, 'from twisted.internet import reactor\n'), ((9252, 9269), 'twisted.internet.reactor.iterate', 'reactor.iterate', ([], {}), '()\n', (9267, 9269), False, 'from twisted.internet import reactor\n'), ((9559, 9638), 'twisted.internet.reactor.addSystemEventTrigger', 'reactor.addSystemEventTrigger', (['"""during"""', '"""startup"""', 'reactor._reallyStartRunning'], {}), "('during', 'startup', reactor._reallyStartRunning)\n", (9588, 9638), False, 'from twisted.internet import reactor\n'), ((11050, 11070), 'twisted.internet.reactor.getReaders', 'reactor.getReaders', ([], {}), '()\n', (11068, 11070), False, 'from twisted.internet import reactor\n'), ((11415, 11440), 'twisted.internet.reactor.getDelayedCalls', 'reactor.getDelayedCalls', ([], {}), '()\n', (11438, 11440), False, 'from twisted.internet import reactor\n'), ((20022, 20033), 'time.time', 'time.time', ([], {}), '()\n', (20031, 20033), False, 'import time\n'), ((20383, 20411), 'six.text_type', 'text_type', (['delayed_call.func'], {}), '(delayed_call.func)\n', (20392, 20411), False, 'from six import text_type\n'), ((30945, 30981), 'os.environ.get', 'os.environ.get', (['"""INFRASTRUCTURE"""', '""""""'], {}), "('INFRASTRUCTURE', '')\n", (30959, 30981), False, 'import os\n'), ((32517, 32538), 'threading.enumerate', 'threading.enumerate', ([], {}), '()\n', (32536, 32538), False, 'import threading\n'), ((39884, 39936), 'chevah.compat.system_users.dropPrivileges', 'system_users.dropPrivileges', ([], {'username': 'cls._drop_user'}), 
'(username=cls._drop_user)\n', (39911, 39936), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((40044, 40061), 'nose.SkipTest', 'SkipTest', (['message'], {}), '(message)\n', (40052, 40061), False, 'from nose import SkipTest\n'), ((40803, 40825), 'mock.patch', 'patch', (['*args'], {}), '(*args, **kwargs)\n', (40808, 40825), False, 'from mock import patch, Mock\n'), ((40959, 40988), 'mock.patch.object', 'patch.object', (['*args'], {}), '(*args, **kwargs)\n', (40971, 40988), False, 'from mock import patch, Mock\n'), ((41087, 41098), 'time.time', 'time.time', ([], {}), '()\n', (41096, 41098), False, 'import time\n'), ((41401, 41431), 'chevah.compat.testing.mockup.mk.fs.getAbsoluteRealPath', 'mk.fs.getAbsoluteRealPath', (['"""."""'], {}), "('.')\n", (41426, 41431), False, 'from chevah.compat.testing.mockup import mk\n'), ((41451, 41486), 'chevah.compat.testing.mockup.mk.fs.getSegmentsFromRealPath', 'mk.fs.getSegmentsFromRealPath', (['path'], {}), '(path)\n', (41480, 41486), False, 'from chevah.compat.testing.mockup import mk\n'), ((42108, 42143), 'chevah.compat.LocalFilesystem', 'LocalFilesystem', ([], {'avatar': 'temp_avatar'}), '(avatar=temp_avatar)\n', (42123, 42143), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((43744, 43779), 'chevah.compat.testing.mockup.mk.fs.folderInTemp', 'mk.fs.folderInTemp', (['*args'], {}), '(*args, **kwargs)\n', (43762, 43779), False, 'from chevah.compat.testing.mockup import mk\n'), ((43981, 44014), 'chevah.compat.testing.mockup.mk.fs.fileInTemp', 'mk.fs.fileInTemp', (['*args'], {}), '(*args, **kwargs)\n', (43997, 44014), False, 'from chevah.compat.testing.mockup import mk\n'), ((44999, 45049), 'chevah.compat.testing.mockup.mk.fs.makePathInTemp', 'mk.fs.makePathInTemp', ([], {'prefix': 'prefix', 'suffix': 'suffix'}), '(prefix=prefix, suffix=suffix)\n', (45019, 45049), False, 'from chevah.compat.testing.mockup import mk\n'), ((45260, 45331), 'chevah.compat.testing.mockup.mk.fs.pathInTemp', 'mk.fs.pathInTemp', ([], {'cleanup': 'self.addCleanup', 'prefix': 'prefix', 'suffix': 'suffix'}), '(cleanup=self.addCleanup, prefix=prefix, suffix=suffix)\n', (45276, 45331), False, 'from chevah.compat.testing.mockup import mk\n'), ((45555, 45607), 'chevah.compat.testing.mockup.mk.fs.createFileInTemp', 'mk.fs.createFileInTemp', ([], {'prefix': 'prefix', 'suffix': 'suffix'}), '(prefix=prefix, suffix=suffix)\n', (45577, 45607), False, 'from chevah.compat.testing.mockup import mk\n'), ((45623, 45662), 'chevah.compat.testing.mockup.mk.fs.getRealPathFromSegments', 'mk.fs.getRealPathFromSegments', (['segments'], {}), '(segments)\n', (45652, 45662), False, 'from chevah.compat.testing.mockup import mk\n'), ((46137, 46208), 'chevah.compat.testing.mockup.mk.fs.createFolderInTemp', 'mk.fs.createFolderInTemp', ([], {'foldername': 'name', 'prefix': 'prefix', 'suffix': 'suffix'}), '(foldername=name, prefix=prefix, suffix=suffix)\n', (46161, 46208), False, 'from chevah.compat.testing.mockup import mk\n'), ((46237, 46276), 'chevah.compat.testing.mockup.mk.fs.getRealPathFromSegments', 'mk.fs.getRealPathFromSegments', (['segments'], {}), '(segments)\n', (46266, 46276), False, 'from chevah.compat.testing.mockup import mk\n'), ((46847, 46925), 'chevah.compat.system_users.getHomeFolder', 'system_users.getHomeFolder', ([], {'username': 'cls.os_user.name', 'token': 'cls.os_user.token'}), '(username=cls.os_user.name, token=cls.os_user.token)\n', (46873, 
46925), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((46961, 47074), 'chevah.compat.testing.mockup.mk.makeFilesystemOSAvatar', 'mk.makeFilesystemOSAvatar', ([], {'name': 'cls.os_user.name', 'home_folder_path': 'home_folder_path', 'token': 'cls.os_user.token'}), '(name=cls.os_user.name, home_folder_path=\n home_folder_path, token=cls.os_user.token)\n', (46986, 47074), False, 'from chevah.compat.testing.mockup import mk\n'), ((47146, 47180), 'chevah.compat.LocalFilesystem', 'LocalFilesystem', ([], {'avatar': 'cls.avatar'}), '(avatar=cls.avatar)\n', (47161, 47180), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((47352, 47393), 'chevah.compat.administration.os_administration.deleteUser', 'os_administration.deleteUser', (['cls.os_user'], {}), '(cls.os_user)\n', (47380, 47393), False, 'from chevah.compat.administration import os_administration\n'), ((47644, 47690), 'chevah.compat.testing.mockup.mk.makeTestUser', 'mk.makeTestUser', ([], {'home_group': 'TEST_ACCOUNT_GROUP'}), '(home_group=TEST_ACCOUNT_GROUP)\n', (47659, 47690), False, 'from chevah.compat.testing.mockup import mk\n'), ((47699, 47730), 'chevah.compat.administration.os_administration.addUser', 'os_administration.addUser', (['user'], {}), '(user)\n', (47724, 47730), False, 'from chevah.compat.administration import os_administration\n'), ((47900, 47939), 'chevah.compat.testing.filesystem.LocalTestFilesystem', 'LocalTestFilesystem', ([], {'avatar': 'self.avatar'}), '(avatar=self.avatar)\n', (47919, 47939), False, 'from chevah.compat.testing.filesystem import LocalTestFilesystem\n'), ((48367, 48414), 'chevah.compat.administration.os_administration.addUser', 'os_administration.addUser', (['cls.CREATE_TEST_USER'], {}), '(cls.CREATE_TEST_USER)\n', (48392, 48414), False, 'from chevah.compat.administration import os_administration\n'), ((5108, 5127), 'twisted.internet.reactor.removeAll', 'reactor.removeAll', ([], {}), '()\n', (5125, 5127), False, 'from twisted.internet import reactor\n'), ((7974, 7991), 'twisted.internet.reactor.timeout', 'reactor.timeout', ([], {}), '()\n', (7989, 7991), False, 'from twisted.internet import reactor\n'), ((8217, 8239), 'twisted.internet.reactor.doIteration', 'reactor.doIteration', (['t'], {}), '(t)\n', (8236, 8239), False, 'from twisted.internet import reactor\n'), ((8486, 8512), 'twisted.internet.reactor.doIteration', 'reactor.doIteration', (['(1e-06)'], {}), '(1e-06)\n', (8505, 8512), False, 'from twisted.internet import reactor\n'), ((15444, 15469), 'twisted.internet.reactor.getDelayedCalls', 'reactor.getDelayedCalls', ([], {}), '()\n', (15467, 15469), False, 'from twisted.internet import reactor\n'), ((16779, 16799), 'twisted.internet.reactor.getReaders', 'reactor.getReaders', ([], {}), '()\n', (16797, 16799), False, 'from twisted.internet import reactor\n'), ((17992, 18017), 'twisted.internet.reactor.getDelayedCalls', 'reactor.getDelayedCalls', ([], {}), '()\n', (18015, 18017), False, 'from twisted.internet import reactor\n'), ((35032, 35066), 'chevah.compat.testing.mockup.mk.fs.isFolder', 'mk.fs.isFolder', (['self.test_segments'], {}), '(self.test_segments)\n', (35046, 35066), False, 'from chevah.compat.testing.mockup import mk\n'), ((36033, 36058), 'six.text_type', 'text_type', (['self.__class__'], {}), '(self.__class__)\n', (36042, 36058), False, 'from six import text_type\n'), ((40354, 40369), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (40367, 
40369), False, 'import inspect\n'), ((41772, 41801), 'chevah.compat.testing.mockup.mk.fs.exists', 'mk.fs.exists', (['folder_segments'], {}), '(folder_segments)\n', (41784, 41801), False, 'from chevah.compat.testing.mockup import mk\n'), ((42011, 42024), 'chevah.compat.SuperAvatar', 'SuperAvatar', ([], {}), '()\n', (42022, 42024), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((42065, 42080), 'chevah.compat.DefaultAvatar', 'DefaultAvatar', ([], {}), '()\n', (42078, 42080), False, 'from chevah.compat import DefaultAvatar, LocalFilesystem, process_capabilities, system_users, SuperAvatar\n'), ((44562, 44592), 'inspect.isclass', 'inspect.isclass', (['expected_type'], {}), '(expected_type)\n', (44577, 44592), False, 'import inspect\n'), ((45780, 45814), 'chevah.compat.testing.mockup.mk.fs.openFileForWriting', 'mk.fs.openFileForWriting', (['segments'], {}), '(segments)\n', (45804, 45814), False, 'from chevah.compat.testing.mockup import mk\n'), ((47296, 47343), 'chevah.compat.administration.os_administration.deleteHomeFolder', 'os_administration.deleteHomeFolder', (['cls.os_user'], {}), '(cls.os_user)\n', (47330, 47343), False, 'from chevah.compat.administration import os_administration\n'), ((3689, 3712), 'six.text_type', 'text_type', (['delayed.func'], {}), '(delayed.func)\n', (3698, 3712), False, 'from six import text_type\n'), ((4191, 4230), 'twisted.internet.reactor.threadpool._team._pending.pop', 'reactor.threadpool._team._pending.pop', ([], {}), '()\n', (4228, 4230), False, 'from twisted.internet import reactor\n'), ((10916, 10936), 'twisted.internet.reactor.getWriters', 'reactor.getWriters', ([], {}), '()\n', (10934, 10936), False, 'from twisted.internet import reactor\n'), ((29169, 29187), 'platform.version', 'platform.version', ([], {}), '()\n', (29185, 29187), False, 'import platform\n'), ((29294, 29304), 'os.uname', 'os.uname', ([], {}), '()\n', (29302, 29304), False, 'import os\n'), ((29488, 29506), 'platform.release', 'platform.release', ([], {}), '()\n', (29504, 29506), False, 'import platform\n'), ((29630, 29648), 'platform.version', 'platform.version', ([], {}), '()\n', (29646, 29648), False, 'import platform\n'), ((29650, 29668), 'platform.release', 'platform.release', ([], {}), '()\n', (29666, 29668), False, 'import platform\n'), ((30053, 30065), 'ld.version', 'ld.version', ([], {}), '()\n', (30063, 30065), False, 'import ld\n'), ((30669, 30699), 'os.environ.get', 'os.environ.get', (['"""BUILDBOT"""', '""""""'], {}), "('BUILDBOT', '')\n", (30683, 30699), False, 'import os\n'), ((30761, 30797), 'os.environ.get', 'os.environ.get', (['"""GITHUB_ACTIONS"""', '""""""'], {}), "('GITHUB_ACTIONS', '')\n", (30775, 30797), False, 'import os\n'), ((30857, 30885), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""', '""""""'], {}), "('TRAVIS', '')\n", (30871, 30885), False, 'import os\n'), ((31042, 31066), 'os.environ.get', 'os.environ.get', (['"""CI"""', '""""""'], {}), "('CI', '')\n", (31056, 31066), False, 'import os\n'), ((35084, 35138), 'chevah.compat.testing.mockup.mk.fs.deleteFolder', 'mk.fs.deleteFolder', (['self.test_segments'], {'recursive': '(True)'}), '(self.test_segments, recursive=True)\n', (35102, 35138), False, 'from chevah.compat.testing.mockup import mk\n'), ((35194, 35230), 'chevah.compat.testing.mockup.mk.fs.deleteFile', 'mk.fs.deleteFile', (['self.test_segments'], {}), '(self.test_segments)\n', (35210, 35230), False, 'from chevah.compat.testing.mockup import mk\n'), ((42888, 42928), 
'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (42906, 42928), False, 'import resource\n'), ((43031, 43039), 'wmi.WMI', 'WMI', (['"""."""'], {}), "('.')\n", (43034, 43039), False, 'from wmi import WMI\n'), ((11004, 11024), 'twisted.internet.reactor.getWriters', 'reactor.getWriters', ([], {}), '()\n', (11022, 11024), False, 'from twisted.internet import reactor\n'), ((15291, 15307), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (15301, 15307), False, 'import time\n'), ((16662, 16682), 'twisted.internet.reactor.getWriters', 'reactor.getWriters', ([], {}), '()\n', (16680, 16682), False, 'from twisted.internet import reactor\n'), ((29361, 29379), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (29377, 29379), False, 'import platform\n'), ((43185, 43196), 'os.getpid', 'os.getpid', ([], {}), '()\n', (43194, 43196), False, 'import os\n'), ((7704, 7724), 'twisted.internet.reactor.getWriters', 'reactor.getWriters', ([], {}), '()\n', (7722, 7724), False, 'from twisted.internet import reactor\n'), ((7746, 7766), 'twisted.internet.reactor.getReaders', 'reactor.getReaders', ([], {}), '()\n', (7764, 7766), False, 'from twisted.internet import reactor\n'), ((11363, 11383), 'twisted.internet.reactor.getReaders', 'reactor.getReaders', ([], {}), '()\n', (11381, 11383), False, 'from twisted.internet import reactor\n'), ((15210, 15238), 'twisted.internet.reactor.threadpool.q.qsize', 'reactor.threadpool.q.qsize', ([], {}), '()\n', (15236, 15238), False, 'from twisted.internet import reactor\n'), ((20091, 20102), 'time.time', 'time.time', ([], {}), '()\n', (20100, 20102), False, 'import time\n'), ((2796, 2811), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2809, 2811), False, 'import inspect\n'), ((7788, 7811), 'twisted.internet.reactor.getThreadPool', 'reactor.getThreadPool', ([], {}), '()\n', (7809, 7811), False, 'from twisted.internet import reactor\n')] |
jphacks/C_2118 | web/snowflake.py | a63279e92362e09d1856e3d44edb4793d370fd7a | import time
class Snowflake:
    def __init__(self, init_serial_no=0):
        self.machine_id = 0              # 10-bit machine/worker identifier
        self.epoch = 0                   # custom epoch offset in milliseconds
        self.serial_no = init_serial_no  # 12-bit per-millisecond sequence counter

    def generate(self):
        # Pack a 64-bit ID: 41 bits of millisecond timestamp (relative to the
        # custom epoch), 10 bits of machine id, and 12 bits of serial number.
        unique_id = (
            ((int(time.time() * 1000) - self.epoch) & 0x1FFFFFFFFFF) << 22
            | (self.machine_id & 0x3FF) << 12
            | (self.serial_no & 0xFFF)
        )
        self.serial_no += 1
        return unique_id
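
# Hedged usage sketch (added for illustration, not part of the original module):
# IDs produced by a single Snowflake instance are strictly increasing as long as
# the system clock does not run backwards.
if __name__ == "__main__":
    generator = Snowflake()
    first = generator.generate()
    second = generator.generate()
    assert second > first
    print(first, second)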
| [((229, 240), 'time.time', 'time.time', ([], {}), '()\n', (238, 240), False, 'import time\n')] |
icml2020submission6857/metarl | src/metarl/tf/plotter/__init__.py | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | from metarl.tf.plotter.plotter import Plotter
__all__ = ['Plotter']
| [] |
slaily/deep-learning-bits | generative_deep_learning/build_network.py | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | from keras import layers
# Single-layer LSTM model for next-character prediction
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Model compilation configuration
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# Function to sample the next character given the model’s predictions
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
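
# Note (added): the reweighting in sample() computes
#   p_i' = exp(log(p_i) / temperature) / sum_j exp(log(p_j) / temperature),
# so temperature < 1 sharpens the distribution (safer, more repetitive text)
# and temperature > 1 flattens it (more surprising characters).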
# Text-generation loop
import sys
import random
# Trains the model for 60 epochs
for epoch in range(1, 60):
print(f'Epoch: {epoch}')
model.fit(x, y, batch_size=128, epochs=1)
# Selects a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print(f'--- Generating with seed: {generated_text} ---')
# Tries a range of different sampling temperatures
for temperature in [0.2, 0.5, 1.0, 1.2]:
print(f'--- Temperature {temperature} ---')
sys.stdout.write(generated_text)
# Generates 400 characters, starting from the seed text
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
# Samples the next character
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
| [((1298, 1330), 'sys.stdout.write', 'sys.stdout.write', (['generated_text'], {}), '(generated_text)\n', (1314, 1330), False, 'import sys\n'), ((1884, 1911), 'sys.stdout.write', 'sys.stdout.write', (['next_char'], {}), '(next_char)\n', (1900, 1911), False, 'import sys\n')] |
thunderbug1/pyanom | tests/test_structure_learning.py | e442bff70a4d1880a9a698c020287edf1933d498 | import io
import unittest
import numpy as np
class TestGraphicalLasso(unittest.TestCase):
"""Basic test cases."""
def _getTarget(self):
from pyanom.structure_learning import GraphicalLasso
return GraphicalLasso
def _makeOne(self, *args, **kwargs):
return self._getTarget()(*args, **kwargs)
@classmethod
def setUpClass(self):
self.X_normal = np.array([[0.975586009, -0.745997359, -0.229331244],
[-0.460992487, -1.304668238, -0.599247488],
[-0.503171745, -1.308368748, -1.451411048],
[-0.904446243, -0.287837582, 0.197153592],
[-1.106120624, 0.243612535, 1.051237763],
[0.371920628, 1.690566027, -0.468645532],
[-0.861682655, 1.472544046, -0.846863556],
[0.632918214, 1.35895507, -1.217528827],
[0.017011646, 1.556247275, -0.149119024],
[-1.129336215, 0.486811944, 0.012272206],
[0.498967152, -0.530065628, -2.14011938],
[0.402460108, -0.474465633, -0.041584595],
[-0.847994655, -1.281269721, -0.430338406],
[-0.583857254, 0.228815073, -1.321443286],
[0.963425438, -1.136873938, 0.990406269],
[-1.342349795, -0.147133485, 1.286410605],
[-0.546153552, 0.134343445, -0.380672316],
[-2.264867999, 0.227795362, 1.477762968],
[0.070095074, -0.770899782, 2.100831522],
[0.425213005, 0.796156033, 1.676164975]])
self.X_error = np.array([[-0.273095586, 0.356336588, 1.595876828],
[-0.708547003, -0.572139833, 0.858932219],
[-1.125947228, -1.049026454, 0.35980022],
[0.653070988, -0.052417831, 0.787284547],
[-1.059131881, 1.621161051, -1.295306533],
[0.499065038, -1.064179225, 1.243325767],
[0.452740621, -0.737171777, 0.352807563],
[0.626897927, -1.100559392, -0.905560876],
[1.338835274, 2.083549348, -1.280796042],
[0.264928015, 10, 2.544472412],
[-0.754827534, -1.031919195, 1.227285333],
[-0.774019674, 0.241245625, -0.989132941],
[1.298381426, 0.19445334, 2.267355363],
[1.46892843, 1.24946146, 0.322341667],
[1.057265661, -0.846614104, -0.355396321],
[0.810670486, -0.719804484, -0.943762163],
[1.169028226, 0.492444331, 0.234015505],
[-0.307091024, -1.56195639, 0.509095939],
[0.849156845, 0.533674261, 0.069183014],
[0.102812565, 8, 1.545239732]])
def test_outlier_analysis_score_shape(self):
target = self._makeOne()
target.fit(self.X_normal)
pred = target.outlier_analysis_score(self.X_error)
self.assertEqual(pred.shape, (20, 3))
def test_incorrect_feature_size(self):
X_normal = np.array([-0.056523959,
- 0.881470896,
-0.249935965,
0.186624902,
-0.30183287,
2.000815584,
0.710538188,
0.591089702,
0.099804538,
0.114730483]).reshape(-1, 1)
X_error = np.array([0.660985506,
-1.450512173,
-1.27733756,
-1.420294211,
0.737179562,
1.481425898,
-0.170147132,
-1.527687346,
0.580282631,
-3.722489636]).reshape(-1, 1)
target = self._makeOne()
with self.assertRaises(ValueError):
target.fit(X_normal)
def test_anomaly_analysis_score_shape(self):
target = self._makeOne()
target.fit(self.X_normal)
pred, pmatrix = target.anomaly_analysis_score(self.X_error)
self.assertEqual(pred.shape, (3, ))
self.assertEqual(pmatrix.shape, (3, 3))
if __name__ == '__main__':
unittest.main()
| [((4946, 4961), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4959, 4961), False, 'import unittest\n'), ((399, 1317), 'numpy.array', 'np.array', (['[[0.975586009, -0.745997359, -0.229331244], [-0.460992487, -1.304668238, -\n 0.599247488], [-0.503171745, -1.308368748, -1.451411048], [-0.904446243,\n -0.287837582, 0.197153592], [-1.106120624, 0.243612535, 1.051237763], [\n 0.371920628, 1.690566027, -0.468645532], [-0.861682655, 1.472544046, -\n 0.846863556], [0.632918214, 1.35895507, -1.217528827], [0.017011646, \n 1.556247275, -0.149119024], [-1.129336215, 0.486811944, 0.012272206], [\n 0.498967152, -0.530065628, -2.14011938], [0.402460108, -0.474465633, -\n 0.041584595], [-0.847994655, -1.281269721, -0.430338406], [-0.583857254,\n 0.228815073, -1.321443286], [0.963425438, -1.136873938, 0.990406269], [\n -1.342349795, -0.147133485, 1.286410605], [-0.546153552, 0.134343445, -\n 0.380672316], [-2.264867999, 0.227795362, 1.477762968], [0.070095074, -\n 0.770899782, 2.100831522], [0.425213005, 0.796156033, 1.676164975]]'], {}), '([[0.975586009, -0.745997359, -0.229331244], [-0.460992487, -\n 1.304668238, -0.599247488], [-0.503171745, -1.308368748, -1.451411048],\n [-0.904446243, -0.287837582, 0.197153592], [-1.106120624, 0.243612535, \n 1.051237763], [0.371920628, 1.690566027, -0.468645532], [-0.861682655, \n 1.472544046, -0.846863556], [0.632918214, 1.35895507, -1.217528827], [\n 0.017011646, 1.556247275, -0.149119024], [-1.129336215, 0.486811944, \n 0.012272206], [0.498967152, -0.530065628, -2.14011938], [0.402460108, -\n 0.474465633, -0.041584595], [-0.847994655, -1.281269721, -0.430338406],\n [-0.583857254, 0.228815073, -1.321443286], [0.963425438, -1.136873938, \n 0.990406269], [-1.342349795, -0.147133485, 1.286410605], [-0.546153552,\n 0.134343445, -0.380672316], [-2.264867999, 0.227795362, 1.477762968], [\n 0.070095074, -0.770899782, 2.100831522], [0.425213005, 0.796156033, \n 1.676164975]])\n', (407, 1317), True, 'import numpy as np\n'), ((1931, 2814), 'numpy.array', 'np.array', (['[[-0.273095586, 0.356336588, 1.595876828], [-0.708547003, -0.572139833, \n 0.858932219], [-1.125947228, -1.049026454, 0.35980022], [0.653070988, -\n 0.052417831, 0.787284547], [-1.059131881, 1.621161051, -1.295306533], [\n 0.499065038, -1.064179225, 1.243325767], [0.452740621, -0.737171777, \n 0.352807563], [0.626897927, -1.100559392, -0.905560876], [1.338835274, \n 2.083549348, -1.280796042], [0.264928015, 10, 2.544472412], [-\n 0.754827534, -1.031919195, 1.227285333], [-0.774019674, 0.241245625, -\n 0.989132941], [1.298381426, 0.19445334, 2.267355363], [1.46892843, \n 1.24946146, 0.322341667], [1.057265661, -0.846614104, -0.355396321], [\n 0.810670486, -0.719804484, -0.943762163], [1.169028226, 0.492444331, \n 0.234015505], [-0.307091024, -1.56195639, 0.509095939], [0.849156845, \n 0.533674261, 0.069183014], [0.102812565, 8, 1.545239732]]'], {}), '([[-0.273095586, 0.356336588, 1.595876828], [-0.708547003, -\n 0.572139833, 0.858932219], [-1.125947228, -1.049026454, 0.35980022], [\n 0.653070988, -0.052417831, 0.787284547], [-1.059131881, 1.621161051, -\n 1.295306533], [0.499065038, -1.064179225, 1.243325767], [0.452740621, -\n 0.737171777, 0.352807563], [0.626897927, -1.100559392, -0.905560876], [\n 1.338835274, 2.083549348, -1.280796042], [0.264928015, 10, 2.544472412],\n [-0.754827534, -1.031919195, 1.227285333], [-0.774019674, 0.241245625, \n -0.989132941], [1.298381426, 0.19445334, 2.267355363], [1.46892843, \n 1.24946146, 0.322341667], [1.057265661, -0.846614104, -0.355396321], [\n 0.810670486, 
-0.719804484, -0.943762163], [1.169028226, 0.492444331, \n 0.234015505], [-0.307091024, -1.56195639, 0.509095939], [0.849156845, \n 0.533674261, 0.069183014], [0.102812565, 8, 1.545239732]])\n', (1939, 2814), True, 'import numpy as np\n'), ((3673, 3826), 'numpy.array', 'np.array', (['[-0.056523959, -0.881470896, -0.249935965, 0.186624902, -0.30183287, \n 2.000815584, 0.710538188, 0.591089702, 0.099804538, 0.114730483]'], {}), '([-0.056523959, -0.881470896, -0.249935965, 0.186624902, -\n 0.30183287, 2.000815584, 0.710538188, 0.591089702, 0.099804538, \n 0.114730483])\n', (3681, 3826), True, 'import numpy as np\n'), ((4113, 4262), 'numpy.array', 'np.array', (['[0.660985506, -1.450512173, -1.27733756, -1.420294211, 0.737179562, \n 1.481425898, -0.170147132, -1.527687346, 0.580282631, -3.722489636]'], {}), '([0.660985506, -1.450512173, -1.27733756, -1.420294211, 0.737179562,\n 1.481425898, -0.170147132, -1.527687346, 0.580282631, -3.722489636])\n', (4121, 4262), True, 'import numpy as np\n')] |
29riyasaxena/MDF | examples/MDF/states.py | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | """
Example of ModECI MDF - Testing state variables
"""
from modeci_mdf.mdf import *
import sys
def main():
mod = Model(id="States")
mod_graph = Graph(id="state_example")
mod.graphs.append(mod_graph)
## Counter node
counter_node = Node(id="counter_node")
p1 = Parameter(id="increment", value=1)
counter_node.parameters.append(p1)
p2 = Parameter(id="count", value="count + increment")
counter_node.parameters.append(p2)
op1 = OutputPort(id="out_port", value=p2.id)
counter_node.output_ports.append(op1)
mod_graph.nodes.append(counter_node)
## Sine node...
sine_node = Node(id="sine_node")
sine_node.parameters.append(Parameter(id="amp", value=3))
sine_node.parameters.append(Parameter(id="period", value=0.4))
s1 = Parameter(
id="level", default_initial_value=0, time_derivative="6.283185 * rate / period"
)
sine_node.parameters.append(s1)
s2 = Parameter(
id="rate",
default_initial_value=1,
time_derivative="-1 * 6.283185 * level / period",
)
sine_node.parameters.append(s2)
op1 = OutputPort(id="out_port", value="amp * level")
sine_node.output_ports.append(op1)
mod_graph.nodes.append(sine_node)
new_file = mod.to_json_file("%s.json" % mod.id)
new_file = mod.to_yaml_file("%s.yaml" % mod.id)
if "-run" in sys.argv:
verbose = True
# verbose = False
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.execution_engine import EvaluableGraph
eg = EvaluableGraph(mod_graph, verbose)
dt = 0.01
duration = 2
t = 0
recorded = {}
times = []
s = []
while t <= duration:
times.append(t)
print("====== Evaluating at t = %s ======" % (t))
if t == 0:
eg.evaluate() # replace with initialize?
else:
eg.evaluate(time_increment=dt)
s.append(eg.enodes["sine_node"].evaluable_outputs["out_port"].curr_value)
t += dt
if "-nogui" not in sys.argv:
import matplotlib.pyplot as plt
plt.plot(times, s)
plt.show()
if "-graph" in sys.argv:
mod.to_graph_image(
engine="dot",
output_format="png",
view_on_render=False,
level=3,
filename_root="states",
only_warn_on_fail=True, # Makes sure test of this doesn't fail on Windows on GitHub Actions
)
return mod_graph
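
# Note (added, not part of the original example): the two coupled parameters on
# sine_node form a harmonic oscillator,
#   d(level)/dt = 2*pi*rate/period,   d(rate)/dt = -2*pi*level/period,
# whose solution with level(0)=0 and rate(0)=1 is level(t) = sin(2*pi*t/period),
# so 'out_port' traces amp * sin(2*pi*t/period) over the 2 s simulation.
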
if __name__ == "__main__":
main()
| [((1568, 1602), 'modeci_mdf.execution_engine.EvaluableGraph', 'EvaluableGraph', (['mod_graph', 'verbose'], {}), '(mod_graph, verbose)\n', (1582, 1602), False, 'from modeci_mdf.execution_engine import EvaluableGraph\n'), ((2183, 2201), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 's'], {}), '(times, s)\n', (2191, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2224), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2222, 2224), True, 'import matplotlib.pyplot as plt\n')] |
jseekamp/tinkerpop | gremlin-python/src/main/jython/tests/driver/test_client.py | 5f7b7d2c4353cf2d8ee48eed6c0e5632666d16c0 | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import pytest
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.driver.client import Client
from gremlin_python.driver.request import RequestMessage
from gremlin_python.process.strategies import OptionsStrategy
from gremlin_python.process.graph_traversal import __
from gremlin_python.structure.graph import Graph
__author__ = 'David M. Brown ([email protected])'
def test_connection(connection):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
results_set = connection.write(message).result()
future = results_set.all()
results = future.result()
assert len(results) == 6
assert isinstance(results, list)
assert results_set.done.done()
assert 'host' in results_set.status_attributes
def test_client_simple_eval(client):
assert client.submit('1 + 1').all().result()[0] == 2
def test_client_simple_eval_bindings(client):
assert client.submit('x + x', {'x': 2}).all().result()[0] == 4
def test_client_eval_traversal(client):
assert len(client.submit('g.V()').all().result()) == 6
def test_client_error(client):
try:
# should fire an exception
client.submit('1/0').all().result()
assert False
except GremlinServerError as ex:
assert 'exceptions' in ex.status_attributes
assert 'stackTrace' in ex.status_attributes
def test_client_connection_pool_after_error(client):
# Overwrite fixture with pool_size=1 client
client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
try:
# should fire an exception
client.submit('1/0').all().result()
assert False
except GremlinServerError as gse:
# expecting the pool size to be 1 again after query returned
assert gse.status_code == 597
assert client.available_pool_size == 1
def test_client_bytecode(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
def test_client_bytecode_options(client):
# smoke test to validate serialization of OptionsStrategy. no way to really validate this from an integration
# test perspective because there's no way to access the internals of the strategy via bytecode
g = Graph().traversal()
t = g.withStrategies(OptionsStrategy(options={"x": "test", "y": True})).V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
##
t = g.with_("x", "test").with_("y", True).V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
assert len(result_set.all().result()) == 6
def test_iterate_result_set(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 6
def test_client_async(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
future = client.submitAsync(message)
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_connection_share(client):
# Overwrite fixture with pool_size=1 client
client = Client('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
future = client.submitAsync(message)
future2 = client.submitAsync(message2)
result_set2 = future2.result()
assert len(result_set2.all().result()) == 6
# This future has to finish for the second to yield result - pool_size=1
assert future.done()
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_multi_conn_pool(client):
g = Graph().traversal()
t = g.V()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
message2 = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}})
client = Client('ws://localhost:45940/gremlin', 'g', pool_size=1)
future = client.submitAsync(message)
future2 = client.submitAsync(message2)
result_set2 = future2.result()
assert len(result_set2.all().result()) == 6
# with connection pool `future` may or may not be done here
result_set = future.result()
assert len(result_set.all().result()) == 6
def test_big_result_set(client):
g = Graph().traversal()
t = g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1
t = g.V().limit(10)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10
t = g.V().limit(100)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 100
t = g.V().limit(1000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1000
t = g.V().limit(10000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10000
def test_big_result_set_secure(secure_client):
g = Graph().traversal()
t = g.inject(1).repeat(__.addV('person').property('name', __.loops())).times(20000).count()
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1
t = g.V().limit(10)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10
t = g.V().limit(100)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 100
t = g.V().limit(1000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 1000
t = g.V().limit(10000)
message = RequestMessage('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases': {'g': 'g'}})
result_set = secure_client.submit(message)
results = []
for result in result_set:
results += result
assert len(results) == 10000
| [((1314, 1411), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (1328, 1411), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((2385, 2447), 'gremlin_python.driver.client.Client', 'Client', (['"""ws://localhost:45940/gremlin"""', '"""gmodern"""'], {'pool_size': '(1)'}), "('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)\n", (2391, 2447), False, 'from gremlin_python.driver.client import Client\n'), ((2842, 2939), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (2856, 2939), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((3402, 3499), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (3416, 3499), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((3654, 3751), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (3668, 3751), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((3930, 4027), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (3944, 4027), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((4255, 4352), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (4269, 4352), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((4568, 4630), 'gremlin_python.driver.client.Client', 'Client', (['"""ws://localhost:45940/gremlin"""', '"""gmodern"""'], {'pool_size': '(1)'}), "('ws://localhost:45940/gremlin', 'gmodern', pool_size=1)\n", (4574, 4630), False, 'from gremlin_python.driver.client import Client\n'), ((4687, 4784), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (4701, 4784), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((4796, 4893), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (4810, 4893), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((5333, 5430), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 
'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (5347, 5430), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((5442, 5539), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'gmodern'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'gmodern'}})\n", (5456, 5539), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((5549, 5605), 'gremlin_python.driver.client.Client', 'Client', (['"""ws://localhost:45940/gremlin"""', '"""g"""'], {'pool_size': '(1)'}), "('ws://localhost:45940/gremlin', 'g', pool_size=1)\n", (5555, 5605), False, 'from gremlin_python.driver.client import Client\n'), ((6092, 6183), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (6106, 6183), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((6361, 6452), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (6375, 6452), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((6632, 6723), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (6646, 6723), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((6905, 6996), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (6919, 6996), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((7180, 7271), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (7194, 7271), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((7601, 7692), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (7615, 7692), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((7877, 7968), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (7891, 7968), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((8155, 8246), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (8169, 8246), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((8435, 
8526), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (8449, 8526), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((8717, 8808), 'gremlin_python.driver.request.RequestMessage', 'RequestMessage', (['"""traversal"""', '"""bytecode"""', "{'gremlin': t.bytecode, 'aliases': {'g': 'g'}}"], {}), "('traversal', 'bytecode', {'gremlin': t.bytecode, 'aliases':\n {'g': 'g'}})\n", (8731, 8808), False, 'from gremlin_python.driver.request import RequestMessage\n'), ((1266, 1273), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (1271, 1273), False, 'from gremlin_python.structure.graph import Graph\n'), ((2794, 2801), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (2799, 2801), False, 'from gremlin_python.structure.graph import Graph\n'), ((3288, 3295), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (3293, 3295), False, 'from gremlin_python.structure.graph import Graph\n'), ((3882, 3889), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (3887, 3889), False, 'from gremlin_python.structure.graph import Graph\n'), ((4207, 4214), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (4212, 4214), False, 'from gremlin_python.structure.graph import Graph\n'), ((4639, 4646), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (4644, 4646), False, 'from gremlin_python.structure.graph import Graph\n'), ((5285, 5292), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (5290, 5292), False, 'from gremlin_python.structure.graph import Graph\n'), ((5962, 5969), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (5967, 5969), False, 'from gremlin_python.structure.graph import Graph\n'), ((7471, 7478), 'gremlin_python.structure.graph.Graph', 'Graph', ([], {}), '()\n', (7476, 7478), False, 'from gremlin_python.structure.graph import Graph\n'), ((3333, 3382), 'gremlin_python.process.strategies.OptionsStrategy', 'OptionsStrategy', ([], {'options': "{'x': 'test', 'y': True}"}), "(options={'x': 'test', 'y': True})\n", (3348, 3382), False, 'from gremlin_python.process.strategies import OptionsStrategy\n'), ((6044, 6054), 'gremlin_python.process.graph_traversal.__.loops', '__.loops', ([], {}), '()\n', (6052, 6054), False, 'from gremlin_python.process.graph_traversal import __\n'), ((7553, 7563), 'gremlin_python.process.graph_traversal.__.loops', '__.loops', ([], {}), '()\n', (7561, 7563), False, 'from gremlin_python.process.graph_traversal import __\n'), ((6009, 6026), 'gremlin_python.process.graph_traversal.__.addV', '__.addV', (['"""person"""'], {}), "('person')\n", (6016, 6026), False, 'from gremlin_python.process.graph_traversal import __\n'), ((7518, 7535), 'gremlin_python.process.graph_traversal.__.addV', '__.addV', (['"""person"""'], {}), "('person')\n", (7525, 7535), False, 'from gremlin_python.process.graph_traversal import __\n')] |
henrysky/gaia_tools | gaia_tools/xmatch/__init__.py | c151a1d8f6896d8ef5a379291baa8a1f027bd53b | # Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
colRA1='RA',colDec1='DEC',epoch1=None,
colRA2='RA',colDec2='DEC',epoch2=None,
colpmRA2='pmra',colpmDec2='pmdec',
swap=False,
col_field=None):
"""
NAME:
xmatch
PURPOSE:
cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
INPUT:
cat1 - First catalog
cat2 - Second catalog
maxdist= (2) maximum distance in arcsec
colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
epoch1= (2000.) epoch of the coordinates in cat1
colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
epoch2= (2000.) epoch of the coordinates in cat2
colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
       swap= (False) if False, find the closest match in cat2 for each cat1 source; if True, do the opposite (important when one of the catalogs has duplicates)
col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
OUTPUT:
(index into cat1 of matching objects,
index into cat2 of matching objects,
angular separation between matching objects)
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2019-07-07 - add additional catalog field matching - Leung (UofT)
"""
if epoch1 is None:
if 'ref_epoch' in cat1.dtype.fields:
epoch1= cat1['ref_epoch']
else:
epoch1= 2000.
if epoch2 is None:
if 'ref_epoch' in cat2.dtype.fields:
epoch2= cat2['ref_epoch']
else:
epoch2= 2000.
_check_epoch(cat1,epoch1)
_check_epoch(cat2,epoch2)
depoch= epoch2-epoch1
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat2[colpmDec2]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat2[colpmRA2])]= 0.
ddec[numpy.isnan(cat2[colpmDec2])]= 0.
else:
dra= 0.
ddec= 0.
mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
unit=(u.degree, u.degree),frame='icrs')
if col_field is not None:
try: # check if the field actually exists in both cat1/cat2
cat1[col_field]
cat2[col_field]
except KeyError: # python 2/3 format string
raise KeyError("'%s' does not exist in both catalog" % col_field)
uniques = numpy.unique(cat1[col_field])
if swap: # times neg one to indicate those indices untouch will be noticed at the end and filtered out
d2d = numpy.ones(len(cat2)) * -1.
idx = numpy.zeros(len(cat2), dtype=int)
else:
d2d = numpy.ones(len(cat1)) * -1.
idx = numpy.zeros(len(cat1), dtype=int)
for unique in uniques: # loop over the class
idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
if idx_1.shape[0] == 0 or idx_2.shape[0] == 0: # the case where a class only exists in one but not the other
continue
if swap:
temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
m1 = numpy.arange(len(cat2))
idx[cat2[col_field] == unique] = idx_1[temp_idx]
d2d[cat2[col_field] == unique] = temp_d2d
else:
temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
m1 = numpy.arange(len(cat1))
idx[cat1[col_field] == unique] = idx_2[temp_idx]
d2d[cat1[col_field] == unique] = temp_d2d
d2d = d2d * temp_d2d.unit # make sure finally we have an unit on d2d array s.t. "<" operation can complete
else:
if swap:
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
m1= numpy.arange(len(cat2))
else:
idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
m1= numpy.arange(len(cat1))
# to make sure filtering out all neg ones which are untouched
mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
m1= m1[mindx]
m2= idx[mindx]
if swap:
return (m2,m1,d2d[mindx])
else:
return (m1,m2,d2d[mindx])
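
# Hedged usage sketch (added for illustration, not part of the original module).
# The catalogs and their column names ('RA'/'DEC' in cat1; 'ra'/'dec' plus
# Gaia-style 'pmra'/'pmdec' in cat2, observed at epoch 2015.5) are assumptions
# about the caller's data, not requirements of the library.
def _example_xmatch_usage(cat1, cat2):
    m1, m2, sep = xmatch(cat1, cat2, maxdist=2.,
                          colRA1='RA', colDec1='DEC',
                          colRA2='ra', colDec2='dec', epoch2=2015.5)
    # Rows of each catalog that found a counterpart, plus their separations
    return cat1[m1], cat2[m2], sep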
def cds(cat,xcat='vizier:I/350/gaiaedr3',maxdist=2,colRA='RA',colDec='DEC',
selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
savefilename=None,gaia_all_columns=False):
"""
NAME:
cds
PURPOSE:
Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
INPUT:
cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
xcat= ('vizier:I/350/gaiaedr3') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
maxdist= (2) maximum distance in arcsec
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
OUTPUT:
(xcat entries for those that match,
indices into cat of matching sources: index[0] is cat index of xcat[0])
HISTORY:
2016-09-12 - Written based on RC catalog code - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2018-05-08 - Added gaia_all_columns - Bovy (UofT)
"""
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
# Write positions
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(cat)):
            wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
                         cat[ii][colDec]-ddec[ii]])
_cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
# Directly match on input RA
ma= cds_load(resultfilename)
if gaia_all_columns:
from astroquery.gaia import Gaia
# Write another temporary file with the XML output of the cross-match
tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
names=('source_id','RA','DEC'),
dtype=('int64','float64','float64'))
xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
tab.write(xmlfilename,format='votable')
#get the data release....
table_identifier = xcat.split('/')[-1]
if table_identifier == 'gaia2':
table_identifier = 'gaiadr2'
try:
job= Gaia.launch_job_async(
"""select g.*, m.RA as mRA, m.DEC as mDEC
from %s.gaia_source as g
inner join tap_upload.my_table as m on m.source_id = g.source_id""" % table_identifier,
upload_resource=xmlfilename,
upload_table_name="my_table")
ma= job.get_results()
except:
            print("gaia_tools.xmatch.cds failed to retrieve all gaia columns, returning just the default returned by the CDS xMatch instead...")
else:
ma.rename_column('mra','RA')
ma.rename_column('mdec','DEC')
finally:
os.remove(xmlfilename)
# Remove temporary files
os.remove(posfilename)
if savefilename is None:
os.remove(resultfilename)
else:
shutil.move(resultfilename,savefilename)
# Match back to the original catalog
mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
colpmRA=colpmRA,colpmDec=colpmDec)
return (ma,mai)
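
# Hedged usage sketch (added for illustration, not part of the original module):
# match a local catalog against Gaia EDR3 via the CDS xMatch service and keep
# only the rows of `cat` that found a counterpart. The 'RA'/'DEC' column names
# are assumptions about the caller's catalog.
def _example_cds_usage(cat):
    gaia, idx = cds(cat, xcat='vizier:I/350/gaiaedr3', maxdist=2.,
                    colRA='RA', colDec='DEC')
    return gaia, cat[idx]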
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
nruns_necessary=1):
"""CDS xMatch (sometimes?) fails for large matches, because of a time-out,
so we recursively split until the batches are small enough to not fail"""
# Figure out which of the hierarchy we are running
try:
runs= ''.join([str(int(r)-1)
for r in posfilename.split('csv.')[-1].split('.')])
except ValueError:
runs= ''
nruns= 2**len(runs)
if nruns >= nruns_necessary:
# Only run this level's match if we don't already know that we should
# be using smaller batches
_cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
try:
ma= cds_load(resultfilename)
except ValueError: # Assume this is the time-out failure
pass
else:
return nruns
# xMatch failed because of time-out, split
posfilename1= posfilename+'.1'
posfilename2= posfilename+'.2'
resultfilename1= resultfilename+'.1'
resultfilename2= resultfilename+'.2'
# Figure out which of the hierarchy we are running
runs= ''.join([str(int(r)-1)
for r in posfilename1.split('csv.')[-1].split('.')])
nruns= 2**len(runs)
thisrun1= 1+int(runs,2)
thisrun2= 1+int(''.join([str(int(r)-1)
for r in posfilename2.split('csv.')[-1].split('.')]),2)
# Count the number of objects
with open(posfilename,'r') as posfile:
num_lines= sum(1 for line in posfile)
# Write the header line
with open(posfilename1,'w') as posfile1:
with open(posfilename,'r') as posfile:
posfile1.write(posfile.readline())
with open(posfilename2,'w') as posfile2:
with open(posfilename,'r') as posfile:
posfile2.write(posfile.readline())
# Cut in half
cnt= 0
with open(posfilename,'r') as posfile:
with open(posfilename1,'a') as posfile1:
with open(posfilename2,'a') as posfile2:
for line in posfile:
if cnt == 0:
cnt+= 1
continue
if cnt < num_lines//2:
posfile1.write(line)
cnt+= 1 # Can stop counting once this if is done
else:
posfile2.write(line)
# Run each
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun1,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename1,posfilename1,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun2,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename2,posfilename2,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
# Combine results
with open(resultfilename,'w') as resultfile:
with open(resultfilename1,'r') as resultfile1:
for line in resultfile1:
resultfile.write(line)
with open(resultfilename2,'r') as resultfile2:
for line in resultfile2:
if line[0] == 'a': continue
resultfile.write(line)
# Remove intermediate files
os.remove(posfilename1)
os.remove(posfilename2)
os.remove(resultfilename1)
os.remove(resultfilename2)
return nruns_necessary
def _cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat):
# Send to CDS for matching
result= open(resultfilename,'w')
try:
subprocess.check_call(['curl',
'-X','POST',
'-F','request=xmatch',
'-F','distMaxArcsec=%i' % maxdist,
'-F','selection=%s' % selection,
'-F','RESPONSEFORMAT=csv',
'-F','cat1=@%s' % os.path.basename(posfilename),
'-F','colRA1=RA',
'-F','colDec1=DEC',
'-F','cat2=%s' % xcat,
'http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync'],
stdout=result)
except subprocess.CalledProcessError:
os.remove(posfilename)
if os.path.exists(resultfilename):
result.close()
os.remove(resultfilename)
result.close()
return None
def cds_load(filename):
if WIN32:
        # Windows builds of numpy lack float128, and reading source_id as a
        # double would lose precision. Work around this by first reading every
        # column as float64 (only to discover the column names), then re-reading
        # with source_id typed as an unsigned 64-bit integer so its full
        # precision is kept.
data = numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True, max_rows=1,
dtype='float64') # only read the first row max to reduce workload to just get the column name
to_list = list(data.dtype.names)
# construct a list where everything is fp64 except 'source_id' being int64
dtype_list = [('{}'.format(i), numpy.float64) for i in to_list]
dtype_list[dtype_list.index(('source_id', numpy.float64))] = ('source_id', numpy.uint64)
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype=dtype_list)
else:
return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
filling_values=-9999.99, names=True,
dtype='float128')
def cds_matchback(cat,xcat,colRA='RA',colDec='DEC',selection='best',
epoch=None,colpmRA='pmra',colpmDec='pmdec',):
"""
NAME:
cds_matchback
PURPOSE:
Match a matched catalog from xmatch.cds back to the original catalog
INPUT
cat - original catalog
xcat - matched catalog returned by xmatch.cds
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
OUTPUT:
Array indices into cat of xcat entries: index[0] is cat index of xcat[0]
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2018-05-04 - Account for non-zero epoch difference - Bovy (UofT)
"""
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
# xmatch to v. small diff., because match is against *original* coords,
# not matched coords in CDS
mc1= acoords.SkyCoord(cat[colRA]-dra,cat[colDec]-ddec,
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(xcat['RA'],xcat['DEC'],
unit=(u.degree, u.degree),frame='icrs')
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
mindx= d2d < 1e-5*u.arcsec
return idx[mindx]
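
# Hedged usage sketch (added for illustration, not part of the original module):
# re-attach a CDS result file saved earlier (via savefilename= in cds) to the
# catalog it was generated from.
def _example_cds_matchback_usage(cat, savefilename):
    xcat = cds_load(savefilename)
    idx = cds_matchback(cat, xcat, colRA='RA', colDec='DEC')
    return xcat, cat[idx]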
def _check_epoch(cat,epoch):
warn_about_epoch= False
if 'ref_epoch' in cat.dtype.fields:
if 'designation' not in cat.dtype.fields: # Assume this is DR1
if numpy.any(numpy.fabs(epoch-2015.) > 0.01):
warn_about_epoch= True
elif 'Gaia DR2' in cat['designation'][0].decode('utf-8'):
if numpy.any(numpy.fabs(epoch-2015.5) > 0.01):
warn_about_epoch= True
if warn_about_epoch:
warnings.warn("You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches")
return None
| [((171, 188), 'platform.system', 'platform.system', ([], {}), '()\n', (186, 188), False, 'import platform\n'), ((2807, 2831), 'numpy.any', 'numpy.any', (['(depoch != 0.0)'], {}), '(depoch != 0.0)\n', (2816, 2831), False, 'import numpy\n'), ((3246, 3336), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (['cat1[colRA1]', 'cat1[colDec1]'], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(cat1[colRA1], cat1[colDec1], unit=(u.degree, u.degree),\n frame='icrs')\n", (3262, 3336), True, 'import astropy.coordinates as acoords\n'), ((3366, 3469), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (['(cat2[colRA2] - dra)', '(cat2[colDec2] - ddec)'], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(cat2[colRA2] - dra, cat2[colDec2] - ddec, unit=(u.degree,\n u.degree), frame='icrs')\n", (3382, 3469), True, 'import astropy.coordinates as acoords\n'), ((7993, 8017), 'numpy.any', 'numpy.any', (['(depoch != 0.0)'], {}), '(depoch != 0.0)\n', (8002, 8017), False, 'import numpy\n'), ((10529, 10551), 'os.remove', 'os.remove', (['posfilename'], {}), '(posfilename)\n', (10538, 10551), False, 'import os\n'), ((13409, 13427), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13425, 13427), False, 'import sys\n'), ((13756, 13774), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13772, 13774), False, 'import sys\n'), ((13986, 14027), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + _ERASESTR + '\\r')"], {}), "('\\r' + _ERASESTR + '\\r')\n", (14002, 14027), False, 'import sys\n'), ((14028, 14046), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14044, 14046), False, 'import sys\n'), ((14460, 14483), 'os.remove', 'os.remove', (['posfilename1'], {}), '(posfilename1)\n', (14469, 14483), False, 'import os\n'), ((14488, 14511), 'os.remove', 'os.remove', (['posfilename2'], {}), '(posfilename2)\n', (14497, 14511), False, 'import os\n'), ((14516, 14542), 'os.remove', 'os.remove', (['resultfilename1'], {}), '(resultfilename1)\n', (14525, 14542), False, 'import os\n'), ((14547, 14573), 'os.remove', 'os.remove', (['resultfilename2'], {}), '(resultfilename2)\n', (14556, 14573), False, 'import os\n'), ((18577, 18601), 'numpy.any', 'numpy.any', (['(depoch != 0.0)'], {}), '(depoch != 0.0)\n', (18586, 18601), False, 'import numpy\n'), ((19152, 19252), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (['(cat[colRA] - dra)', '(cat[colDec] - ddec)'], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(cat[colRA] - dra, cat[colDec] - ddec, unit=(u.degree, u.\n degree), frame='icrs')\n", (19168, 19252), True, 'import astropy.coordinates as acoords\n'), ((19277, 19364), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (["xcat['RA']", "xcat['DEC']"], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(xcat['RA'], xcat['DEC'], unit=(u.degree, u.degree), frame=\n 'icrs')\n", (19293, 19364), True, 'import astropy.coordinates as acoords\n'), ((3791, 3820), 'numpy.unique', 'numpy.unique', (['cat1[col_field]'], {}), '(cat1[col_field])\n', (3803, 3820), False, 'import numpy\n'), ((8812, 8873), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n", (8822, 8873), False, 'import csv\n'), ((10589, 10614), 'os.remove', 'os.remove', (['resultfilename'], {}), '(resultfilename)\n', (10598, 10614), False, 'import os\n'), ((10633, 10674), 'shutil.move', 'shutil.move', (['resultfilename', 'savefilename'], {}), '(resultfilename, savefilename)\n', (10644, 10674), False, 
'import shutil\n'), ((15985, 16112), 'numpy.genfromtxt', 'numpy.genfromtxt', (['filename'], {'delimiter': '""","""', 'skip_header': '(0)', 'filling_values': '(-9999.99)', 'names': '(True)', 'max_rows': '(1)', 'dtype': '"""float64"""'}), "(filename, delimiter=',', skip_header=0, filling_values=-\n 9999.99, names=True, max_rows=1, dtype='float64')\n", (16001, 16112), False, 'import numpy\n'), ((16559, 16675), 'numpy.genfromtxt', 'numpy.genfromtxt', (['filename'], {'delimiter': '""","""', 'skip_header': '(0)', 'filling_values': '(-9999.99)', 'names': '(True)', 'dtype': 'dtype_list'}), "(filename, delimiter=',', skip_header=0, filling_values=-\n 9999.99, names=True, dtype=dtype_list)\n", (16575, 16675), False, 'import numpy\n'), ((16760, 16876), 'numpy.genfromtxt', 'numpy.genfromtxt', (['filename'], {'delimiter': '""","""', 'skip_header': '(0)', 'filling_values': '(-9999.99)', 'names': '(True)', 'dtype': '"""float128"""'}), "(filename, delimiter=',', skip_header=0, filling_values=-\n 9999.99, names=True, dtype='float128')\n", (16776, 16876), False, 'import numpy\n'), ((19948, 20111), 'warnings.warn', 'warnings.warn', (['"""You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches"""'], {}), "(\n 'You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches'\n )\n", (19961, 20111), False, 'import warnings\n'), ((8684, 8695), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8693, 8695), False, 'import os\n'), ((8744, 8755), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8753, 8755), False, 'import os\n'), ((9821, 10071), 'astroquery.gaia.Gaia.launch_job_async', 'Gaia.launch_job_async', (['("""select g.*, m.RA as mRA, m.DEC as mDEC\nfrom %s.gaia_source as g\ninner join tap_upload.my_table as m on m.source_id = g.source_id"""\n % table_identifier)'], {'upload_resource': 'xmlfilename', 'upload_table_name': '"""my_table"""'}), '(\n """select g.*, m.RA as mRA, m.DEC as mDEC\nfrom %s.gaia_source as g\ninner join tap_upload.my_table as m on m.source_id = g.source_id"""\n % table_identifier, upload_resource=xmlfilename, upload_table_name=\n \'my_table\')\n', (9842, 10071), False, 'from astroquery.gaia import Gaia\n'), ((10473, 10495), 'os.remove', 'os.remove', (['xmlfilename'], {}), '(xmlfilename)\n', (10482, 10495), False, 'import os\n'), ((15490, 15512), 'os.remove', 'os.remove', (['posfilename'], {}), '(posfilename)\n', (15499, 15512), False, 'import os\n'), ((15524, 15554), 'os.path.exists', 'os.path.exists', (['resultfilename'], {}), '(resultfilename)\n', (15538, 15554), False, 'import os\n'), ((3114, 3141), 'numpy.isnan', 'numpy.isnan', (['cat2[colpmRA2]'], {}), '(cat2[colpmRA2])\n', (3125, 3141), False, 'import numpy\n'), ((3160, 3188), 'numpy.isnan', 'numpy.isnan', (['cat2[colpmDec2]'], {}), '(cat2[colpmDec2])\n', (3171, 3188), False, 'import numpy\n'), ((4218, 4253), 'numpy.arange', 'numpy.arange', (['cat1[colRA1].shape[0]'], {}), '(cat1[colRA1].shape[0])\n', (4230, 4253), False, 'import numpy\n'), ((4301, 4336), 'numpy.arange', 'numpy.arange', (['cat2[colRA2].shape[0]'], {}), '(cat2[colRA2].shape[0])\n', (4313, 4336), False, 'import numpy\n'), ((8294, 8319), 'numpy.isnan', 'numpy.isnan', (['cat[colpmRA]'], {}), '(cat[colpmRA])\n', (8305, 8319), False, 'import numpy\n'), ((8338, 8364), 'numpy.isnan', 'numpy.isnan', (['cat[colpmDec]'], {}), '(cat[colpmDec])\n', (8349, 8364), False, 'import numpy\n'), ((9360, 9411), 'numpy.array', 'numpy.array', 
(["[ma['source_id'], ma['RA'], ma['DEC']]"], {}), "([ma['source_id'], ma['RA'], ma['DEC']])\n", (9371, 9411), False, 'import numpy\n'), ((9568, 9579), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9577, 9579), False, 'import os\n'), ((15595, 15620), 'os.remove', 'os.remove', (['resultfilename'], {}), '(resultfilename)\n', (15604, 15620), False, 'import os\n'), ((18878, 18903), 'numpy.isnan', 'numpy.isnan', (['cat[colpmRA]'], {}), '(cat[colpmRA])\n', (18889, 18903), False, 'import numpy\n'), ((18922, 18948), 'numpy.isnan', 'numpy.isnan', (['cat[colpmDec]'], {}), '(cat[colpmDec])\n', (18933, 18948), False, 'import numpy\n'), ((2925, 2968), 'numpy.cos', 'numpy.cos', (['(cat2[colDec2] / 180.0 * numpy.pi)'], {}), '(cat2[colDec2] / 180.0 * numpy.pi)\n', (2934, 2968), False, 'import numpy\n'), ((8109, 8150), 'numpy.cos', 'numpy.cos', (['(cat[colDec] / 180.0 * numpy.pi)'], {}), '(cat[colDec] / 180.0 * numpy.pi)\n', (8118, 8150), False, 'import numpy\n'), ((15126, 15155), 'os.path.basename', 'os.path.basename', (['posfilename'], {}), '(posfilename)\n', (15142, 15155), False, 'import os\n'), ((18693, 18734), 'numpy.cos', 'numpy.cos', (['(cat[colDec] / 180.0 * numpy.pi)'], {}), '(cat[colDec] / 180.0 * numpy.pi)\n', (18702, 18734), False, 'import numpy\n'), ((19679, 19705), 'numpy.fabs', 'numpy.fabs', (['(epoch - 2015.0)'], {}), '(epoch - 2015.0)\n', (19689, 19705), False, 'import numpy\n'), ((19842, 19868), 'numpy.fabs', 'numpy.fabs', (['(epoch - 2015.5)'], {}), '(epoch - 2015.5)\n', (19852, 19868), False, 'import numpy\n')] |
ThomasLecat/ray | rllib/agents/dqn/dqn_torch_policy.py | eb025ea8cb27583e8ef6287f5654f23d1ab270ef | from typing import Dict, List, Tuple
import gym
import ray
from ray.rllib.agents.a3c.a3c_torch_policy import apply_grad_clipping
from ray.rllib.agents.dqn.dqn_tf_policy import (
PRIO_WEIGHTS, Q_SCOPE, Q_TARGET_SCOPE, postprocess_nstep_and_prio)
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDistributionWrapper)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.exploration.parameter_noise import ParameterNoise
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import (FLOAT_MIN, huber_loss,
reduce_mean_ignore_inf,
softmax_cross_entropy_with_logits)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
class QLoss:
def __init__(self,
q_t_selected,
q_logits_t_selected,
q_tp1_best,
q_probs_tp1_best,
importance_weights,
rewards,
done_mask,
gamma=0.99,
n_step=1,
num_atoms=1,
v_min=-10.0,
v_max=10.0):
if num_atoms > 1:
            # Distributional Q-learning, which uses a cross-entropy loss
z = torch.range(0.0, num_atoms - 1, dtype=torch.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
# (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
r_tau = torch.unsqueeze(
rewards, -1) + gamma**n_step * torch.unsqueeze(
1.0 - done_mask, -1) * torch.unsqueeze(z, 0)
r_tau = torch.clamp(r_tau, v_min, v_max)
b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
lb = torch.floor(b)
ub = torch.ceil(b)
            # Handle the corner case, missed in many implementations, where b
            # happens to be exactly an integer: then lb == ub, so pr_j(s', a*)
            # would be discarded because (ub - b) == (b - lb) == 0.
floor_equal_ceil = (ub - lb < 0.5).float()
# (batch_size, num_atoms, num_atoms)
l_project = F.one_hot(lb.long(), num_atoms)
# (batch_size, num_atoms, num_atoms)
u_project = F.one_hot(ub.long(), num_atoms)
ml_delta = q_probs_tp1_best * (ub - b + floor_equal_ceil)
mu_delta = q_probs_tp1_best * (b - lb)
ml_delta = torch.sum(
l_project * torch.unsqueeze(ml_delta, -1), dim=1)
mu_delta = torch.sum(
u_project * torch.unsqueeze(mu_delta, -1), dim=1)
m = ml_delta + mu_delta
# Rainbow paper claims that using this cross entropy loss for
# priority is robust and insensitive to `prioritized_replay_alpha`
self.td_error = softmax_cross_entropy_with_logits(
logits=q_logits_t_selected, labels=m)
self.loss = torch.mean(self.td_error * importance_weights)
self.stats = {
# TODO: better Q stats for dist dqn
"mean_td_error": torch.mean(self.td_error),
}
else:
q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked
# compute the error (potentially clipped)
self.td_error = q_t_selected - q_t_selected_target.detach()
self.loss = torch.mean(
importance_weights.float() * huber_loss(self.td_error))
self.stats = {
"mean_q": torch.mean(q_t_selected),
"min_q": torch.min(q_t_selected),
"max_q": torch.max(q_t_selected),
"mean_td_error": torch.mean(self.td_error),
}
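# --- Editor's illustrative sketch (not part of the original policy file) ---
# The non-distributional branch above builds the n-step TD target
# rewards + gamma**n_step * (1 - done) * max_a' Q_target(s', a').
# A tiny plain-Python version of that arithmetic; the helper name is made up.
def _td_target_sketch(reward, q_tp1_best, done, gamma=0.99, n_step=1):
    return reward + gamma ** n_step * (1.0 - done) * q_tp1_best
# _td_target_sketch(1.0, 5.0, 0.0) == 5.95; with done=1.0 the bootstrap term vanishes.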
class ComputeTDErrorMixin:
def __init__(self):
def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})
input_dict[SampleBatch.ACTIONS] = act_t
input_dict[SampleBatch.REWARDS] = rew_t
input_dict[SampleBatch.NEXT_OBS] = obs_tp1
input_dict[SampleBatch.DONES] = done_mask
input_dict[PRIO_WEIGHTS] = importance_weights
# Do forward pass on loss to update td error attribute
build_q_losses(self, self.model, None, input_dict)
return self.q_loss.td_error
self.compute_td_error = compute_td_error
def build_q_model_and_distribution(
policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> Tuple[ModelV2, TorchDistributionWrapper]:
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
"Action space {} is not supported for DQN.".format(action_space))
if config["hiddens"]:
# try to infer the last layer size, otherwise fall back to 256
num_outputs = ([256] + config["model"]["fcnet_hiddens"])[-1]
config["model"]["no_final_linear"] = True
else:
num_outputs = action_space.n
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm = (
isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise")
policy.q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.q_func_vars = policy.q_model.variables()
policy.target_q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_TARGET_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.target_q_func_vars = policy.target_q_model.variables()
return policy.q_model, TorchCategorical
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
is_training: bool = False,
**kwargs) -> Tuple[TensorType, type, List[TensorType]]:
q_vals = compute_q_values(policy, model, obs_batch, explore, is_training)
q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
policy.q_values = q_vals
return policy.q_values, TorchCategorical, [] # state-out
def build_q_losses(policy: Policy, model, _,
train_batch: SampleBatch) -> TensorType:
config = policy.config
# Q-network evaluation.
q_t, q_logits_t, q_probs_t = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.CUR_OBS],
explore=False,
is_training=True)
# Target Q-network evaluation.
q_tp1, q_logits_tp1, q_probs_tp1 = compute_q_values(
policy,
policy.target_q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
# Q scores for actions which we know were selected in the given state.
one_hot_selection = F.one_hot(train_batch[SampleBatch.ACTIONS],
policy.action_space.n)
q_t_selected = torch.sum(
torch.where(q_t > FLOAT_MIN, q_t,
torch.tensor(0.0, device=policy.device)) *
one_hot_selection, 1)
q_logits_t_selected = torch.sum(
q_logits_t * torch.unsqueeze(one_hot_selection, -1), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
q_tp1_using_online_net, q_logits_tp1_using_online_net, \
q_dist_tp1_using_online_net = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
q_tp1_best_using_online_net = torch.argmax(q_tp1_using_online_net, 1)
q_tp1_best_one_hot_selection = F.one_hot(q_tp1_best_using_online_net,
policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
else:
q_tp1_best_one_hot_selection = F.one_hot(
torch.argmax(q_tp1, 1), policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
policy.q_loss = QLoss(
q_t_selected, q_logits_t_selected, q_tp1_best, q_probs_tp1_best,
train_batch[PRIO_WEIGHTS], train_batch[SampleBatch.REWARDS],
train_batch[SampleBatch.DONES].float(), config["gamma"],
config["n_step"], config["num_atoms"], config["v_min"],
config["v_max"])
return policy.q_loss.loss
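# --- Editor's illustrative sketch (not part of the original policy file) ---
# When config["double_q"] is True above, the online network picks the greedy
# next action and the target network evaluates it. A minimal list-based
# version of that selection/evaluation split; the helper name is made up.
def _double_q_bootstrap_sketch(q_online_tp1, q_target_tp1):
    best_action = max(range(len(q_online_tp1)), key=lambda a: q_online_tp1[a])
    return q_target_tp1[best_action]
# _double_q_bootstrap_sketch([1.0, 3.0], [0.5, 2.0]) == 2.0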
def adam_optimizer(policy: Policy,
config: TrainerConfigDict) -> "torch.optim.Optimizer":
return torch.optim.Adam(
policy.q_func_vars, lr=policy.cur_lr, eps=config["adam_epsilon"])
def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return dict({
"cur_lr": policy.cur_lr,
}, **policy.q_loss.stats)
def setup_early_mixins(policy: Policy, obs_space, action_space,
config: TrainerConfigDict) -> None:
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def after_init(policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> None:
ComputeTDErrorMixin.__init__(policy)
TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
    # Move target net to device (this is done automatically for the
# policy.model, but not for any other models the policy has).
policy.target_q_model = policy.target_q_model.to(policy.device)
def compute_q_values(policy: Policy,
model: ModelV2,
obs: TensorType,
explore,
is_training: bool = False):
config = policy.config
model_out, state = model({
SampleBatch.CUR_OBS: obs,
"is_training": is_training,
}, [], None)
if config["num_atoms"] > 1:
(action_scores, z, support_logits_per_action, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
else:
(action_scores, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
if config["dueling"]:
state_score = model.get_state_value(model_out)
if policy.config["num_atoms"] > 1:
support_logits_per_action_mean = torch.mean(
support_logits_per_action, dim=1)
support_logits_per_action_centered = (
support_logits_per_action - torch.unsqueeze(
support_logits_per_action_mean, dim=1))
support_logits_per_action = torch.unsqueeze(
state_score, dim=1) + support_logits_per_action_centered
support_prob_per_action = nn.functional.softmax(
support_logits_per_action)
value = torch.sum(z * support_prob_per_action, dim=-1)
logits = support_logits_per_action
probs_or_logits = support_prob_per_action
else:
advantages_mean = reduce_mean_ignore_inf(action_scores, 1)
advantages_centered = action_scores - torch.unsqueeze(
advantages_mean, 1)
value = state_score + advantages_centered
else:
value = action_scores
return value, logits, probs_or_logits
def grad_process_and_td_error_fn(policy: Policy,
optimizer: "torch.optim.Optimizer",
loss: TensorType) -> Dict[str, TensorType]:
# Clip grads if configured.
return apply_grad_clipping(policy, optimizer, loss)
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
action_dist) -> Dict[str, TensorType]:
return {"q_values": policy.q_values}
DQNTorchPolicy = build_torch_policy(
name="DQNTorchPolicy",
loss_fn=build_q_losses,
get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
make_model_and_action_dist=build_q_model_and_distribution,
action_distribution_fn=get_distribution_inputs_and_class,
stats_fn=build_q_stats,
postprocess_fn=postprocess_nstep_and_prio,
optimizer_fn=adam_optimizer,
extra_grad_process_fn=grad_process_and_td_error_fn,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
extra_action_out_fn=extra_action_out_fn,
before_init=setup_early_mixins,
after_init=after_init,
mixins=[
TargetNetworkMixin,
ComputeTDErrorMixin,
LearningRateSchedule,
])
| [((1327, 1345), 'ray.rllib.utils.framework.try_import_torch', 'try_import_torch', ([], {}), '()\n', (1343, 1345), False, 'from ray.rllib.utils.framework import try_import_torch\n'), ((14142, 14825), 'ray.rllib.policy.torch_policy_template.build_torch_policy', 'build_torch_policy', ([], {'name': '"""DQNTorchPolicy"""', 'loss_fn': 'build_q_losses', 'get_default_config': '(lambda : ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG)', 'make_model_and_action_dist': 'build_q_model_and_distribution', 'action_distribution_fn': 'get_distribution_inputs_and_class', 'stats_fn': 'build_q_stats', 'postprocess_fn': 'postprocess_nstep_and_prio', 'optimizer_fn': 'adam_optimizer', 'extra_grad_process_fn': 'grad_process_and_td_error_fn', 'extra_learn_fetches_fn': "(lambda policy: {'td_error': policy.q_loss.td_error})", 'extra_action_out_fn': 'extra_action_out_fn', 'before_init': 'setup_early_mixins', 'after_init': 'after_init', 'mixins': '[TargetNetworkMixin, ComputeTDErrorMixin, LearningRateSchedule]'}), "(name='DQNTorchPolicy', loss_fn=build_q_losses,\n get_default_config=lambda : ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,\n make_model_and_action_dist=build_q_model_and_distribution,\n action_distribution_fn=get_distribution_inputs_and_class, stats_fn=\n build_q_stats, postprocess_fn=postprocess_nstep_and_prio, optimizer_fn=\n adam_optimizer, extra_grad_process_fn=grad_process_and_td_error_fn,\n extra_learn_fetches_fn=lambda policy: {'td_error': policy.q_loss.\n td_error}, extra_action_out_fn=extra_action_out_fn, before_init=\n setup_early_mixins, after_init=after_init, mixins=[TargetNetworkMixin,\n ComputeTDErrorMixin, LearningRateSchedule])\n", (14160, 14825), False, 'from ray.rllib.policy.torch_policy_template import build_torch_policy\n'), ((6151, 6581), 'ray.rllib.models.catalog.ModelCatalog.get_model_v2', 'ModelCatalog.get_model_v2', ([], {'obs_space': 'obs_space', 'action_space': 'action_space', 'num_outputs': 'num_outputs', 'model_config': "config['model']", 'framework': '"""torch"""', 'model_interface': 'DQNTorchModel', 'name': 'Q_SCOPE', 'q_hiddens': "config['hiddens']", 'dueling': "config['dueling']", 'num_atoms': "config['num_atoms']", 'use_noisy': "config['noisy']", 'v_min': "config['v_min']", 'v_max': "config['v_max']", 'sigma0': "config['sigma0']", 'add_layer_norm': 'add_layer_norm'}), "(obs_space=obs_space, action_space=action_space,\n num_outputs=num_outputs, model_config=config['model'], framework=\n 'torch', model_interface=DQNTorchModel, name=Q_SCOPE, q_hiddens=config[\n 'hiddens'], dueling=config['dueling'], num_atoms=config['num_atoms'],\n use_noisy=config['noisy'], v_min=config['v_min'], v_max=config['v_max'],\n sigma0=config['sigma0'], add_layer_norm=add_layer_norm)\n", (6176, 6581), False, 'from ray.rllib.models.catalog import ModelCatalog\n'), ((6873, 7312), 'ray.rllib.models.catalog.ModelCatalog.get_model_v2', 'ModelCatalog.get_model_v2', ([], {'obs_space': 'obs_space', 'action_space': 'action_space', 'num_outputs': 'num_outputs', 'model_config': "config['model']", 'framework': '"""torch"""', 'model_interface': 'DQNTorchModel', 'name': 'Q_TARGET_SCOPE', 'q_hiddens': "config['hiddens']", 'dueling': "config['dueling']", 'num_atoms': "config['num_atoms']", 'use_noisy': "config['noisy']", 'v_min': "config['v_min']", 'v_max': "config['v_max']", 'sigma0': "config['sigma0']", 'add_layer_norm': 'add_layer_norm'}), "(obs_space=obs_space, action_space=action_space,\n num_outputs=num_outputs, model_config=config['model'], framework=\n 'torch', model_interface=DQNTorchModel, name=Q_TARGET_SCOPE, 
q_hiddens=\n config['hiddens'], dueling=config['dueling'], num_atoms=config[\n 'num_atoms'], use_noisy=config['noisy'], v_min=config['v_min'], v_max=\n config['v_max'], sigma0=config['sigma0'], add_layer_norm=add_layer_norm)\n", (6898, 7312), False, 'from ray.rllib.models.catalog import ModelCatalog\n'), ((11388, 11462), 'ray.rllib.policy.torch_policy.LearningRateSchedule.__init__', 'LearningRateSchedule.__init__', (['policy', "config['lr']", "config['lr_schedule']"], {}), "(policy, config['lr'], config['lr_schedule'])\n", (11417, 11462), False, 'from ray.rllib.policy.torch_policy import LearningRateSchedule\n'), ((11639, 11707), 'ray.rllib.agents.dqn.simple_q_torch_policy.TargetNetworkMixin.__init__', 'TargetNetworkMixin.__init__', (['policy', 'obs_space', 'action_space', 'config'], {}), '(policy, obs_space, action_space, config)\n', (11666, 11707), False, 'from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin\n'), ((13898, 13942), 'ray.rllib.agents.a3c.a3c_torch_policy.apply_grad_clipping', 'apply_grad_clipping', (['policy', 'optimizer', 'loss'], {}), '(policy, optimizer, loss)\n', (13917, 13942), False, 'from ray.rllib.agents.a3c.a3c_torch_policy import apply_grad_clipping\n'), ((3483, 3554), 'ray.rllib.utils.torch_ops.softmax_cross_entropy_with_logits', 'softmax_cross_entropy_with_logits', ([], {'logits': 'q_logits_t_selected', 'labels': 'm'}), '(logits=q_logits_t_selected, labels=m)\n', (3516, 3554), False, 'from ray.rllib.utils.torch_ops import FLOAT_MIN, huber_loss, reduce_mean_ignore_inf, softmax_cross_entropy_with_logits\n'), ((13377, 13417), 'ray.rllib.utils.torch_ops.reduce_mean_ignore_inf', 'reduce_mean_ignore_inf', (['action_scores', '(1)'], {}), '(action_scores, 1)\n', (13399, 13417), False, 'from ray.rllib.utils.torch_ops import FLOAT_MIN, huber_loss, reduce_mean_ignore_inf, softmax_cross_entropy_with_logits\n'), ((4206, 4231), 'ray.rllib.utils.torch_ops.huber_loss', 'huber_loss', (['self.td_error'], {}), '(self.td_error)\n', (4216, 4231), False, 'from ray.rllib.utils.torch_ops import FLOAT_MIN, huber_loss, reduce_mean_ignore_inf, softmax_cross_entropy_with_logits\n')] |
SuffolkLITLab/FormFyxer | formfyxer/__init__.py | 00a6a70b30f1899fc5273de1001f1f57c3728f60 | from .lit_explorer import *
from .pdf_wrangling import *
| [] |
priidupaomets/python_kursus | Overview/11 - funktsioonid.py | 731ab386ca40c321288659db21db23912ca7f8dd | """
funktsioonid.py
Using functions and procedures
"""
#
# Procedure
#
def minu_funktsioon():
    print("This is a procedure")
# Call the function
minu_funktsioon()
#
# Function
#
def liida(num1, num2):
return num1 + num2
sum = liida(3, 5)
print(sum)
# Example of using default argument values
# def funk(arg1 = value1, arg2 = value2)
# pass
def funk(arg1 = 0, arg2 = "Test"):
print(arg1, arg2)
funk() # Call the function without passing any arguments
#
# Finding prime numbers
#
def isprime(n):
if n <= 1:
return False
for i in range(2, n):
if n % i == 0:
return False
else:
return True
# Call the function to test it
n = 5
if isprime(n):
    print(f"{n} IS a prime") # Use an f-string, which lets us embed the variable directly in the string
else:
    print(f"{n} is NOT a prime")
def list_primes(max_num = 100):
for n in range(2, max_num):
if isprime(n):
print(n, end = ' ', flush = True)
print()
list_primes()
#
# Functions with a variable number of arguments
#
# Simply add new arguments
def summa(num1, num2, num3):
return num1 + num2 + num3
print(summa(1, 2, 3)) # Works
print(summa(1, 2)) # We get an error because the new function requires 3 arguments
# Let's try function overloading (also known as method overloading)
def summa(num1, num2):
return num1 + num2
def summa(num1, num2, num3):
return num1 + num2 + num3
print(summa(1, 2)) # We get an error because the last def overwrites the previous one
print(summa(1, 2, 3))
# Let's try functions with default values
def summa(num1, num2, num3 = 0, num4 = 0):
return num1 + num2 + num3 + num4
print(summa(1, 2))
print(summa(1, 2, 3))
print(summa(1, 2, 3, 4))
#print(summa(1, 2, 3, 4, 5)) # To make this work we would have to change the function
def keskmine(num1, num2, num3 = 0, num4 = 0):
    sum = num1 + num2 + num3 + num4 # Same as summa(num1, num2, num3, num4)
argumente = 4.0
return sum / argumente
print(keskmine(1, 2)) # Obviously wrong result (0.75 instead of 1.5)
print(keskmine(1, 2, 3)) # Also a wrong result (1.5 instead of 2)
print(keskmine(1, 2, 3, 4)) # Correct result
# Let's improve how the number of arguments is determined
def keskmine(num1, num2, num3 = 0, num4 = 0):
    sum = num1 + num2 + num3 + num4 # Same as summa(num1, num2, num3, num4)
    argumente = 2.0 # At least 2
if num3 > 0:
argumente = argumente + 1
if num4 > 0:
argumente = argumente + 1
return sum / argumente
print(keskmine(1, 2)) # Correct result
print(keskmine(1, 2, 3)) # Correct result
print(keskmine(1, 2, 3, 4)) # Correct result
print(keskmine(1, 2, 3, 0)) # Wrong result!
print(keskmine(1, 0, 3, 2)) # Correct result!?! How is this correct - does the result depend on the order of the arguments?
# Let's use a different default value
def keskmine(num1, num2, num3 = None, num4 = None):
    sum = num1 + num2 # We cannot add all 4 args right away
    argumente = 2.0 # At least 2
if num3 is not None:
argumente += 1
sum = sum + num3
if num4 is not None:
argumente += 1
sum = sum + num4
return sum / argumente
print(keskmine(1, 2)) # Correct result
print(keskmine(1, 2, 3)) # Correct result
print(keskmine(1, 2, 3, 4)) # Correct result
print(keskmine(1, 2, 3, 0)) # Correct result!
print(keskmine(1, 0, 3, 2)) # Correct result
# Let's try passing the arguments as a list
def summa(numbrid=[]):
sum = 0
for num in numbrid:
sum += num
return sum
#print(summa(1)) # Does not work because the argument is not an iterable type
#print(summa(1, 2)) # Does not work because the arguments are not a list
arvud=[1, 2]
print(summa(arvud))
arvud=[1, 2, 3]
print(summa(arvud))
arvud=[1, 2, 3, 4]
print(summa(arvud))
print(summa([1, 2, 3, 4, 5])) # We can also pass the list without an intermediate variable
arvud=[1]
print(summa(arvud))
def summa(*numbrid):
sum = 0
for num in numbrid:
sum += num
return sum
print(summa()) # Even this variant works
print(summa(1))
print(summa(1, 2))
arvud=[1, 2]
print(summa(*arvud)) # Here too we must use '*'
arvud=[1, 2, 3]
print(summa(*arvud))
arvud=[1, 2, 3, 4]
print(summa(*arvud))
arvud=[1, 2, 3, 4, 5]
print(summa(*arvud))
arvud=[1]
print(summa(*arvud))
# Different kinds of arguments
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True"):
print(arg1, arg2, *args, kw1, kw2)
argfun(1, 2, 3, 4, 5, kw1 = 10, kw2 = 12)
def argfun(**kwargs):
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, **kwargs):
print(arg1, arg2, *args)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True", **kwargs):
print(arg1, arg2, *args, kw1, kw2)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
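# Editor's extra example (not in the original lesson): keyword arguments can
# also be collected into a dict first and unpacked with '**'; 'params' is a
# made-up variable name.
params = {"kw1": 100, "kw2": "yes", "extra": 42}
argfun(1, 2, 3, **params)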
# How can we guarantee that the arguments are numbers?
def numsum(*numbrid):
sum = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
sum += num
return sum
def numcount(*numbrid):
count = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
count += 1
return count
def numavg(*numbrid):
sum = numsum(*numbrid)
count = numcount(*numbrid)
    return sum / (count * 1.0) # Turn the divisor into a float so we get float division
print(numsum(1))
print(numsum(1, 2))
print(numsum(1, 2, 3))
print(numsum(1, 2, 3, "4"))
print(numsum(1, None, 3, 4, 5))
print("-"*30)
print(numcount(1))
print(numcount(1, 2))
print(numcount(1, 2, 3))
print(numcount(1, 2, 3, "4"))
print(numcount(1, None, 3, 4, 5))
print("-"*30)
print(numavg(1))
print(numavg(1, 2))
print(numavg(1, 2, 3))
print(numavg(1, 2, 3, "4"))
print(numavg(1, None, 3, 4, 5))
print(numavg()) # Error! Division by zero!!!
# We will look at error handling in more detail soon
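# Editor's forward-looking sketch (not part of the original lesson): the
# failing call above could be guarded with try/except, which the upcoming
# error-handling topic covers in more detail.
try:
    print(numavg())
except ZeroDivisionError:
    print("Cannot take the average of zero numbers")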
| [] |
l2ol33rt/salt | tests/integration/states/test_cmd.py | ff68bbd9f4bda992a3e039822fb32f141e94347c | # -*- coding: utf-8 -*-
'''
Tests for the file state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
IS_WINDOWS = salt.utils.is_windows()
class CMDTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state
'''
def test_run_simple(self):
'''
cmd.run
'''
cmd = 'dir' if IS_WINDOWS else 'ls'
ret = self.run_state('cmd.run', name=cmd, cwd=tempfile.gettempdir())
self.assertSaltTrueReturn(ret)
def test_test_run_simple(self):
'''
cmd.run test interface
'''
ret = self.run_state('cmd.run', name='ls',
cwd=tempfile.gettempdir(), test=True)
self.assertSaltNoneReturn(ret)
class CMDRunRedirectTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_redirect
'''
def setUp(self):
self.state_name = 'run_redirect'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
# Create the testfile and release the handle
fd, self.test_file = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
# Create the testfile and release the handle
fd, self.test_tmp_path = tempfile.mkstemp()
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
super(CMDRunRedirectTest, self).setUp()
def tearDown(self):
for path in (self.state_file, self.test_tmp_path, self.test_file):
try:
os.remove(path)
except OSError:
# Not all of the tests leave files around that we want to remove
# As some of the tests create the sls files in the test itself,
# And some are using files in the integration test file state tree.
pass
super(CMDRunRedirectTest, self).tearDown()
def test_run_unless(self):
'''
test cmd.run unless
'''
state_key = 'cmd_|-{0}_|-{0}_|-run'.format(self.test_tmp_path)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
{0}:
cmd.run:
- unless: echo cheese > {1}
'''.format(self.test_tmp_path, self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
def test_run_unless_multiple_cmds(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-35384')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless execution succeeded", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
self.assertEqual(sls['cmd_|-cmd_run_unless_multiple_|-echo "hello"_|-run']['comment'],
'Command "echo "hello"" run')
def test_run_creates_exists(self):
'''
test cmd.run creates already there
'''
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 0)
def test_run_creates_new(self):
'''
test cmd.run creates not there
'''
os.remove(self.test_file)
state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo >> {0}:
cmd.run:
- creates: {0}
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
self.assertEqual(len(ret[state_key]['changes']), 4)
def test_run_redirect(self):
'''
test cmd.run with shell redirect
'''
state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
echo test > {0}:
cmd.run
'''.format(self.test_file)))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[state_key]['result'])
class CMDRunWatchTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the cmd state of run_watch
'''
def setUp(self):
self.state_name = 'run_watch'
state_filename = self.state_name + '.sls'
self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
super(CMDRunWatchTest, self).setUp()
def tearDown(self):
os.remove(self.state_file)
super(CMDRunWatchTest, self).tearDown()
def test_run_watch(self):
'''
test cmd.run watch
'''
saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'
with salt.utils.fopen(self.state_file, 'w') as fb_:
fb_.write(textwrap.dedent('''
saltines:
cmd.run:
- name: echo changed=true
- cwd: /
- stateful: True
biscuits:
cmd.wait:
- name: echo biscuits
- cwd: /
- watch:
- cmd: saltines
'''))
ret = self.run_function('state.sls', [self.state_name])
self.assertTrue(ret[saltines_key]['result'])
self.assertTrue(ret[biscuits_key]['result'])
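# Editor's illustrative sketch (not part of the test suite): the kind of
# minimal cmd.run state with 'creates' that the tests above render through
# textwrap.dedent; the target path is hypothetical.
_EXAMPLE_CREATES_SLS = textwrap.dedent('''
    echo hello >> /tmp/example-output:
      cmd.run:
        - creates: /tmp/example-output
    ''')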
| [((1258, 1302), 'os.path.join', 'os.path.join', (['TMP_STATE_TREE', 'state_filename'], {}), '(TMP_STATE_TREE, state_filename)\n', (1270, 1302), False, 'import os\n'), ((1386, 1404), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1402, 1404), False, 'import tempfile\n'), ((1628, 1646), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1644, 1646), False, 'import tempfile\n'), ((4515, 4540), 'os.remove', 'os.remove', (['self.test_file'], {}), '(self.test_file)\n', (4524, 4540), False, 'import os\n'), ((5800, 5844), 'os.path.join', 'os.path.join', (['TMP_STATE_TREE', 'state_filename'], {}), '(TMP_STATE_TREE, state_filename)\n', (5812, 5844), False, 'import os\n'), ((5923, 5949), 'os.remove', 'os.remove', (['self.state_file'], {}), '(self.state_file)\n', (5932, 5949), False, 'import os\n'), ((1430, 1442), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1438, 1442), False, 'import os\n'), ((1672, 1684), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (1680, 1684), False, 'import os\n'), ((686, 707), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (705, 707), False, 'import tempfile\n'), ((924, 945), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (943, 945), False, 'import tempfile\n'), ((1965, 1980), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1974, 1980), False, 'import os\n'), ((6292, 6702), 'textwrap.dedent', 'textwrap.dedent', (['"""\n saltines:\n cmd.run:\n - name: echo changed=true\n - cwd: /\n - stateful: True\n\n biscuits:\n cmd.wait:\n - name: echo biscuits\n - cwd: /\n - watch:\n - cmd: saltines\n """'], {}), '(\n """\n saltines:\n cmd.run:\n - name: echo changed=true\n - cwd: /\n - stateful: True\n\n biscuits:\n cmd.wait:\n - name: echo biscuits\n - cwd: /\n - watch:\n - cmd: saltines\n """\n )\n', (6307, 6702), False, 'import textwrap\n')] |
ChenQuan/mars | mars/tensor/execution/datastore.py | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = to_store.row + axis_offsets[0], to_store.col + axis_offsets[1]
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(sps.csr_matrix((0, 0), dtype=chunk.dtype),
shape=chunk.shape)
def register_data_store_handler():
from ...executor import register
register(datastore.TensorTileDBDataStore, _store_tiledb)
| [((1186, 1231), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ctx[chunk.op.input.key]'], {}), '(ctx[chunk.op.input.key])\n', (1206, 1231), True, 'import numpy as np\n'), ((1650, 1696), 'numpy.empty', 'np.empty', (['((0,) * chunk.ndim)'], {'dtype': 'chunk.dtype'}), '((0,) * chunk.ndim, dtype=chunk.dtype)\n', (1658, 1696), True, 'import numpy as np\n'), ((1471, 1545), 'tiledb.DenseArray', 'tiledb.DenseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1488, 1545), False, 'import tiledb\n'), ((1830, 1905), 'tiledb.SparseArray', 'tiledb.SparseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1848, 1905), False, 'import tiledb\n')] |
ppmlguy/fastgradclip | fastgc/model/mlp.py | 0d8bff42ab13fa3471c520a2823050ccf0ff4a21 | import torch
import torch.nn as nn
import torch.nn.functional as F
from fastgc.model.penet import PeGradNet
from fastgc.layers.linear import Linear
from fastgc.activation import activation
class MLP(PeGradNet):
def __init__(self, input_size, hidden_sizes, output_size, act_func='sigmoid',
train_alg='batch'):
"""
Parameters:
------------------
- input_size: integer, the number of features in the input
        - hidden_sizes: a list of integers, the number of units in each hidden layer
- output_size: an integer, the length of output vector
- act_func: string, name of activation function to use for each hidden layer
- train_alg: string, allowed values are {'batch', 'reweight', 'naive'}
"""
super(MLP, self).__init__()
self.input_size = input_size
layer_sizes = [input_size] + hidden_sizes
self.linears = nn.ModuleList([Linear(in_size, out_size, bias=True)
for in_size, out_size in zip(layer_sizes[:-1],
layer_sizes[1:])])
self.output_layer = Linear(hidden_sizes[-1], output_size, bias=True)
self.act = activation[act_func]
self.train_alg=train_alg
# list of layers in the network
self.layers = [layer for layer in self.linears]
self.layers.append(self.output_layer)
def forward(self, x):
x = x.view(-1, self.input_size)
out = x
for layer in self.linears:
out = self.act(layer(out))
logits = self.output_layer(out)
return logits
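# Editor's usage sketch, not part of the original module. It assumes the
# package's custom Linear/PeGradNet behave like their torch counterparts and
# that 'sigmoid' (the documented default) is a valid activation key.
if __name__ == "__main__":
    model = MLP(input_size=784, hidden_sizes=[256, 128], output_size=10)
    logits = model(torch.randn(32, 1, 28, 28))  # inputs are flattened to (32, 784)
    print(logits.shape)  # expected: torch.Size([32, 10])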
| [((1201, 1249), 'fastgc.layers.linear.Linear', 'Linear', (['hidden_sizes[-1]', 'output_size'], {'bias': '(True)'}), '(hidden_sizes[-1], output_size, bias=True)\n', (1207, 1249), False, 'from fastgc.layers.linear import Linear\n'), ((964, 1000), 'fastgc.layers.linear.Linear', 'Linear', (['in_size', 'out_size'], {'bias': '(True)'}), '(in_size, out_size, bias=True)\n', (970, 1000), False, 'from fastgc.layers.linear import Linear\n')] |
ericchen12377/CS61A_LearningDoc | 05-Environments/hw02/hw02/hw02.py | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | """ Homework 2: Higher Order Functions"""
HW_SOURCE_FILE = 'hw02.py'
from operator import add, mul, sub
square = lambda x: x * x
identity = lambda x: x
triple = lambda x: 3 * x
increment = lambda x: x + 1
######################
# Required Questions #
######################
def product(n, f):
"""Return the product of the first n terms in a sequence.
n -- a positive integer
f -- a function that takes one argument to produce the term
>>> product(3, identity) # 1 * 2 * 3
6
>>> product(5, identity) # 1 * 2 * 3 * 4 * 5
120
>>> product(3, square) # 1^2 * 2^2 * 3^2
36
>>> product(5, square) # 1^2 * 2^2 * 3^2 * 4^2 * 5^2
14400
>>> product(3, increment) # (1+1) * (2+1) * (3+1)
24
>>> product(3, triple) # 1*3 * 2*3 * 3*3
162
"""
"*** YOUR CODE HERE ***"
result,k = 1,1
while k <= n:
result,k = f(k)*result, k + 1
return result
def accumulate(combiner, base, n, f):
"""Return the result of combining the first n terms in a sequence and base.
The terms to be combined are f(1), f(2), ..., f(n). combiner is a
two-argument commutative, associative function.
>>> accumulate(add, 0, 5, identity) # 0 + 1 + 2 + 3 + 4 + 5
15
>>> accumulate(add, 11, 5, identity) # 11 + 1 + 2 + 3 + 4 + 5
26
>>> accumulate(add, 11, 0, identity) # 11
11
>>> accumulate(add, 11, 3, square) # 11 + 1^2 + 2^2 + 3^2
25
>>> accumulate(mul, 2, 3, square) # 2 * 1^2 * 2^2 * 3^2
72
>>> accumulate(lambda x, y: x + y + 1, 2, 3, square)
19
>>> accumulate(lambda x, y: 2 * (x + y), 2, 3, square)
58
>>> accumulate(lambda x, y: (x + y) % 17, 19, 20, square)
16
"""
"*** YOUR CODE HERE ***"
result, k = base,1
while k <= n:
result, k = combiner(result,f(k)), k + 1
return result
def summation_using_accumulate(n, f):
"""Returns the sum of f(1) + ... + f(n). The implementation
uses accumulate.
>>> summation_using_accumulate(5, square)
55
>>> summation_using_accumulate(5, triple)
45
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'summation_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 0, 1
# while k <= n:
# result, k = result + f(k), k + 1
return accumulate(add,0,n,f)
def product_using_accumulate(n, f):
"""An implementation of product using accumulate.
>>> product_using_accumulate(4, square)
576
>>> product_using_accumulate(6, triple)
524880
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'product_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 1, 1
# while k <= n:
# result, k = result * f(k), k + 1
return accumulate(mul,1,n,f)
def compose1(h, g):
"""Return a function f, such that f(x) = h(g(x))."""
def f(x):
return h(g(x))
return f
def make_repeater(h, n):
"""Return the function that computes the nth application of h.
>>> add_three = make_repeater(increment, 3)
>>> add_three(5)
8
>>> make_repeater(triple, 5)(1) # 3 * 3 * 3 * 3 * 3 * 1
243
>>> make_repeater(square, 2)(5) # square(square(5))
625
>>> make_repeater(square, 4)(5) # square(square(square(square(5))))
152587890625
>>> make_repeater(square, 0)(5) # Yes, it makes sense to apply the function zero times!
5
"""
"*** YOUR CODE HERE ***"
def repeater(x):
result, k = x,1
while k <= n:
result,k = h(result), k + 1
return result
return repeater
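# Editor's sketch (not part of the assignment): the same repeater can be built
# by composing h with itself n times using accumulate and compose1 from above.
def make_repeater_using_accumulate(h, n):
    """Return the function that computes the nth application of h, via accumulate.
    >>> make_repeater_using_accumulate(increment, 3)(5)
    8
    """
    return accumulate(compose1, identity, n, lambda k: h)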
##########################
# Just for fun Questions #
##########################
def zero(f):
return lambda x: x
def successor(n):
return lambda f: lambda x: f(n(f)(x))
def one(f):
"""Church numeral 1: same as successor(zero)"""
"*** YOUR CODE HERE ***"
return lambda x: f(x)
def two(f):
"""Church numeral 2: same as successor(successor(zero))"""
"*** YOUR CODE HERE ***"
return lambda x: f(f(x))
three = successor(two)
def church_to_int(n):
"""Convert the Church numeral n to a Python integer.
>>> church_to_int(zero)
0
>>> church_to_int(one)
1
>>> church_to_int(two)
2
>>> church_to_int(three)
3
"""
"*** YOUR CODE HERE ***"
return n(lambda x: x + 1)(0)
def add_church(m, n):
"""Return the Church numeral for m + n, for Church numerals m and n.
>>> church_to_int(add_church(two, three))
5
"""
"*** YOUR CODE HERE ***"
return lambda f: lambda x: m(f)(n(f)(x))
def mul_church(m, n):
"""Return the Church numeral for m * n, for Church numerals m and n.
>>> four = successor(three)
>>> church_to_int(mul_church(two, three))
6
>>> church_to_int(mul_church(three, four))
12
"""
"*** YOUR CODE HERE ***"
return lambda f: m(n(f))
def pow_church(m, n):
"""Return the Church numeral m ** n, for Church numerals m and n.
>>> church_to_int(pow_church(two, three))
8
>>> church_to_int(pow_church(three, two))
9
"""
"*** YOUR CODE HERE ***"
return n(m)
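# Editor's demonstration sketch (not part of the homework): Church numerals
# are just repeated function application, so a few conversions make the
# encoding concrete.
def church_demo():
    """Return [0, 1, 2, 3, 5, 8] by converting several Church numerals."""
    return [church_to_int(k) for k in
            (zero, one, two, three, add_church(two, three), pow_church(two, three))]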
| [] |
farleyb-amazon/aws-encryption-sdk-python | test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py | 7950abd73ee333407d2dadd02ef2d57c3df464cf | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
AWS Encryption SDK Decrypt Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
"""
import json
import os
import uuid
from copy import copy
import attr
import six
from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache
from aws_encryption_sdk.materials_managers.base import CryptoMaterialsManager
from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager
from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager
from awses_test_vectors.internal.defaults import ENCODING
from awses_test_vectors.internal.util import (
dictionary_validator,
file_reader,
file_writer,
iterable_validator,
membership_validator,
validate_manifest_type,
)
from awses_test_vectors.manifests.full_message.decrypt import (
DecryptionMethod,
MessageDecryptionManifest,
MessageDecryptionTestResult,
MessageDecryptionTestScenario,
)
from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario
from awses_test_vectors.manifests.keys import KeysManifest
try:
from aws_encryption_sdk.identifiers import AlgorithmSuite
except ImportError:
from aws_encryption_sdk.identifiers import Algorithm as AlgorithmSuite
from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import IO, Callable, Dict, Iterable, Optional # noqa pylint: disable=unused-import
from awses_test_vectors.internal.mypy_types import ( # noqa pylint: disable=unused-import
ENCRYPT_SCENARIO_SPEC,
PLAINTEXTS_SPEC,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
SUPPORTED_VERSIONS = (2,)
class TamperingMethod:
"""Base class for all tampering methods."""
@classmethod
def from_tampering_spec(cls, spec):
"""Load from a tampering specification"""
if spec is None:
return TamperingMethod()
if spec == "truncate":
return TruncateTamperingMethod()
if spec == "mutate":
return MutateTamperingMethod()
if spec == "half-sign":
return HalfSigningTamperingMethod()
((tampering_tag, tampering_values_spec),) = spec.items()
if tampering_tag == "change-edk-provider-info":
return ChangeEDKProviderInfoTamperingMethod.from_values_spec(tampering_values_spec)
raise ValueError("Unrecognized tampering method tag: " + tampering_tag)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs
"""
materials_manager = DefaultCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(materials_manager)
if generation_scenario.result:
expected_result = generation_scenario.result
else:
expected_result = MessageDecryptionTestResult.expect_output(
plaintext_uri=plaintext_uri, plaintext=generation_scenario.encryption_scenario.plaintext
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class ChangeEDKProviderInfoTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
new_provider_infos = attr.ib(validator=iterable_validator(list, six.string_types))
def __init__(self, new_provider_infos):
"""Create a new instance for a given new provider info value."""
self.new_provider_infos = new_provider_infos
@classmethod
def from_values_spec(cls, values_spec):
"""Load from a tampering parameters specification"""
return ChangeEDKProviderInfoTamperingMethod(values_spec)
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
master_key_provider = generation_scenario.encryption_scenario.master_key_provider_fn()
# Use a caching CMM to avoid generating a new data key every time.
cache = LocalCryptoMaterialsCache(10)
caching_cmm = CachingCryptoMaterialsManager(
master_key_provider=master_key_provider,
cache=cache,
max_age=60.0,
max_messages_encrypted=100,
)
return [
self.run_scenario_with_new_provider_info(
ciphertext_writer, generation_scenario, caching_cmm, new_provider_info
)
for new_provider_info in self.new_provider_infos
]
def run_scenario_with_new_provider_info(
self, ciphertext_writer, generation_scenario, materials_manager, new_provider_info
):
"""Run with tampering for a specific new provider info value"""
tampering_materials_manager = ProviderInfoChangingCryptoMaterialsManager(materials_manager, new_provider_info)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Incorrect encrypted data key provider info: " + new_provider_info
)
return generation_scenario.decryption_test_scenario_pair(
ciphertext_writer, ciphertext_to_decrypt, expected_result
)
class ProviderInfoChangingCryptoMaterialsManager(CryptoMaterialsManager):
"""
    Custom CMM that modifies the provider info field on EDKs.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production!
"""
wrapped_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
new_provider_info = attr.ib(validator=attr.validators.instance_of(six.string_types))
def __init__(self, materials_manager, new_provider_info):
"""Create a new CMM that wraps a the given CMM."""
self.wrapped_cmm = materials_manager
self.new_provider_info = new_provider_info
def get_encryption_materials(self, request):
"""
Request materials from the wrapped CMM, and then change the provider info
on each EDK.
"""
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result
def decrypt_materials(self, request):
"""Thunks to the wrapped CMM"""
return self.wrapped_cmm.decrypt_materials(request)
BITS_PER_BYTE = 8
class TruncateTamperingMethod(TamperingMethod):
    """Tampering method that truncates a good message at every byte (except zero)."""
    # pylint: disable=R0201
    def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
        """
        Run a given scenario, tampering with the input or the result.
        return: a list of (ciphertext, result) pairs.
        """
        ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                ciphertext_to_decrypt[0:length],
                MessageDecryptionTestResult.expect_error("Truncated at byte {}".format(length)),
            )
            for length in range(1, len(ciphertext_to_decrypt))
        ]
class MutateTamperingMethod(TamperingMethod):
    """Tampering method that produces a message with a single bit flipped, for every possible bit."""
    # pylint: disable=R0201
    def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
        """
        Run a given scenario, tampering with the input or the result.
        return: a list of (ciphertext, result) pairs.
        """
        ciphertext_to_decrypt = generation_scenario.encryption_scenario.run()
        return [
            generation_scenario.decryption_test_scenario_pair(
                ciphertext_writer,
                MutateTamperingMethod.flip_bit(ciphertext_to_decrypt, bit),
                MessageDecryptionTestResult.expect_error("Bit {} flipped".format(bit)),
            )
            for bit in range(0, len(ciphertext_to_decrypt) * BITS_PER_BYTE)
        ]
    @classmethod
    def flip_bit(cls, ciphertext, bit):
        """Flip only the given bit in the given ciphertext"""
        byte_index, bit_index = divmod(bit, BITS_PER_BYTE)
        result = bytearray(ciphertext)
        result[byte_index] ^= 1 << (BITS_PER_BYTE - bit_index - 1)
        return bytes(result)
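# --- Editor's illustrative sketch (not part of the manifest handler) ---
# A tiny self-check of the bit-flip helper above: flipping bit 0 toggles the
# most significant bit of the first byte; flipping bit 15 toggles the least
# significant bit of the second byte.
def _flip_bit_example():
    assert MutateTamperingMethod.flip_bit(b"\x00\x00", 0) == b"\x80\x00"
    assert MutateTamperingMethod.flip_bit(b"\x00\x00", 15) == b"\x00\x01"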
class HalfSigningTamperingMethod(TamperingMethod):
    """Tampering method that creates an unsigned message using a data key generated with a public verification key."""
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
tampering_materials_manager = HalfSigningCryptoMaterialsManager(
generation_scenario.encryption_scenario.master_key_provider_fn()
)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Unsigned message using a data key with a public key"
)
return [
generation_scenario.decryption_test_scenario_pair(ciphertext_writer, ciphertext_to_decrypt, expected_result)
]
class HalfSigningCryptoMaterialsManager(CryptoMaterialsManager):
"""
Custom CMM that generates materials for an unsigned algorithm suite
that includes the "aws-crypto-public-key" encryption context.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production! It is imitating what a malicious decryptor without encryption
permissions might do, to attempt to forge an unsigned message from a decrypted
signed message, and therefore this is an important case for ESDKs to reject.
"""
wrapped_default_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
def __init__(self, master_key_provider):
"""
Create a new CMM that wraps a new DefaultCryptoMaterialsManager
based on the given master key provider.
"""
self.wrapped_default_cmm = DefaultCryptoMaterialsManager(master_key_provider)
def get_encryption_materials(self, request):
"""
Generate half-signing materials by requesting signing materials
from the wrapped default CMM, and then changing the algorithm suite
        and removing the signing key from the result.
"""
if request.algorithm == AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY:
signing_request = copy(request)
signing_request.algorithm = AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384
result = self.wrapped_default_cmm.get_encryption_materials(signing_request)
result.algorithm = request.algorithm
result.signing_key = None
return result
raise NotImplementedError(
"The half-sign tampering method is only supported on the "
"AES_256_GCM_HKDF_SHA512_COMMIT_KEY algorithm suite."
)
def decrypt_materials(self, request):
"""Thunks to the wrapped default CMM"""
return self.wrapped_default_cmm.decrypt_materials(request)
@attr.s
class MessageDecryptionTestScenarioGenerator(object):
# pylint: disable=too-many-instance-attributes
"""Data class for a single full message decrypt test scenario.
Handles serialization and deserialization to and from manifest specs.
:param MessageEncryptionTestScenario encryption_scenario: Encryption parameters
:param tampering_method: Optional method used to tamper with the ciphertext
:type tampering_method: :class:`TamperingMethod`
:param decryption_method:
:param decryption_master_key_specs: Iterable of master key specifications
:type decryption_master_key_specs: iterable of :class:`MasterKeySpec`
:param Callable decryption_master_key_provider_fn:
:param result:
"""
encryption_scenario = attr.ib(validator=attr.validators.instance_of(MessageEncryptionTestScenario))
tampering_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(TamperingMethod)))
decryption_method = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(DecryptionMethod)))
decryption_master_key_specs = attr.ib(validator=iterable_validator(list, MasterKeySpec))
decryption_master_key_provider_fn = attr.ib(validator=attr.validators.is_callable())
result = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(MessageDecryptionTestResult)))
@classmethod
def from_scenario(cls, scenario, keys, plaintexts):
"""Load from a scenario specification.
:param dict scenario: Scenario specification JSON
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:return: Loaded test scenario
:rtype: MessageDecryptionTestScenarioGenerator
"""
encryption_scenario_spec = scenario["encryption-scenario"]
encryption_scenario = MessageEncryptionTestScenario.from_scenario(encryption_scenario_spec, keys, plaintexts)
tampering = scenario.get("tampering")
tampering_method = TamperingMethod.from_tampering_spec(tampering)
decryption_method_spec = scenario.get("decryption-method")
decryption_method = DecryptionMethod(decryption_method_spec) if decryption_method_spec else None
if "decryption-master-keys" in scenario:
decryption_master_key_specs = [
MasterKeySpec.from_scenario(spec) for spec in scenario["decryption-master-keys"]
]
def decryption_master_key_provider_fn():
return master_key_provider_from_master_key_specs(keys, decryption_master_key_specs)
else:
decryption_master_key_specs = encryption_scenario.master_key_specs
decryption_master_key_provider_fn = encryption_scenario.master_key_provider_fn
result_spec = scenario.get("result")
result = MessageDecryptionTestResult.from_result_spec(result_spec, None) if result_spec else None
return cls(
encryption_scenario=encryption_scenario,
tampering_method=tampering_method,
decryption_method=decryption_method,
decryption_master_key_specs=decryption_master_key_specs,
decryption_master_key_provider_fn=decryption_master_key_provider_fn,
result=result,
)
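    # Illustrative scenario spec (hypothetical values; only the keys read by
    # from_scenario above are shown, and everything except "encryption-scenario"
    # may be omitted):
    # {
    #     "encryption-scenario": {...},
    #     "tampering": <tampering spec>,
    #     "decryption-method": <decryption method name>,
    #     "decryption-master-keys": [<master key spec>, ...],
    #     "result": <expected result spec>
    # }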
def run(self, ciphertext_writer, plaintext_uri):
"""Run this scenario, writing the resulting ciphertext with ``ciphertext_writer`` and returning
a :class:`MessageDecryptionTestScenario` that describes the matching decrypt scenario.
:param callable ciphertext_writer: Callable that will write the requested named ciphertext and
return a URI locating the written data
:param str plaintext_uri: URI locating the written plaintext data for this scenario
:return: Decrypt test scenario that describes the generated scenario
:rtype: MessageDecryptionTestScenario
"""
return dict(self.tampering_method.run_scenario_with_tampering(ciphertext_writer, self, plaintext_uri))
def decryption_test_scenario_pair(self, ciphertext_writer, ciphertext_to_decrypt, expected_result):
"""Create a new (name, decryption scenario) pair"""
ciphertext_name = str(uuid.uuid4())
ciphertext_uri = ciphertext_writer(ciphertext_name, ciphertext_to_decrypt)
return (
ciphertext_name,
MessageDecryptionTestScenario(
ciphertext_uri=ciphertext_uri,
ciphertext=ciphertext_to_decrypt,
master_key_specs=self.decryption_master_key_specs,
master_key_provider_fn=self.decryption_master_key_provider_fn,
decryption_method=self.decryption_method,
result=expected_result,
),
)
@attr.s
class MessageDecryptionGenerationManifest(object):
"""AWS Encryption SDK Decryption Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
:param int version: Version of this manifest
:param KeysManifest keys: Loaded keys
:param dict plaintexts: Mapping of plaintext names to plaintext values
:param dict tests: Mapping of test scenario names to :class:`MessageDecryptionTestScenarioGenerator`s
"""
version = attr.ib(validator=membership_validator(SUPPORTED_VERSIONS))
keys = attr.ib(validator=attr.validators.instance_of(KeysManifest))
plaintexts = attr.ib(validator=dictionary_validator(six.string_types, six.binary_type))
tests = attr.ib(validator=dictionary_validator(six.string_types, MessageDecryptionTestScenarioGenerator))
type_name = "awses-decrypt-generate"
@staticmethod
def _generate_plaintexts(plaintexts_specs):
# type: (PLAINTEXTS_SPEC) -> Dict[str, bytes]
"""Generate required plaintext values.
:param dict plaintexts_specs: Mapping of plaintext name to size in bytes
:return: Mapping of plaintext name to randomly generated bytes
:rtype: dict
"""
return {name: os.urandom(size) for name, size in plaintexts_specs.items()}
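    # e.g. (illustrative) {"small": 1024} -> {"small": <1024 random bytes>}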
@classmethod
def from_file(cls, input_file):
# type: (IO) -> MessageDecryptionGenerationManifest
"""Load from a file containing a full message encrypt manifest.
:param file input_file: File object for file containing JSON manifest
:return: Loaded manifest
:rtype: MessageDecryptionGenerationManifest
"""
raw_manifest = json.load(input_file)
validate_manifest_type(
type_name=cls.type_name, manifest_version=raw_manifest["manifest"], supported_versions=SUPPORTED_VERSIONS
)
parent_dir = os.path.abspath(os.path.dirname(input_file.name))
reader = file_reader(parent_dir)
raw_keys_manifest = json.loads(reader(raw_manifest["keys"]).decode(ENCODING))
keys = KeysManifest.from_manifest_spec(raw_keys_manifest)
plaintexts = cls._generate_plaintexts(raw_manifest["plaintexts"])
tests = {}
for name, scenario in raw_manifest["tests"].items():
try:
tests[name] = MessageDecryptionTestScenarioGenerator.from_scenario(
scenario=scenario, keys=keys, plaintexts=plaintexts
)
except NotImplementedError:
continue
return cls(version=raw_manifest["manifest"]["version"], keys=keys, plaintexts=plaintexts, tests=tests)
def run_and_write_to_dir(self, target_directory, json_indent=None):
# type: (str, Optional[int]) -> None
"""Process all known encrypt test scenarios and write the resulting data and manifests to disk.
:param str target_directory: Directory in which to write all output
:param int json_indent: Number of spaces to indent JSON files (optional: default is to write minified)
"""
root_dir = os.path.abspath(target_directory)
root_writer = file_writer(root_dir)
root_writer("keys.json", json.dumps(self.keys.manifest_spec, indent=json_indent).encode(ENCODING))
plaintext_writer = file_writer(os.path.join(root_dir, "plaintexts"))
plaintext_uris = {name: plaintext_writer(name, plaintext) for name, plaintext in self.plaintexts.items()}
ciphertext_writer = file_writer(os.path.join(root_dir, "ciphertexts"))
test_scenarios = {
decrypt_scenario_name: decrypt_scenario
for name, scenario in self.tests.items()
for decrypt_scenario_name, decrypt_scenario in scenario.run(
ciphertext_writer, plaintext_uris[scenario.encryption_scenario.plaintext_name]
).items()
}
decrypt_manifest = MessageDecryptionManifest(
keys_uri="file://keys.json", keys=self.keys, test_scenarios=test_scenarios
)
root_writer("manifest.json", json.dumps(decrypt_manifest.manifest_spec, indent=json_indent).encode(ENCODING))
| [((5257, 5286), 'aws_encryption_sdk.caches.local.LocalCryptoMaterialsCache', 'LocalCryptoMaterialsCache', (['(10)'], {}), '(10)\n', (5282, 5286), False, 'from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache\n'), ((5309, 5438), 'aws_encryption_sdk.materials_managers.caching.CachingCryptoMaterialsManager', 'CachingCryptoMaterialsManager', ([], {'master_key_provider': 'master_key_provider', 'cache': 'cache', 'max_age': '(60.0)', 'max_messages_encrypted': '(100)'}), '(master_key_provider=master_key_provider,\n cache=cache, max_age=60.0, max_messages_encrypted=100)\n', (5338, 5438), False, 'from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager\n'), ((6203, 6316), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_error', 'MessageDecryptionTestResult.expect_error', (["('Incorrect encrypted data key provider info: ' + new_provider_info)"], {}), "(\n 'Incorrect encrypted data key provider info: ' + new_provider_info)\n", (6243, 6316), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((10425, 10525), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_error', 'MessageDecryptionTestResult.expect_error', (['"""Unsigned message using a data key with a public key"""'], {}), "(\n 'Unsigned message using a data key with a public key')\n", (10465, 10525), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((11555, 11605), 'aws_encryption_sdk.materials_managers.default.DefaultCryptoMaterialsManager', 'DefaultCryptoMaterialsManager', (['master_key_provider'], {}), '(master_key_provider)\n', (11584, 11605), False, 'from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager\n'), ((14530, 14621), 'awses_test_vectors.manifests.full_message.encrypt.MessageEncryptionTestScenario.from_scenario', 'MessageEncryptionTestScenario.from_scenario', (['encryption_scenario_spec', 'keys', 'plaintexts'], {}), '(encryption_scenario_spec, keys,\n plaintexts)\n', (14573, 14621), False, 'from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario\n'), ((19188, 19209), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (19197, 19209), False, 'import json\n'), ((19218, 19352), 'awses_test_vectors.internal.util.validate_manifest_type', 'validate_manifest_type', ([], {'type_name': 'cls.type_name', 'manifest_version': "raw_manifest['manifest']", 'supported_versions': 'SUPPORTED_VERSIONS'}), "(type_name=cls.type_name, manifest_version=\n raw_manifest['manifest'], supported_versions=SUPPORTED_VERSIONS)\n", (19240, 19352), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((19459, 19482), 'awses_test_vectors.internal.util.file_reader', 'file_reader', (['parent_dir'], {}), '(parent_dir)\n', (19470, 19482), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((19584, 19634), 'awses_test_vectors.manifests.keys.KeysManifest.from_manifest_spec', 'KeysManifest.from_manifest_spec', (['raw_keys_manifest'], {}), '(raw_keys_manifest)\n', (19615, 19634), False, 
'from awses_test_vectors.manifests.keys import KeysManifest\n'), ((20597, 20630), 'os.path.abspath', 'os.path.abspath', (['target_directory'], {}), '(target_directory)\n', (20612, 20630), False, 'import os\n'), ((20653, 20674), 'awses_test_vectors.internal.util.file_writer', 'file_writer', (['root_dir'], {}), '(root_dir)\n', (20664, 20674), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((21416, 21521), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionManifest', 'MessageDecryptionManifest', ([], {'keys_uri': '"""file://keys.json"""', 'keys': 'self.keys', 'test_scenarios': 'test_scenarios'}), "(keys_uri='file://keys.json', keys=self.keys,\n test_scenarios=test_scenarios)\n", (21441, 21521), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((3902, 4037), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.expect_output', 'MessageDecryptionTestResult.expect_output', ([], {'plaintext_uri': 'plaintext_uri', 'plaintext': 'generation_scenario.encryption_scenario.plaintext'}), '(plaintext_uri=plaintext_uri,\n plaintext=generation_scenario.encryption_scenario.plaintext)\n', (3943, 4037), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((4390, 4432), 'awses_test_vectors.internal.util.iterable_validator', 'iterable_validator', (['list', 'six.string_types'], {}), '(list, six.string_types)\n', (4408, 4432), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((6765, 6816), 'attr.validators.instance_of', 'attr.validators.instance_of', (['CryptoMaterialsManager'], {}), '(CryptoMaterialsManager)\n', (6792, 6816), False, 'import attr\n'), ((6860, 6905), 'attr.validators.instance_of', 'attr.validators.instance_of', (['six.string_types'], {}), '(six.string_types)\n', (6887, 6905), False, 'import attr\n'), ((11277, 11328), 'attr.validators.instance_of', 'attr.validators.instance_of', (['CryptoMaterialsManager'], {}), '(CryptoMaterialsManager)\n', (11304, 11328), False, 'import attr\n'), ((11995, 12008), 'copy.copy', 'copy', (['request'], {}), '(request)\n', (11999, 12008), False, 'from copy import copy\n'), ((13438, 13496), 'attr.validators.instance_of', 'attr.validators.instance_of', (['MessageEncryptionTestScenario'], {}), '(MessageEncryptionTestScenario)\n', (13465, 13496), False, 'import attr\n'), ((13778, 13817), 'awses_test_vectors.internal.util.iterable_validator', 'iterable_validator', (['list', 'MasterKeySpec'], {}), '(list, MasterKeySpec)\n', (13796, 13817), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((13877, 13906), 'attr.validators.is_callable', 'attr.validators.is_callable', ([], {}), '()\n', (13904, 13906), False, 'import attr\n'), ((14833, 14873), 'awses_test_vectors.manifests.full_message.decrypt.DecryptionMethod', 'DecryptionMethod', (['decryption_method_spec'], {}), '(decryption_method_spec)\n', (14849, 14873), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, 
MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((15515, 15578), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestResult.from_result_spec', 'MessageDecryptionTestResult.from_result_spec', (['result_spec', 'None'], {}), '(result_spec, None)\n', (15559, 15578), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((16902, 16914), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16912, 16914), False, 'import uuid\n'), ((17058, 17352), 'awses_test_vectors.manifests.full_message.decrypt.MessageDecryptionTestScenario', 'MessageDecryptionTestScenario', ([], {'ciphertext_uri': 'ciphertext_uri', 'ciphertext': 'ciphertext_to_decrypt', 'master_key_specs': 'self.decryption_master_key_specs', 'master_key_provider_fn': 'self.decryption_master_key_provider_fn', 'decryption_method': 'self.decryption_method', 'result': 'expected_result'}), '(ciphertext_uri=ciphertext_uri, ciphertext=\n ciphertext_to_decrypt, master_key_specs=self.\n decryption_master_key_specs, master_key_provider_fn=self.\n decryption_master_key_provider_fn, decryption_method=self.\n decryption_method, result=expected_result)\n', (17087, 17352), False, 'from awses_test_vectors.manifests.full_message.decrypt import DecryptionMethod, MessageDecryptionManifest, MessageDecryptionTestResult, MessageDecryptionTestScenario\n'), ((18019, 18059), 'awses_test_vectors.internal.util.membership_validator', 'membership_validator', (['SUPPORTED_VERSIONS'], {}), '(SUPPORTED_VERSIONS)\n', (18039, 18059), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18090, 18131), 'attr.validators.instance_of', 'attr.validators.instance_of', (['KeysManifest'], {}), '(KeysManifest)\n', (18117, 18131), False, 'import attr\n'), ((18168, 18223), 'awses_test_vectors.internal.util.dictionary_validator', 'dictionary_validator', (['six.string_types', 'six.binary_type'], {}), '(six.string_types, six.binary_type)\n', (18188, 18223), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18255, 18333), 'awses_test_vectors.internal.util.dictionary_validator', 'dictionary_validator', (['six.string_types', 'MessageDecryptionTestScenarioGenerator'], {}), '(six.string_types, MessageDecryptionTestScenarioGenerator)\n', (18275, 18333), False, 'from awses_test_vectors.internal.util import dictionary_validator, file_reader, file_writer, iterable_validator, membership_validator, validate_manifest_type\n'), ((18752, 18768), 'os.urandom', 'os.urandom', (['size'], {}), '(size)\n', (18762, 18768), False, 'import os\n'), ((19408, 19440), 'os.path.dirname', 'os.path.dirname', (['input_file.name'], {}), '(input_file.name)\n', (19423, 19440), False, 'import os\n'), ((20823, 20859), 'os.path.join', 'os.path.join', (['root_dir', '"""plaintexts"""'], {}), "(root_dir, 'plaintexts')\n", (20835, 20859), False, 'import os\n'), ((21016, 21053), 'os.path.join', 'os.path.join', (['root_dir', '"""ciphertexts"""'], {}), "(root_dir, 'ciphertexts')\n", (21028, 21053), False, 'import os\n'), ((13564, 13608), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TamperingMethod'], {}), '(TamperingMethod)\n', (13591, 13608), False, 'import attr\n'), ((13678, 13723), 'attr.validators.instance_of', 
'attr.validators.instance_of', (['DecryptionMethod'], {}), '(DecryptionMethod)\n', (13705, 13723), False, 'import attr\n'), ((13964, 14020), 'attr.validators.instance_of', 'attr.validators.instance_of', (['MessageDecryptionTestResult'], {}), '(MessageDecryptionTestResult)\n', (13991, 14020), False, 'import attr\n'), ((15019, 15052), 'awses_test_vectors.manifests.master_key.MasterKeySpec.from_scenario', 'MasterKeySpec.from_scenario', (['spec'], {}), '(spec)\n', (15046, 15052), False, 'from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs\n'), ((15191, 15267), 'awses_test_vectors.manifests.master_key.master_key_provider_from_master_key_specs', 'master_key_provider_from_master_key_specs', (['keys', 'decryption_master_key_specs'], {}), '(keys, decryption_master_key_specs)\n', (15232, 15267), False, 'from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs\n'), ((20709, 20764), 'json.dumps', 'json.dumps', (['self.keys.manifest_spec'], {'indent': 'json_indent'}), '(self.keys.manifest_spec, indent=json_indent)\n', (20719, 20764), False, 'import json\n'), ((21578, 21640), 'json.dumps', 'json.dumps', (['decrypt_manifest.manifest_spec'], {'indent': 'json_indent'}), '(decrypt_manifest.manifest_spec, indent=json_indent)\n', (21588, 21640), False, 'import json\n')] |
ismacaulay/qtcwatchdog | acceptance/test/TestStartStopFeature.py | 72f3588eef1019bac8788fa58c52722dfa7c4d28 | from acceptance.harness.acceptance_test import WatchdogAcceptanceTest
class TestStartStopFeature(WatchdogAcceptanceTest):
    def test_willStartObserverWhenWatchdogStarted(self):
        self.create_and_start_watchdog()
        self.assertTrue(self.fs_observer.running)

    def test_willStopObserverWhenWatchdogStopped(self):
        self.create_and_start_watchdog()
        self.watchdog.stop()
        self.assertFalse(self.fs_observer.running)

    def test_willJoinObserverThreadWhenWatchdogStopped(self):
        self.create_and_start_watchdog()
        self.watchdog.stop()
        self.assertTrue(self.fs_observer.joined)
| [] |
VincentStimper/nsf | neural_spline_flows/nde/transforms/transform_test.py | 6bde505639ebcb67bffa227ea0021e3de235e03d | import torch
import torchtestcase
from neural_spline_flows.nde.transforms import base
class TransformTest(torchtestcase.TorchTestCase):
"""Base test for all transforms."""
def assert_tensor_is_good(self, tensor, shape=None):
self.assertIsInstance(tensor, torch.Tensor)
self.assertFalse(torch.isnan(tensor).any())
self.assertFalse(torch.isinf(tensor).any())
if shape is not None:
self.assertEqual(tensor.shape, torch.Size(shape))
def assert_forward_inverse_are_consistent(self, transform, inputs):
inverse = base.InverseTransform(transform)
identity = base.CompositeTransform([inverse, transform])
outputs, logabsdet = identity(inputs)
self.assert_tensor_is_good(outputs, shape=inputs.shape)
self.assert_tensor_is_good(logabsdet, shape=inputs.shape[:1])
self.assertEqual(outputs, inputs)
self.assertEqual(logabsdet, torch.zeros(inputs.shape[:1]))
def assertNotEqual(self, first, second, msg=None):
if ((self._eps and (first - second).abs().max().item() < self._eps) or
(not self._eps and torch.equal(first, second))):
self._fail_with_message(msg, "The tensors are _not_ different!")
| [((576, 608), 'neural_spline_flows.nde.transforms.base.InverseTransform', 'base.InverseTransform', (['transform'], {}), '(transform)\n', (597, 608), False, 'from neural_spline_flows.nde.transforms import base\n'), ((628, 673), 'neural_spline_flows.nde.transforms.base.CompositeTransform', 'base.CompositeTransform', (['[inverse, transform]'], {}), '([inverse, transform])\n', (651, 673), False, 'from neural_spline_flows.nde.transforms import base\n'), ((933, 962), 'torch.zeros', 'torch.zeros', (['inputs.shape[:1]'], {}), '(inputs.shape[:1])\n', (944, 962), False, 'import torch\n'), ((466, 483), 'torch.Size', 'torch.Size', (['shape'], {}), '(shape)\n', (476, 483), False, 'import torch\n'), ((1134, 1160), 'torch.equal', 'torch.equal', (['first', 'second'], {}), '(first, second)\n', (1145, 1160), False, 'import torch\n'), ((314, 333), 'torch.isnan', 'torch.isnan', (['tensor'], {}), '(tensor)\n', (325, 333), False, 'import torch\n'), ((366, 385), 'torch.isinf', 'torch.isinf', (['tensor'], {}), '(tensor)\n', (377, 385), False, 'import torch\n')] |
brandonaltermatt/penetration-testing-scripts | directory-traversal/validate-file-extension-null-byte-bypass.py | 433b5d000a5573e60b9d8e49932cedce74937ebc | """
https://portswigger.net/web-security/file-path-traversal/lab-validate-file-extension-null-byte-bypass
"""
import sys
import requests
site = sys.argv[1]
if 'https://' in site:
site = site.rstrip('/').lstrip('https://')
url = f'''https://{site}/image?filename=../../../etc/passwd%00.png'''
s = requests.Session()
resp = s.get(url)
print(resp.text) | [((304, 322), 'requests.Session', 'requests.Session', ([], {}), '()\n', (320, 322), False, 'import requests\n')] |
joselynzhao/One-shot-Person-Re-ID-ATM | atmpro1_vsm2.py | d039b1a66410f87cfe931774eba54a5f1a1a0260 | #!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/3 11:03 AM
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1_vsm2.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/1 7:07 PM
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1_vsm.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 8:26 PM
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1.py
# @Software: PyCharm
# @Desc :
from my_reid.eug import *
from my_reid import datasets
from my_reid import models
import numpy as np
import torch
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from my_reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from my_reid.utils.serialization import load_checkpoint
from torch import nn
import time
import pickle
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
def resume(savepath):
import re
pattern = re.compile(r'step_(\d+)\.ckpt')
start_step = -1
ckpt_file = ""
# find start step
files = os.listdir(savepath)
files.sort()
for filename in files:
try:
iter_ = int(pattern.search(filename).groups()[0])
print(iter_)
if iter_ > start_step:
start_step = iter_
ckpt_file = osp.join(savepath, filename)
except:
continue
# if need resume
if start_step >= 0:
print("continued from iter step", start_step)
else:
print("resume failed", start_step, files)
return start_step, ckpt_file
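# e.g. (illustrative) a save dir containing step_0.ckpt and step_5.ckpt makes
# resume() return (5, '<savepath>/step_5.ckpt'); with no checkpoints it
# returns (-1, "") and training starts from scratch.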
def main(args):
father = Path('/mnt/')
if father.exists(): # running on the server
data_dir = Path('/mnt/share/datasets/RE-ID/data') # server
logs_dir = Path('/mnt/home/{}'.format(args.log_name)) # server
else: # local machine
data_dir = Path('/home/joselyn/workspace/ATM_SERIES/data') # use this when running locally
logs_dir = Path('/home/joselyn/workspace/ATM_SERIES/{}'.format(args.log_name)) # use this when running locally
cudnn.benchmark = True
cudnn.enabled = True
save_path = os.path.join(logs_dir, args.dataset, args.exp_name, args.exp_order) # down to the experiment-number directory.
total_step = 100 // args.EF + 1
sys.stdout = Logger(osp.join(save_path, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))
dataf_file = open(osp.join(save_path, 'dataf.txt'), 'a') # save performance data (performance in the feature space).
data_file = open(osp.join(save_path, 'data.txt'), 'a') # save performance data (performance in the feature space).
kf_file = open(osp.join(save_path,'kf.txt'),'a')
# data format: label_pre_r, select_pre_r, label_pre_t, select_pre_t -- i.e. with the tagper data added.
tagper_path = osp.join(save_path,'tagper') # storage path for the tagper.
if not Path(tagper_path).exists():
os.mkdir(tagper_path)
'''# record configuration info and paths'''
print('-'*20+'config_info'+'-'*20)
config_file = open(osp.join(save_path, 'config.txt'), 'w')
config_info = str(args).split('(')[1].strip(')').split(',')
config_info.sort()
for one in config_info:
key,value=map(str,one.split('='))
config_file.write(key.strip()+'='+value.strip('\'')+'\n')
print(key.strip()+'='+value.strip('\''))
config_file.write('save_path='+save_path)
print('save_path='+save_path)
print('-' * 20 + 'config_info' + '-' * 20)
config_file.close()
train_time_file = open(osp.join(save_path, 'time.txt'), 'a') # only records the time spent on training.
# data format: step_time total_time.
total_time = 0
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(data_dir, args.dataset))
num_all_examples = len(dataset_all.train)
l_data, u_data = get_init_shot_in_cam1(dataset_all,
load_path="./examples/{}_init_{}.pickle".format(dataset_all.name, args.init),
init=args.init)
resume_step, ckpt_file = -1, ''
if args.resume:
resume_step, ckpt_file = resume(save_path)
# initial the EUG algorithm
eug = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=save_path, max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
tagper = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=tagper_path,
max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
new_train_data = l_data
unselected_data = u_data
iter_mode = 2 # iteration mode: decides whether the tagper is trained
for step in range(total_step):
# for resume
if step < resume_step:
continue
ratio = (step + 1) * args.EF / 100
ratio_t = (step+1+args.t) * args.EF /100
nums_to_select = int(len(u_data) * ratio)
nums_to_select_tagper = int(len(u_data) * ratio_t)
if nums_to_select >= len(u_data):
break
# decay of args.vsm_lambda from 0.5 to 0
vsm_lambda = args.vsm_lambda*step/(1-(total_step/2)) +args.vsm_lambda
vsm_lambda +=1
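# Worked example (illustrative): with EF=10, total_step = 100//10+1 = 11; at
# step 0 the reid model selects ~10% of u_data while the tagper target looks
# args.t steps ahead (~30% when t=2). vsm_lambda starts at args.vsm_lambda and
# decays linearly as step grows (then +1 is added above).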
print("Runing: EF={}%, step {}:\t Nums_to_be_select {} \t Ritio \t Logs-dir {}".format(
args.EF, step, nums_to_select, ratio, save_path))
# train the model or load ckpt
start_time = time.time()
print("training reid model")
eug.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size,
init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)
# only evaluate performance for eug
# mAP, rank1, rank5, rank10, rank20 = 0, 0, 0, 0, 0
mAP, rank1, rank5, rank10, rank20 = eug.evaluate(dataset_all.query, dataset_all.gallery)
# write the data to the data file.
data_file.write('{} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step, mAP, rank1, rank5, rank10, rank20))
pred_y, pred_score,label_pre,dists= eug.estimate_label_vsm()
selected_idx = eug.select_top_data_vsm2(pred_score, dists,args.topk,vsm_lambda,min(nums_to_select_tagper,len(u_data)-50) if iter_mode==2 else min(nums_to_select,len(u_data))) # sample at the look-ahead target directly; reserve 50 samples so unselected_data never becomes empty
new_train_data, unselected_data, select_pre= eug.generate_new_train_data(selected_idx, pred_y)
raw_label_pre, raw_select_pre = label_pre,select_pre
t_label_pre,t_select_pre = 0,0
raw_select_pre_t = 0
# label_pre_t,select_pre_t=0,0
if iter_mode==2:
raw_select_pre_t = raw_select_pre
print("training tagper model")
selected_idx = eug.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda, min(nums_to_select, len(u_data)))
_, _, raw_select_pre = eug.generate_new_train_data(selected_idx, pred_y)
# kf_file.write('{} {:.2%} {:.2%}'.format(step, label_pre, select_pre))
tagper.resume(osp.join(save_path,'step_{}.ckpt'.format(step)),step)
tagper.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1)
pred_y, pred_score, label_pre,dists= tagper.estimate_label_vsm()
selected_idx = tagper.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda,min(nums_to_select,len(u_data))) # target number of samples
new_train_data, unselected_data, select_pre= tagper.generate_new_train_data(selected_idx, pred_y)
t_label_pre,t_select_pre = label_pre,select_pre
label_pre,select_pre = t_label_pre,t_select_pre
if nums_to_select_tagper >=len(u_data):
iter_mode=1 # switch mode
print('tagper is stopped')
else: #mode = 1
# raw_select_pre = raw_select_pre_t
# raw_select_pre_t = 0
label_pre,select_pre = raw_label_pre,raw_select_pre
end_time = time.time()
step_time = end_time - start_time
total_time = step_time + total_time
train_time_file.write('{} {:.6} {:.6}\n'.format(step, step_time, total_time))
kf_file.write('{} {} {} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step,nums_to_select,nums_to_select_tagper,raw_label_pre,raw_select_pre,raw_select_pre_t,t_label_pre,t_select_pre))
dataf_file.write(
'{} {:.2%} {:.2%}\n'.format(step, label_pre, select_pre))
dataf_file.close()
train_time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--fea', type=int, default=1024)
parser.add_argument('--EF', type=int, default=10)
parser.add_argument('--t', type=float, default=2) # no longer the tagper sampling multiplier; now it means how many steps ahead to sample.
parser.add_argument('--exp_order', type=str, default='0')
parser.add_argument('--exp_name', type=str, default='atm')
parser.add_argument('--exp_aim', type=str, default='for paper')
parser.add_argument('--run_file',type=str,default='train.py')
parser.add_argument('--log_name',type=str,default='pl_logs')
parser.add_argument('--topk',type=int,default=2)
parser.add_argument('--vsm_lambda',type=float,default=0.5)
parser.add_argument('--resume', type=str, default='Yes')
parser.add_argument('--max_frames', type=int, default=900)
parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss'])
parser.add_argument('--init', type=float, default=-1)
parser.add_argument('-m', '--momentum', type=float, default=0.5)
parser.add_argument('-e', '--epochs', type=int, default=70)
parser.add_argument('-s', '--step_size', type=int, default=55)
parser.add_argument('--lamda', type=float, default=0.5)
main(parser.parse_args())
| [((755, 788), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (778, 788), False, 'import warnings\n'), ((1235, 1267), 're.compile', 're.compile', (['"""step_(\\\\d+)\\\\.ckpt"""'], {}), "('step_(\\\\d+)\\\\.ckpt')\n", (1245, 1267), False, 'import re\n'), ((1341, 1361), 'os.listdir', 'os.listdir', (['savepath'], {}), '(savepath)\n', (1351, 1361), False, 'import os\n'), ((1894, 1907), 'pathlib.Path', 'Path', (['"""/mnt/"""'], {}), "('/mnt/')\n", (1898, 1907), False, 'from pathlib import Path\n'), ((2333, 2400), 'os.path.join', 'os.path.join', (['logs_dir', 'args.dataset', 'args.exp_name', 'args.exp_order'], {}), '(logs_dir, args.dataset, args.exp_name, args.exp_order)\n', (2345, 2400), False, 'import os\n'), ((2880, 2909), 'os.path.join', 'osp.join', (['save_path', '"""tagper"""'], {}), "(save_path, 'tagper')\n", (2888, 2909), True, 'import os.path as osp\n'), ((8751, 8837), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Progressive Learning for One-Example re-ID"""'}), "(description=\n 'Progressive Learning for One-Example re-ID')\n", (8774, 8837), False, 'import argparse\n'), ((1960, 1998), 'pathlib.Path', 'Path', (['"""/mnt/share/datasets/RE-ID/data"""'], {}), "('/mnt/share/datasets/RE-ID/data')\n", (1964, 1998), False, 'from pathlib import Path\n'), ((2108, 2155), 'pathlib.Path', 'Path', (['"""/home/joselyn/workspace/ATM_SERIES/data"""'], {}), "('/home/joselyn/workspace/ATM_SERIES/data')\n", (2112, 2155), False, 'from pathlib import Path\n'), ((2580, 2612), 'os.path.join', 'osp.join', (['save_path', '"""dataf.txt"""'], {}), "(save_path, 'dataf.txt')\n", (2588, 2612), True, 'import os.path as osp\n'), ((2665, 2696), 'os.path.join', 'osp.join', (['save_path', '"""data.txt"""'], {}), "(save_path, 'data.txt')\n", (2673, 2696), True, 'import os.path as osp\n'), ((2747, 2776), 'os.path.join', 'osp.join', (['save_path', '"""kf.txt"""'], {}), "(save_path, 'kf.txt')\n", (2755, 2776), True, 'import os.path as osp\n'), ((2970, 2991), 'os.mkdir', 'os.mkdir', (['tagper_path'], {}), '(tagper_path)\n', (2978, 2991), False, 'import os\n'), ((3079, 3112), 'os.path.join', 'osp.join', (['save_path', '"""config.txt"""'], {}), "(save_path, 'config.txt')\n", (3087, 3112), True, 'import os.path as osp\n'), ((3570, 3601), 'os.path.join', 'osp.join', (['save_path', '"""time.txt"""'], {}), "(save_path, 'time.txt')\n", (3578, 3601), True, 'import os.path as osp\n'), ((3785, 3817), 'os.path.join', 'osp.join', (['data_dir', 'args.dataset'], {}), '(data_dir, args.dataset)\n', (3793, 3817), True, 'import os.path as osp\n'), ((5657, 5668), 'time.time', 'time.time', ([], {}), '()\n', (5666, 5668), False, 'import time\n'), ((8186, 8197), 'time.time', 'time.time', ([], {}), '()\n', (8195, 8197), False, 'import time\n'), ((8934, 8950), 'my_reid.datasets.names', 'datasets.names', ([], {}), '()\n', (8948, 8950), False, 'from my_reid import datasets\n'), ((1604, 1632), 'os.path.join', 'osp.join', (['savepath', 'filename'], {}), '(savepath, filename)\n', (1612, 1632), True, 'import os.path as osp\n'), ((2934, 2951), 'pathlib.Path', 'Path', (['tagper_path'], {}), '(tagper_path)\n', (2938, 2951), False, 'from pathlib import Path\n'), ((2514, 2546), 'time.strftime', 'time.strftime', (['""".%m_%d_%H:%M:%S"""'], {}), "('.%m_%d_%H:%M:%S')\n", (2527, 2546), False, 'import time\n')] |
eHealthAfrica/aether-elasticsearch-consumer | consumer/tests/test__index_handler.py | fc29a1da8cfd7482257b1023b50a1a43372886c5 | # Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pytest
import requests
import responses
from time import sleep
from elasticsearch.exceptions import NotFoundError
from aet.logger import get_logger
from app import index_handler
from . import * # noqa # fixtures
LOG = get_logger('TEST-IDX')
# convenience function for jsonpath
@responses.activate
@pytest.mark.unit
def test__handle_http():
responses.add(
responses.GET,
'http://bad-url',
json={'error': 'not found'},
status=404
)
res = requests.get('http://bad-url')
with pytest.raises(requests.exceptions.HTTPError):
index_handler.handle_http(res)
@pytest.mark.unit
def test__get_es_index_from_autoconfig(SubscriptionDefinition, ComplexSchema):
es_options = SubscriptionDefinition.get('es_options')
tenant = 'dev'
name = 'a-topic'
alias = es_options.get('alias_name')
index = index_handler.get_es_index_from_subscription(
es_options, name, tenant, ComplexSchema
)
LOG.debug(json.dumps(index, indent=2))
assert(first('$.name', index) == f'{tenant}.{name}')
geo_name = es_options['geo_point_name']
assert(first(
f'$.body.mappings._doc.properties.{geo_name}', index) is not None)
@pytest.mark.unit
def test__get_index_for_topic(SubscriptionDefinition, ComplexSchema):
name = 'Person'
es_options = SubscriptionDefinition.get('es_options')
geo_name = es_options.get('geo_point_name')
auto_ts = es_options.get('auto_timestamp')
index = index_handler.get_index_for_topic(name, geo_name, auto_ts, ComplexSchema)
index = index.get('mappings', None)
assert(len(index) == 1)
assert(first('$._doc', index) is not None)
assert(first(f'$._doc.properties.{geo_name}.type', index) == 'geo_point')
assert(first(f'$._doc._meta.aet_auto_ts', index) == auto_ts)
@pytest.mark.unit
def test__get_es_types_from_schema(ComplexSchema):
res = index_handler.get_es_types_from_schema(ComplexSchema)
assert(first('$.beds.type', res) == 'integer')
assert(first('$.username.type', res) == 'keyword')
assert(first('$._start.type', res) == 'date')
assert(first('$.geometry.type', res) == 'object')
assert(first('$.meta.type', res) == 'object')
assert(first('$.mandatory_date.type', res) == 'date')
assert(first('$.mandatory_date.format', res) == 'date')
assert(first('$.optional_dt.type', res) == 'date')
assert(first('$.optional_dt.format', res) == 'epoch_millis')
assert(len(list(res.keys())) == 55)
@pytest.mark.unit
def test__make_kibana_index(AutoGenSchema):
name = 'kibana-index-name'
res = index_handler.make_kibana_index(name, AutoGenSchema)
assert(res.get('attributes', {}).get('title') == name)
@pytest.mark.unit
def test___find_timestamp(ComplexSchema):
result = index_handler._find_timestamp(ComplexSchema)
assert(result == 'timestamp')
@pytest.mark.unit
def test___format_lookups(ComplexSchema):
formatted = index_handler._format_lookups(ComplexSchema)
assert(
json.dumps(
formatted.get(
'operational_status'), sort_keys=True) ==
json.dumps(
SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test___format_single_lookup(ComplexSchema):
matching = ComplexSchema.get_node('MySurvey.operational_status')
res = index_handler._format_single_lookup(matching)
assert(
json.dumps(res, sort_keys=True) ==
json.dumps(SAMPLE_FIELD_LOOKUP.get(
'operational_status'), sort_keys=True)
)
@pytest.mark.unit
def test__get_alias_from_namespace():
namespace = 'A_Gather_Form_V1'
res = index_handler.get_alias_from_namespace(namespace)
assert(res == 'A_Gather_Form')
@pytest.mark.integration
def test__update_es_index(TestElasticsearch, PolySchemaA, PolySchemaB):
# register index with mapping
es = TestElasticsearch.get_session()
doc_id = 'poly-test-doc'
doc = {
'id': doc_id,
'poly': '1001'
}
index_a = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaA
)
index_name = index_a.get('name')
index_b = index_handler.get_es_index_from_subscription(
es_options={},
name='test1',
tenant='test-tenant',
schema=PolySchemaB
)
alias = index_handler.get_alias_from_namespace(PolySchemaA.name)
# register schema A
index_handler.update_es_index(es, index_a, 'test-tenant', alias)
# put doc
es.create(
index=index_name,
id=doc_id,
body=doc
)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') < 1.0) # find imperfect by string
# migrate to schema B
index_handler.update_es_index(es, index_b, 'test-tenant', alias)
es.indices.refresh(index=index_name)
res = es.search(index=index_name, body={
"query": {"term": {"poly": "1001"}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by string
res = es.search(index=index_name, body={
"query": {"term": {"poly": 1001}}
})
assert(res.get('hits').get('max_score') == 1.0) # find by int
| [((976, 998), 'aet.logger.get_logger', 'get_logger', (['"""TEST-IDX"""'], {}), "('TEST-IDX')\n", (986, 998), False, 'from aet.logger import get_logger\n'), ((1105, 1196), 'responses.add', 'responses.add', (['responses.GET', '"""http://bad-url"""'], {'json': "{'error': 'not found'}", 'status': '(404)'}), "(responses.GET, 'http://bad-url', json={'error': 'not found'},\n status=404)\n", (1118, 1196), False, 'import responses\n'), ((1241, 1271), 'requests.get', 'requests.get', (['"""http://bad-url"""'], {}), "('http://bad-url')\n", (1253, 1271), False, 'import requests\n'), ((1616, 1705), 'app.index_handler.get_es_index_from_subscription', 'index_handler.get_es_index_from_subscription', (['es_options', 'name', 'tenant', 'ComplexSchema'], {}), '(es_options, name, tenant,\n ComplexSchema)\n', (1660, 1705), False, 'from app import index_handler\n'), ((2228, 2301), 'app.index_handler.get_index_for_topic', 'index_handler.get_index_for_topic', (['name', 'geo_name', 'auto_ts', 'ComplexSchema'], {}), '(name, geo_name, auto_ts, ComplexSchema)\n', (2261, 2301), False, 'from app import index_handler\n'), ((2641, 2694), 'app.index_handler.get_es_types_from_schema', 'index_handler.get_es_types_from_schema', (['ComplexSchema'], {}), '(ComplexSchema)\n', (2679, 2694), False, 'from app import index_handler\n'), ((3338, 3390), 'app.index_handler.make_kibana_index', 'index_handler.make_kibana_index', (['name', 'AutoGenSchema'], {}), '(name, AutoGenSchema)\n', (3369, 3390), False, 'from app import index_handler\n'), ((3525, 3569), 'app.index_handler._find_timestamp', 'index_handler._find_timestamp', (['ComplexSchema'], {}), '(ComplexSchema)\n', (3554, 3569), False, 'from app import index_handler\n'), ((3682, 3726), 'app.index_handler._format_lookups', 'index_handler._format_lookups', (['ComplexSchema'], {}), '(ComplexSchema)\n', (3711, 3726), False, 'from app import index_handler\n'), ((4109, 4154), 'app.index_handler._format_single_lookup', 'index_handler._format_single_lookup', (['matching'], {}), '(matching)\n', (4144, 4154), False, 'from app import index_handler\n'), ((4414, 4463), 'app.index_handler.get_alias_from_namespace', 'index_handler.get_alias_from_namespace', (['namespace'], {}), '(namespace)\n', (4452, 4463), False, 'from app import index_handler\n'), ((4780, 4899), 'app.index_handler.get_es_index_from_subscription', 'index_handler.get_es_index_from_subscription', ([], {'es_options': '{}', 'name': '"""test1"""', 'tenant': '"""test-tenant"""', 'schema': 'PolySchemaA'}), "(es_options={}, name='test1',\n tenant='test-tenant', schema=PolySchemaA)\n", (4824, 4899), False, 'from app import index_handler\n'), ((4985, 5104), 'app.index_handler.get_es_index_from_subscription', 'index_handler.get_es_index_from_subscription', ([], {'es_options': '{}', 'name': '"""test1"""', 'tenant': '"""test-tenant"""', 'schema': 'PolySchemaB'}), "(es_options={}, name='test1',\n tenant='test-tenant', schema=PolySchemaB)\n", (5029, 5104), False, 'from app import index_handler\n'), ((5151, 5207), 'app.index_handler.get_alias_from_namespace', 'index_handler.get_alias_from_namespace', (['PolySchemaA.name'], {}), '(PolySchemaA.name)\n', (5189, 5207), False, 'from app import index_handler\n'), ((5236, 5300), 'app.index_handler.update_es_index', 'index_handler.update_es_index', (['es', 'index_a', '"""test-tenant"""', 'alias'], {}), "(es, index_a, 'test-tenant', alias)\n", (5265, 5300), False, 'from app import index_handler\n'), ((5818, 5882), 'app.index_handler.update_es_index', 'index_handler.update_es_index', (['es', 'index_b', 
'"""test-tenant"""', 'alias'], {}), "(es, index_b, 'test-tenant', alias)\n", (5847, 5882), False, 'from app import index_handler\n'), ((1281, 1325), 'pytest.raises', 'pytest.raises', (['requests.exceptions.HTTPError'], {}), '(requests.exceptions.HTTPError)\n', (1294, 1325), False, 'import pytest\n'), ((1335, 1365), 'app.index_handler.handle_http', 'index_handler.handle_http', (['res'], {}), '(res)\n', (1360, 1365), False, 'from app import index_handler\n'), ((1730, 1757), 'json.dumps', 'json.dumps', (['index'], {'indent': '(2)'}), '(index, indent=2)\n', (1740, 1757), False, 'import json\n'), ((4175, 4206), 'json.dumps', 'json.dumps', (['res'], {'sort_keys': '(True)'}), '(res, sort_keys=True)\n', (4185, 4206), False, 'import json\n')] |
datopian/plans | plans/config.py | 12bd9ff6f725703e7a73f3ad90680f5ade8cebdf | import os
database_url = os.environ.get('DATABASE_URL')
| [((26, 56), 'os.environ.get', 'os.environ.get', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (40, 56), False, 'import os\n')] |
ShubhamKahlon57/Letsupgrade-python-Batch-7 | Assignment Day 2 .py | 7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#List and function
# In[6]:
# empty list
my_list = []
# list of integers
my_list = [1, 2, 3]
# list with mixed data types
my_list = [1, "Hello", 3.4]
# In[7]:
# nested list
my_list = ["mouse", [8, 4, 6], ['a']]
# In[11]:
# List indexing
my_list = ['p', 'r', 'o', 'b', 'e']
# Output: p
print(my_list[0])
# Output: o
print(my_list[2])
# Output: e
print(my_list[4])
# Nested List
n_list = ["Happy", [2, 0, 1, 5]]
# Nested indexing
print(n_list[0][1])
print(n_list[1][3])
# Error! Only integer can be used for indexing
# print(my_list[4.0])  # raises TypeError: list indices must be integers
# In[9]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[13]:
# Deleting list items
my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm']
# delete one item
del my_list[2]
print(my_list)
# delete multiple items
del my_list[1:5]
print(my_list)
# delete entire list
del my_list
# In[14]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[15]:
#Dictionary and function
# In[18]:
my_dict = {}  # empty dictionary
# dictionary with integer keys
my_dict = {1: 'apple', 2: 'ball'}
# dictionary with mixed keys
my_dict = {'name': 'John', 1: [2, 4, 3]}
# using dict()
my_dict = dict({1:'apple', 2:'ball'})
# from sequence having each item as a pair
my_dict = dict([(1,'apple'), (2,'ball')])
# In[20]:
# get vs [] for retrieving elements
my_dict = {'name': 'Jack', 'age': 26}
# Output: Jack
print(my_dict['name'])
# Output: 26
print(my_dict.get('age'))
# In[21]:
# Changing and adding Dictionary Elements
my_dict = {'name': 'Jack', 'age': 26}
# update value
my_dict['age'] = 27
#Output: {'age': 27, 'name': 'Jack'}
print(my_dict)
# add item
my_dict['address'] = 'Downtown'
# Output: {'address': 'Downtown', 'age': 27, 'name': 'Jack'}
print(my_dict)
# In[22]:
#Sets and its function
# In[23]:
my_set = {1, 2, 3}
print(my_set)
# In[24]:
my_set = {1.0, "Hello", (1, 2, 3)}
print(my_set)
# In[25]:
# set cannot have duplicates
my_set = {1, 2, 3, 4, 3, 2}
print(my_set)
# In[26]:
#Tuple and its method
# In[27]:
# Tuple having integers
my_tuple = (1, 2, 3)
print(my_tuple)
# In[28]:
my_tuple = ("hello")
print(type(my_tuple))
# In[30]:
# Accessing tuple elements using indexing
my_tuple = ('p','e','r','m','i','t')
print(my_tuple[0])
print(my_tuple[5])
# In[31]:
print(my_tuple[-1])
# In[32]:
print(my_tuple[-6])
# In[36]:
# Changing tuple values
my_tuple = (4, 2, 3, [6, 5])
# TypeError: 'tuple' object does not support item assignment
# my_tuple[1] = 9
# However, item of mutable element can be changed
my_tuple[3][0] = 9 # Output: (4, 2, 3, [9, 5])
print(my_tuple)
# Tuples can be reassigned
my_tuple = ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
# Output: ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
print(my_tuple)
# In[37]:
#String and its function
# In[38]:
# Python string examples - all assignments are identical.
String_var = 'Python'
String_var = "Python"
String_var = """Python"""
# with Triple quotes Strings can extend to multiple lines
String_var = """ This document will help you to
explore all the concepts
of Python Strings!!! """
# Replace "document" with "tutorial" and store in another variable
substr_var = String_var.replace("document", "tutorial")
print (substr_var)
# In[ ]:
| [] |
irvandindaprakoso/online-test-py | hackerrank/pickingNumbers.py | a7a6cd98ba3e0b74558ecb7e431eb2729077a38a | def pickingNumbers(a):
# Write your code here
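    # Approach: a valid pick may only contain two adjacent values (i and i-1),
    # so for every element count occurrences of i and i-1 and keep the best total.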
    best = 0  # avoid shadowing the built-in max()
    for i in a:
        c = a.count(i)
        d = a.count(i - 1)
        e = c + d
        if e > best:
            best = e
    return best
| [] |
preston-wagner/authorizesauce | tests/test_api_transaction.py | 130ee30f500c8b5bf9a6384296ca4f5d5bb565e7 | from datetime import date
from six import BytesIO, binary_type, u
from six.moves.urllib.parse import parse_qsl, urlencode
from unittest2 import TestCase
import mock
from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI
from authorizesauce.data import Address, CreditCard
from authorizesauce.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
class MockResponse(BytesIO):
class Headers(dict):
def getparam(self, *args, **kwargs):
"""Python 2 version"""
return None
def get_content_charset(self, failobj=None, *args, **kwargs):
"""Python 3 version"""
return failobj
def __init__(self, *args, **kwargs):
BytesIO.__init__(self, *args, **kwargs)
self.headers = self.Headers()
SUCCESS = MockResponse(
b'1;1;1;This transaction has been approved.;IKRAGJ;Y;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;P;2;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_SUCCESS = {
'cvv_response': 'P',
'authorization_code': 'IKRAGJ',
'response_code': '1',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'Y',
'response_reason_code': '1',
'response_reason_text': 'This transaction has been approved.',
'transaction_id': '2171062816',
}
ERROR = MockResponse(
b'2;1;2;This transaction has been declined.;000000;N;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;N;1;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_ERROR = {
'cvv_response': 'N',
'authorization_code': '000000',
'response_code': '2',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'N',
'response_reason_code': '2',
'response_reason_text': 'This transaction has been declined.',
'transaction_id': '2171062816',
}
def _unicode_str(s):
if isinstance(s, binary_type):
return s.decode('unicode_escape')
return s
def _are_params_eq(params1, params2):
_params1, _params2 = map(_unicode_str, (params1, params2))
return frozenset(parse_qsl(_params1)) == frozenset(parse_qsl(_params2))
class TransactionAPITests(TestCase):
def setUp(self):
self.api = TransactionAPI('123', '456')
self.success = lambda *args, **kwargs: SUCCESS.seek(0) or SUCCESS
self.error = lambda *args, **kwargs: ERROR.seek(0) or ERROR
self.year = date.today().year + 10
self.credit_card = CreditCard('4111111111111111', self.year, 1, '911')
self.address = Address('45 Rose Ave', 'Venice', 'CA', '90291')
def test_basic_api(self):
api = TransactionAPI('123', '456')
self.assertEqual(api.url, TEST_URL)
api = TransactionAPI('123', '456', debug=False)
self.assertEqual(api.url, PROD_URL)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call(self, urlopen):
urlopen.side_effect = self.success
params = {'a': '1', 'b': '2'}
result = self.api._make_call(params)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], urlencode(params)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_with_unicode(self, urlopen):
urlopen.side_effect = self.success
result = self.api._make_call({u('\xe3'): '1', 'b': u('\xe3')})
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(_are_params_eq(
urlopen.call_args[1]['data'], 'b=%C3%A3&%C3%A3=1'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_connection_error(self, urlopen):
urlopen.side_effect = IOError('Borked')
self.assertRaises(AuthorizeConnectionError, self.api._make_call,
{'a': '1', 'b': '2'})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_make_call_response_error(self, urlopen):
urlopen.side_effect = self.error
try:
self.api._make_call({'a': '1', 'b': '2'})
except AuthorizeResponseError as e:
self.assertTrue(str(e).startswith(
'This transaction has been declined.'
))
self.assertEqual(e.full_response, PARSED_ERROR)
def test_add_params(self):
self.assertEqual(self.api._add_params({}), {})
params = self.api._add_params({}, credit_card=self.credit_card)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
})
params = self.api._add_params({}, address=self.address)
self.assertEqual(params, {
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
params = self.api._add_params(
{}, credit_card=self.credit_card, address=self.address
)
self.assertEqual(params, {
'x_card_num': '4111111111111111',
'x_exp_date': '01-{0}'.format(self.year),
'x_card_code': '911',
'x_address': '45 Rose Ave',
'x_city': 'Venice',
'x_state': 'CA',
'x_zip': '90291',
'x_country': 'US',
})
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_auth(self, urlopen):
urlopen.side_effect = self.success
result = self.api.auth(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_capture(self, urlopen):
urlopen.side_effect = self.success
result = self.api.capture(20, self.credit_card, self.address)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'x_login=123&x_zip=90291&x_card_num=4111111111111111&'
'x_amount=20.00&x_tran_key=456&x_city=Venice&x_country=US&'
'x_version=3.1&x_state=CA&x_delim_char=%3B&'
'x_address=45+Rose+Ave&x_exp_date=01-{0}&x_test_request=FALSE'
'&x_card_code=911&x_type=AUTH_ONLY&x_delim_data=TRUE'.format(
str(self.year)
)
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_settle(self, urlopen):
urlopen.side_effect = self.success
# Test without specified amount
result = self.api.settle('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_delim_data=TRUE&x_tran_key=456'
'&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
# Test with specified amount
result = self.api.settle('123456', amount=10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B'
'&x_type=PRIOR_AUTH_CAPTURE&x_amount=10.00&x_delim_data=TRUE'
'&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_credit(self, urlopen):
urlopen.side_effect = self.success
# Test with transaction_id, amount
result = self.api.credit('1111', '123456', 10)
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_amount=10.00'
'&x_delim_char=%3B&x_type=CREDIT&x_card_num=1111'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
@mock.patch('authorizesauce.apis.transaction.urlopen')
def test_void(self, urlopen):
urlopen.side_effect = self.success
result = self.api.void('123456')
self.assertEqual(urlopen.call_args[0][0], TEST_URL)
self.assertTrue(urlopen.call_args[1]['data'], (
'https://test.authorize.net/gateway/transact.dll?x_login=123'
'&x_trans_id=123456&x_version=3.1&x_delim_char=%3B&x_type=VOID'
'&x_delim_data=TRUE&x_tran_key=456&x_test_request=FALSE'
))
self.assertEqual(result, PARSED_SUCCESS)
| [((2995, 3048), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3005, 3048), False, 'import mock\n'), ((3440, 3493), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3450, 3493), False, 'import mock\n'), ((3888, 3941), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (3898, 3941), False, 'import mock\n'), ((4173, 4226), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (4183, 4226), False, 'import mock\n'), ((5684, 5737), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (5694, 5737), False, 'import mock\n'), ((6454, 6507), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (6464, 6507), False, 'import mock\n'), ((7230, 7283), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (7240, 7283), False, 'import mock\n'), ((8408, 8461), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (8418, 8461), False, 'import mock\n'), ((9089, 9142), 'mock.patch', 'mock.patch', (['"""authorizesauce.apis.transaction.urlopen"""'], {}), "('authorizesauce.apis.transaction.urlopen')\n", (9099, 9142), False, 'import mock\n'), ((735, 774), 'six.BytesIO.__init__', 'BytesIO.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (751, 774), False, 'from six import BytesIO, binary_type, u\n'), ((2407, 2435), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {}), "('123', '456')\n", (2421, 2435), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2648, 2699), 'authorizesauce.data.CreditCard', 'CreditCard', (['"""4111111111111111"""', 'self.year', '(1)', '"""911"""'], {}), "('4111111111111111', self.year, 1, '911')\n", (2658, 2699), False, 'from authorizesauce.data import Address, CreditCard\n'), ((2723, 2770), 'authorizesauce.data.Address', 'Address', (['"""45 Rose Ave"""', '"""Venice"""', '"""CA"""', '"""90291"""'], {}), "('45 Rose Ave', 'Venice', 'CA', '90291')\n", (2730, 2770), False, 'from authorizesauce.data import Address, CreditCard\n'), ((2816, 2844), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {}), "('123', '456')\n", (2830, 2844), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2903, 2944), 'authorizesauce.apis.transaction.TransactionAPI', 'TransactionAPI', (['"""123"""', '"""456"""'], {'debug': '(False)'}), "('123', '456', debug=False)\n", (2917, 2944), False, 'from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI\n'), ((2273, 2292), 'six.moves.urllib.parse.parse_qsl', 'parse_qsl', (['_params1'], {}), '(_params1)\n', (2282, 2292), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((2307, 2326), 'six.moves.urllib.parse.parse_qsl', 'parse_qsl', (['_params2'], {}), '(_params2)\n', (2316, 2326), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((2598, 2610), 'datetime.date.today', 
'date.today', ([], {}), '()\n', (2608, 2610), False, 'from datetime import date\n'), ((3356, 3373), 'six.moves.urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (3365, 3373), False, 'from six.moves.urllib.parse import parse_qsl, urlencode\n'), ((3627, 3633), 'six.u', 'u', (['"""ã"""'], {}), "('ã')\n", (3628, 3633), False, 'from six import BytesIO, binary_type, u\n'), ((3648, 3654), 'six.u', 'u', (['"""ã"""'], {}), "('ã')\n", (3649, 3654), False, 'from six import BytesIO, binary_type, u\n')] |
balmasea/genieparser | src/genie/libs/parser/iosxe/tests/ShowIpv6ProtocolsSectionRip/cli/equal/golden_output_2_expected.py | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | expected_output = {
"vrf": {
"VRF1": {
"address_family": {
"ipv6": {
"instance": {
"rip ripng": {
"redistribute": {
"static": {"route_policy": "static-to-rip"},
"connected": {},
},
"interfaces": {
"GigabitEthernet3.200": {},
"GigabitEthernet2.200": {},
},
}
}
}
}
}
}
}
| [] |
sungpyocho/covid19-aichi-tools | build_json.py | 5170bf405f67b14179fe10838701ec5baa9d6cc1 | import csv
import io
import json
import pandas as pd
import sys
from dateutil import tz
from datetime import datetime, date, time, timedelta
# Japan Standard Time (UTC + 09:00)
JST = tz.gettz('Asia/Tokyo')
JST_current_time = datetime.now(tz=JST).strftime('%Y/%m/%d %H:%M')
patients_list = []
patients_summary_dic = {}
# Get the command-line arguments (note: no handling for invalid input)
args = sys.argv
with open('data/patients.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
patients_list.append(row)
patients_summary_dic.setdefault(row['date'], 0)
patients_summary_dic[row['date']] += 1
# Generate the list of dates
strdt = datetime.strptime("2020-01-26", '%Y-%m-%d') # start date
enddt = datetime.strptime(args[1], '%Y-%m-%d') # end date
# Compute the number of days between the two dates (+1 so the end date itself is included in the list)
days_num = (enddt - strdt).days + 1
datelist = []
for i in range(days_num):
datelist.append(strdt + timedelta(days = i))
patients_summary_list = []
# Walk the dates from newest to oldest and start output from the first day whose subtotal is non-zero
foundZero = True
for date in reversed(datelist):
if (not (date.strftime('%Y-%m-%d') in patients_summary_dic)) and foundZero:
continue
else:
foundZero = False
patients_summary_dic.setdefault(date.strftime('%Y-%m-%d'), 0)
patients_summary_list.append({
"日付": date.strftime('%Y-%m-%d'),
"小計": patients_summary_dic[date.strftime('%Y-%m-%d')]
})
patients_summary_list = patients_summary_list[::-1] # re-sort into ascending date order
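# The resulting list runs in ascending date order from the start date up to the most
# recent date with at least one reported case; days with no reports in between are
# filled with a subtotal of 0, and trailing empty days are dropped.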
# Convert main_summary_history.csv to a pandas DataFrame
main_summary_history_df = pd.read_csv('data/main_summary_history.csv', keep_default_na=False)
# Read the inspection counts
inspections_summary_list = []
with open('data/inspections_summary.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
inspections_summary_list.append({
"日付": datetime.strptime(row['検査日'], '%Y/%m/%d').strftime('%Y-%m-%d'),
"小計": int(row['検査件数(件)']),
"合算": row['合算']
})
data = {
"lastUpdate": JST_current_time,
"patients": {
"date": JST_current_time,
"data": patients_list
},
"patients_summary" : {
"date": JST_current_time,
"data": patients_summary_list
},
"inspections_summary" : {
"date": JST_current_time,
"data": inspections_summary_list
},
"main_summary_history": {
"date": JST_current_time,
"data": json.loads(main_summary_history_df.to_json(orient='records', force_ascii=False))
}
}
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
print(json.dumps(data, indent=4, ensure_ascii=False))
| [((184, 206), 'dateutil.tz.gettz', 'tz.gettz', (['"""Asia/Tokyo"""'], {}), "('Asia/Tokyo')\n", (192, 206), False, 'from dateutil import tz\n'), ((646, 689), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-26"""', '"""%Y-%m-%d"""'], {}), "('2020-01-26', '%Y-%m-%d')\n", (663, 689), False, 'from datetime import datetime, date, time, timedelta\n'), ((705, 743), 'datetime.datetime.strptime', 'datetime.strptime', (['args[1]', '"""%Y-%m-%d"""'], {}), "(args[1], '%Y-%m-%d')\n", (722, 743), False, 'from datetime import datetime, date, time, timedelta\n'), ((1527, 1594), 'pandas.read_csv', 'pd.read_csv', (['"""data/main_summary_history.csv"""'], {'keep_default_na': '(False)'}), "('data/main_summary_history.csv', keep_default_na=False)\n", (1538, 1594), True, 'import pandas as pd\n'), ((2513, 2566), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stdout.buffer'], {'encoding': '"""utf-8"""'}), "(sys.stdout.buffer, encoding='utf-8')\n", (2529, 2566), False, 'import io\n'), ((441, 464), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (455, 464), False, 'import csv\n'), ((1728, 1751), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1742, 1751), False, 'import csv\n'), ((2573, 2619), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(data, indent=4, ensure_ascii=False)\n', (2583, 2619), False, 'import json\n'), ((226, 246), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'JST'}), '(tz=JST)\n', (238, 246), False, 'from datetime import datetime, date, time, timedelta\n'), ((893, 910), 'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (902, 910), False, 'from datetime import datetime, date, time, timedelta\n'), ((1195, 1220), 'datetime.date.strftime', 'date.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (1208, 1220), False, 'from datetime import datetime, date, time, timedelta\n'), ((1035, 1060), 'datetime.date.strftime', 'date.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (1048, 1060), False, 'from datetime import datetime, date, time, timedelta\n'), ((1286, 1311), 'datetime.date.strftime', 'date.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (1299, 1311), False, 'from datetime import datetime, date, time, timedelta\n'), ((1352, 1377), 'datetime.date.strftime', 'date.strftime', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (1365, 1377), False, 'from datetime import datetime, date, time, timedelta\n'), ((1839, 1880), 'datetime.datetime.strptime', 'datetime.strptime', (["row['検査日']", '"""%Y/%m/%d"""'], {}), "(row['検査日'], '%Y/%m/%d')\n", (1856, 1880), False, 'from datetime import datetime, date, time, timedelta\n')] |
jjjkkkjjj/pytorch.dl | dl/models/ssd/modules/utils.py | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | import torch
from ....data.utils.boxes import centroids2corners, iou
def matching_strategy(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 1+4+class_labels)
:param dboxes: shape is (default boxes num, 4)
    IMPORTANT: Note that the box coordinates mean (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
        pos_indicator: Bool Tensor, shape = (batch, default box num). This represents whether each default box is an object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty((batch_num, dboxes_num, 4 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_conf = target[:, :4], target[:, 4:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
#object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)# ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:] = targets_loc[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 4:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
def matching_strategy_quads(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 4=(cx,cy,w,h)+8=(x1,y1,x2,y2,...)+class_labels)
:param dboxes: shape is (default boxes num, 4)
    IMPORTANT: Note that the box coordinates mean (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
        pos_indicator: Bool Tensor, shape = (batch, default box num). This represents whether each default box is an object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4 - 8
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty(
(batch_num, dboxes_num, 4 + 8 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_quad, targets_conf = target[:, :4], target[:, 4:12], target[:, 12:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1) # ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:12], matched_targets[b, :, 12:] = \
targets_loc[object_indices], targets_quad[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 12:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
| [((1129, 1198), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num)'], {'device': 'device', 'dtype': 'torch.bool'}), '((batch_num, dboxes_num), device=device, dtype=torch.bool)\n', (1140, 1198), False, 'import torch\n'), ((1200, 1266), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num, 4 + class_num)'], {'device': 'device'}), '((batch_num, dboxes_num, 4 + class_num), device=device)\n', (1211, 1266), False, 'import torch\n'), ((3110, 3136), 'torch.logical_not', 'torch.logical_not', (['pos_ind'], {}), '(pos_ind)\n', (3127, 3136), False, 'import torch\n'), ((4369, 4438), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num)'], {'device': 'device', 'dtype': 'torch.bool'}), '((batch_num, dboxes_num), device=device, dtype=torch.bool)\n', (4380, 4438), False, 'import torch\n'), ((4440, 4510), 'torch.empty', 'torch.empty', (['(batch_num, dboxes_num, 4 + 8 + class_num)'], {'device': 'device'}), '((batch_num, dboxes_num, 4 + 8 + class_num), device=device)\n', (4451, 4510), False, 'import torch\n'), ((6472, 6498), 'torch.logical_not', 'torch.logical_not', (['pos_ind'], {}), '(pos_ind)\n', (6489, 6498), False, 'import torch\n')] |
lancelee82/bluelake | sandroad.py | 3ac3bba191ec5e331dcf66e0a20725445585c316 | """
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
IMG_POS_BACKGROUND = {
'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
IMG_POS_SPRITES = {
'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
FP_COLORS = {
'SKY': '#72D7EE',
'TREE': '#005108',
'FOG': '#005108',
'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
FP_ROAD = {
'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
FP_ROAD_SPRTS = {
'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
#'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
#'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
#'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
class SptTmpx(sptdraw.SptDrawBase):
def __init__(self, size, *args, **kwargs):
super(SptTmpx, self).__init__(size)
self.draw_on()
def draw_on(self, *args, **kwargs):
self.fill(consts.GREEN)
self.pygm.draw.circle(self.surf, consts.WHITE,
(self.size[0] / 2, self.size[1] / 2),
self.size[0] / 2, 0)
class SptTmpi(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(SptTmpi, self).__init__(img_file)
class FPSptBg(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptBg, self).__init__(img_file, pos)
class FPSptSprts(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptSprts, self).__init__(img_file, pos)
class FPSptFog(sptdraw.SptDrawBase):
def __init__(self, size, c=[0, 81, 8, 0], h=30, *args, **kwargs):
super(FPSptFog, self).__init__(size)
self.c = c
self.h = h
self.draw_on()
def draw_on(self, *args, **kwargs):
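        # Draw the fog as a stack of thin horizontal strips (d px tall) whose alpha
        # fades from about 200 at the top strip down to nearly 0, i.e. a simple
        # top-to-bottom fade used to soften the horizon.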
#self.fill(self.c)
d = 2
n = self.h / d
for i in range(n):
rct = [0, i * d, self.size[0], d]
#ca = 255 / n * (n - i)
ca = 200 / n * (n - i)
self.c[3] = ca
self.pygm.draw.rect(self.surf, self.c, rct)
class FPSptRdSprts(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(FPSptRdSprts, self).__init__(img_file)
@classmethod
def create_by_img(cls, img):
return cls(img)
# for test
#o = SptTmpx((40, 40))
#return o
class FPSptRoadB(sptdraw.SptDrawBase):
def __init__(self, size, cfg, *args, **kwargs):
super(FPSptRoadB, self).__init__(size)
self.cfg = cfg
self.car = kwargs.get('car')
self.bg_sky = kwargs.get('bg_sky')
self.bg_hills = kwargs.get('bg_hills')
self.bg_trees = kwargs.get('bg_trees')
self.clr_dark_road = utils.clr_from_str(FP_COLORS['DARK']['road'])
self.clr_dark_grass = utils.clr_from_str(FP_COLORS['DARK']['grass'])
self.rd_reset(init=True)
self.add_fog()
def prms_reset(self, keep_segs=False):
self.e_keys_up = []
self.e_keys_dn = []
self.camera_x = 0.0
self.camera_y = 0.0
self.camera_z = 500.0#1000.0#0.0 == self.camera_h
self.xw = 0.0
self.yw = 0.0
self.zw = 0.0
self.xc = 0.0
self.yc = 0.0
self.zc = 0.0 ##
self.xp = 0.0
self.yp = 0.0
self.xs = 0.0
self.ys = 0.0
self.d = 200.0#100.0#10.0#30.0#1.0
self.w = self.size[0]
self.h = self.size[1]
if not keep_segs:
self.segments = []
self.rd_sprt_objs = {}
self.rd_sprt_cache = [] # for sprites render order
self.track_len = 0.0
self.seg_len = 200.0#100.0#20.0#60.0#200.0#
self.road_w = 2400#2000#600.0#200.0#1000.0#200#
self.camera_h = 500.0#1000.0#
self.speed_max = 300.0#180.0#200.0#100.0
self.lane_w = 60
self.seg_n = 300#200
#self.seg_draw_n = 200#150
self.seg_draw_n = 70#100#200#150
self.speed = 0.0
self.position = 0.0
self.player_x = 0.0#100.0#1000.0#
self.centrifugal = 0.1#0.06#0.08#0.01#0.3
self.player_seg = None
self.base_seg = None # the segment just under the car
self.player_di = 0 # 0:^ 1:> 2:v 3:<
self.player_go = 0 # 0:- 1:^ 2:v
self.speed_dt_up = 1.0#2.0#3.0
self.speed_dt_dn = 2.0#4.0#6.0
self.speed_dt_na = 1.0#3.0
self.player_x_dt = 60.0#30.0#20.0
self.last_seg_i = 0
self.score = 0
self.game_over = False
self.game_score = 0.0
self.tm_start = 0.0
self.tm_end = 0.0
self.tm_last_once = 0.0
self.sky_speed = 0.1#0.05#
self.hill_speed = 0.2#0.1#
self.tree_speed = 0.3#0.15#
def rd_reset(self, init=False, keep_segs=False, segs_file=None):
#if not init and not keep_segs:
if not init:
self.rd_sprts_del_all_objs()
self.prms_reset(keep_segs=keep_segs)
if segs_file is not None:
try:
segs = self.rd_seg_json_load(segs_file)
self.segments = segs
self.track_len = len(self.segments) * self.seg_len
except Exception as e:
print e
self.init_rd_segs_rand_1()
else:
if not keep_segs:
self.init_rd_segs_rand_1()
self.draw_on()
self.rd_seg_render()
def init_rd_segs_rand_1(self):
#self.rd_seg_init(self.seg_n)
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
#self.rd_seg_init(random.randint(30, 100))
self.rd_seg_init(random.randint(1, 10)) # for a3c train
self.rd_seg_init_rand_curve()
#self.add_curves()
#self.add_low_rolling_hills(20, 2.0)
##self.add_low_rolling_hills(30, 4.0)
#self.rd_seg_init_rand(10)#50#10#3#1
#segnrand = random.randint(3, 30)
segnrand = random.randint(2, 6) # for a3c train
self.rd_seg_init_rand(segnrand)
# for segment draw
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
self.rd_seg_init(10) # for a3c train
self.rd_start_seg_init()
self.rd_sprts_init_rand()
def draw_on(self, *args, **kwargs):
self.fill(self.clr_dark_grass)
def add_fog(self):
self.fog = FPSptFog(self.size)
self.fog.rect.top = 240
self.fog.rect.left = 0
self.disp_add(self.fog)
def get_seg_base_i(self, pos=None):
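        # Map a track position (world units) to the index of the segment it falls in,
        # wrapping around the looping track; e.g. seg_len = 200.0 and pos = 1234.0
        # give int(1234.0 / 200.0) = 6.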
if pos is None:
pos = self.position
i = int(pos / self.seg_len)
#x#i = int(utils.math_round(pos / self.seg_len))
#i = int(math.floor(pos / self.seg_len))
#i = int(math.ceil(pos / self.seg_len))
seg_n = len(self.segments)
i = (i + seg_n) % seg_n
return i
def rd_get_segs(self, whole=False):
if whole:
segs = self.segments
else:
segs = self.segments[:-self.seg_draw_n]
return segs
# #### geometry #### #
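    # Minimal sketch of the projection math used by the helpers below (after the
    # pseudo-3d road tutorials referenced in the module docstring):
    #   camera -> projection plane:  xp = xc * (d / zc),  yp = yc * (d / zc)
    #   projection plane -> screen:  xs = w / 2 + xp,     ys = h / 2 - yp
    # e.g. with d = 200, a road edge at xc = 1200 and depth zc = 2000 projects to
    # xp = 1200 * (200 / 2000) = 120 px right of the screen centre.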
def geo_prjc_scale(self, d, zc):
if zc == 0.0:
return 1.0
else:
return d / zc
def xc_to_xp(self, xc, d, zc):
if zc == 0.0:
#xp = float('inf')
#xp = 2 ** 64
xp = xc
else:
xp = xc * (d / zc)
return xp
def yc_to_yp(self, yc, d, zc):
if zc == 0.0:
#yp = float('inf')
#yp = 2 ** 64
yp = yc
else:
yp = yc * (d / zc)
return yp
def xp_to_xs(self, xp, w):
#xs = w / 2.0 + w / 2.0 * xp
xs = w / 2.0 + xp
return xs
def yp_to_ys(self, yp, h):
#ys = h / 2.0 - h / 2.0 * yp
ys = h / 2.0 - yp
return ys
def rd_seg_init(self, a=500):
for n in range(a):
self.rd_seg_add(0.0, 0.0)
def rd_seg_add(self, curve=0.0, yw=0.0):
#print '+', curve, yw
n = len(self.segments)
#print n
if n % 2 == 0:
#if n % 4 == 0:
c = FP_COLORS['LIGHT']
#c = {'road': FP_COLOR_WHITE}
else:
c = FP_COLORS['DARK']
#c = {'road': FP_COLOR_BLACK}
seg = {
'index': n,
'p1': {'world': {'z': (n + 1) * self.seg_len,
'y': self.seg_lasy_y()},
'camera': {},
'screen': {}},
'p2': {'world': {'z': (n + 2) * self.seg_len,
'y': yw},
'camera': {},
'screen': {}},
'curve': curve,
'color': c,
'sprites': [],
'looped': 0,
}
self.segments.append(seg)
self.track_len = len(self.segments) * self.seg_len
#self.track_len = (len(self.segments) - self.seg_draw_n) * self.seg_len
    def seg_lasy_y(self):  # world-space y of the last segment's far end (0.0 if the track is empty)
seg_n = len(self.segments)
if seg_n == 0:
return 0.0
else:
return self.segments[seg_n - 1]['p2']['world'].get('y', 0.0)
def rd_seg_init_rand(self, n=50):
#print 'rd_seg_init_rand', n
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
if p < 0.3:
curve = 0.0
yw = 0.0
#elif p < 0.8:
# curve = 0.0
# yw = random.random() * 10.0
else:
curve = rl * random.random() * 6.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_seg_init_rand_2(self, n=50):
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
if p < 0.35:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['MEDIUM'])
elif p < 0.7:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['EASY'])
else:
enter = random.randint(10, 100)
hold = random.randint(10, 100)
leave = random.randint(10, 100)
self.add_road(enter, hold, leave, 0.0, 0.0)
def rd_seg_init_rand_curve(self, n=5):
#print 'rd_seg_init_rand', n
for i in range(n):
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
curve = rl * random.random() * 8.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_start_seg_init(self, n=3):
seg_n = len(self.segments)
if seg_n == 0:
return
#self.segments[0]['color'] = FP_COLORS['START_Y']
#self.segments[2]['color'] = FP_COLORS['START_Y']
for i in range(n):
self.segments[i]['color'] = FP_COLORS['START_Y']
def rd_sprts_init_rand(self, n=None):
seg_n = len(self.segments)
if n is None:
#n = seg_n / 20
n = seg_n / random.randint(10, 30)
for i in range(n):
j = random.randint(10, seg_n - 10)
sprt = random.choice(FP_ROAD_SPRTS.keys())
s = {
'name': sprt,
'type': 1, # image / animate / ...
'obj': None, # need to create at render
##'x_i': None, # get real (random) x from x_pos
'x_i': random.randint(0, 4),
'score': FP_ROAD_SPRTS[sprt].get('score', 0),
}
self.segments[j]['sprites'].append(s)
def rd_sprts_del_all_objs(self):
for k, sprt in self.rd_sprt_objs.items():
#print k, sprt
self.disp_del(sprt)
del self.rd_sprt_objs[k]
def util_limit(self, value, mn, mx):
return max(mn, min(value, mx))
def util_accelerate(self, v, accel, dt):
return v + (accel * dt)
def util_increase(self, start, increment, mx): # with looping
result = start + increment
while (result >= mx):
result -= mx
while (result < 0):
result += mx
return result
def util_ease_in(self, a, b, percent):
return a + (b - a) * math.pow(percent, 2)
def util_ease_out(self, a, b, percent):
return a + (b - a) * (1 - math.pow(1 - percent, 2))
def util_ease_in_out(self, a, b, percent):
return a + (b - a) * ((-math.cos(percent * math.pi)/2) + 0.5)
def util_curve_percent_remaining(self, n, total):
return (n % total) / total
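    # The ease helpers blend a value from a to b as percent goes 0 -> 1:
    #   ease_in:     a + (b - a) * percent^2             (slow start)
    #   ease_out:    a + (b - a) * (1 - (1 - percent)^2) (slow finish)
    #   ease_in_out: cosine blend between a and b
    # add_road() below uses them so the curve value ramps up over the `enter`
    # segments, holds for `hold`, and eases back to 0 over `leave`, while the
    # hill height y is eased from its start to end value across all three phases.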
def add_road(self, enter, hold, leave, curve, yw=0.0):
#print enter, hold, leave, curve, yw
start_y = self.seg_lasy_y()
end_y = start_y + (int(yw) * self.seg_len)
total = enter + hold + leave
for n in range(enter):
self.rd_seg_add(self.util_ease_in(0, curve, float(n)/enter),
self.util_ease_out(start_y, end_y,
float(n)/total))
for n in range(hold):
self.rd_seg_add(curve,
self.util_ease_out(start_y, end_y,
(float(n)+enter)/total))
for n in range(leave):
self.rd_seg_add(self.util_ease_out(curve, 0, n/leave),
self.util_ease_out(start_y, end_y,
(float(n)+enter+hold)/total))
def add_curves(self):
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
0.0)
def add_low_rolling_hills(self, num, height):
        num = num or FP_ROAD['LENGTH']['SHORT']
        height = height or FP_ROAD['HILL']['LOW']
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, -height)
self.add_road(num, num, num, 0, height)
self.add_road(num, num, num, 0, 0)
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, 0)
def rd_seg_get_cleared(self, segs=None):
if not segs:
segs = self.segments
segs_c = []
for seg in segs:
if not seg['sprites']:
segs_c.append(seg)
else:
seg_c = {}
for k, v in seg.items():
if k not in ['sprites']:
seg_c[k] = v
else:
seg_c[k] = []
for spr in seg['sprites']:
spr_n = {}
for sk, sv in spr.items():
if sk not in ['obj']:
spr_n[sk] = sv
else:
spr_n[sk] = None
seg_c[k].append(spr_n)
segs_c.append(seg_c)
return segs_c
def rd_seg_json_save(self, f):
sc = self.rd_seg_get_cleared(self.segments)
s = utils.json_dumps(sc)
with open(f, 'w') as fo:
fo.write(s)
def rd_seg_json_load(self, f):
with open(f, 'r') as fi:
s = fi.read()
segs = utils.json_loads(s)
return segs
def rd_seg_render__1_o(self):
"""straight"""
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
for i, seg in enumerate(self.segments):
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
#zc1 = self.position - (zw1 - self.camera_z)
#zc2 = self.position - (zw2 - self.camera_z)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render__2_o(self):
"""curve test 1"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
#'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
#'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__3_o(self):
"""curve test 2: draw a circle"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
#xc1 = self.road_w / 2 - self.player_x
#xc2 = -self.road_w / 2 - self.player_x
#xc3 = self.road_w / 2 - self.player_x
#xc4 = -self.road_w / 2 - self.player_x
# <3>
#engi = math.pi / 2.0 / self.seg_draw_n
engi = math.pi / 2.0 / 60#10#20
rad = self.road_w * 4#2
rad1 = rad + self.road_w / 2
rad2 = rad - self.road_w / 2
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
# <3>
xx1 = rad1 * math.cos(engi * i)
xx2 = rad2 * math.cos(engi * i)
xx3 = rad1 * math.cos(engi * (i + 1))
xx4 = rad2 * math.cos(engi * (i + 1))
xc1 = (rad - xx1) - self.player_x
xc2 = (rad - xx2) - self.player_x
xc3 = (rad - xx3) - self.player_x
xc4 = (rad - xx4) - self.player_x
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__4_o(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
self.player_seg = self.segments[segbi]
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
#print 'segbi', segbi, ' / ', seg_n
self.player_seg = self.segments[segbi]
self.base_seg = self.segments[(segbi + 2) % seg_n]
# for test
#self.base_seg['color'] = FP_COLORS['FINISH']
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
#print 'b_curve', b_curve
#print 'world z', self.player_seg['p1']['world']['z']
#print 'world y', self.player_seg['p1']['world'].get('y', 0.0)
# clear the sprites cache
self.rd_sprt_cache = []
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
# for test
if i < 10:
print '>>> ', i
print 'curve', seg.get('curve', 0.0)
print 'world z', seg['p1']['world']['z']
print 'world y', seg['p1']['world'].get('y', 0.0)
#print '-' * 30
'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
zc1 = zw1 - self.camera_z - (self.position % self.seg_len)
zc2 = zw2 - self.camera_z - (self.position % self.seg_len)
'''
#x#
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
'''
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
# for hills
yw1 = seg['p1']['world'].get('y', 0.0)
yw2 = seg['p2']['world'].get('y', 0.0)
yc1 = yc - yw1
yc2 = yc - yw2
#print yw1, yw2
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc1, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc2, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
# for test
if i < 10:
print xs1, ys1, xs2, ys2
print xs4, ys4, xs3, ys3
print '-' * 30
'''
# grass
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
# road
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
# for test
#self.pygm.draw.circle(self.surf, consts.BLUE,
# (int(xsr1), 116 - int(ys1)),
# 3, 0)
# render road sprites
# TODO: check if this seg is looped
seg_scale = self.geo_prjc_scale(self.d, zc1)
x_rnd = random.randint(1, self.road_w / 2 - 10) * seg_scale
#x_sprt = (xs1 + xs2) / 2.0
#y_sprt = (ys1 + ys3) / 2.0
x_dt = x_rnd * seg_scale
x_pos = [xsr1, xsr2,
(xsr1 + xsl1) / 2.0,
(xsr2 + xsl2) / 2.0,
xsl1, xsl2]
#x_sprt = xsr1
x_sprt = (xsr1 + xsl1) / 2.0
#x_sprt = random.choice(x_pos)
x_i = random.randint(0, len(x_pos) - 1) # NOTE: not used now !!
##x_i = 2
y_sprt = ys1
scale_sprt = seg_scale * 8.0#10.0#2.0
obj = self.rd_sprts_render(seg, x_pos, x_i, y_sprt, scale_sprt)
if obj:
self.rd_sprt_cache.append(obj)
        # render the queued sprites in the right order
for obj in self.rd_sprt_cache[::-1]:
self.disp_add(obj)
def render_polygon(self, ctx, x1, y1, x2, y2, x3, y3, x4, y4, color):
#d = 200#100#240#50#
#a = 60
#pnts = [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x1, y1]]
#pnts = [[x1, y1-d], [x2, y2-d], [x3, y3-d], [x4, y4-d], [x1, y1-d]]
#pnts = [[x1, y1+a], [x2, y2+a], [x3, y3+a], [x4, y4+a], [x1, y1+a]]
# reflect the y-
d = 116
pnts = [[x1, d-y1], [x2, d-y2], [x3, d-y3], [x4, d-y4], [x1, d-y1]]
c = utils.clr_from_str(color)
try:
self.pygm.draw.polygon(self.surf, c, pnts)
except Exception as e:
#print '-' * 60
pass
def rd_sprts_render(self, seg, x_pos, x_i, y, scale):
sprts = seg.get('sprites')
if not sprts:
return None
for i, info in enumerate(sprts):
sprt = info['name']
obj_k = str(seg['index']) + '_' + str(i) + '_' + sprt
obj = info.get('obj')
'''
# TODO: <1>
if not obj:
obj = FPSptRdSprts.create_by_img(FP_ROAD_SPRTS[sprt][0])
info['obj'] = obj
self.disp_add(obj)
'''
# <2>
if obj:
self.disp_del(obj)
# NOTE: objs will be deleted at rd_sprts_del_all_objs()
##del self.rd_sprt_objs[obj_k]
img = FP_ROAD_SPRTS[sprt]['imgs'][0]
obj = FPSptRdSprts.create_by_img(img)
# avoid: pygame.error: Width or height is too large
if scale > 500:
#print 'scale <1>', scale
pass
else:
try:
obj.scale(scale)
except:
#print 'scale <2>', scale
pass
x_i_saved = info.get('x_i')
#if not x_i_saved:
# info['x_i'] = x_i
# x_i_saved = x_i
obj.rect.top = 116 - y + 240 - obj.rect.height
obj.rect.left = x_pos[x_i_saved] - obj.rect.width / 2
#obj.scale(scale)
info['obj'] = obj
##self.disp_add(obj) # NOTE: render out here
self.rd_sprt_objs[obj_k] = obj # for reset to delete all
# NOTE: only show one
break
return obj
def handle_event(self, events, *args, **kwargs):
#print '>>> ', events
if not self.flag_check_event:
return events
else:
return self.check_key(events)
def key_to_di(self, k):
if k == self.pglc.K_UP:
return 0
elif k == self.pglc.K_RIGHT:
return 1
elif k == self.pglc.K_DOWN:
return 2
elif k == self.pglc.K_LEFT:
return 3
else:
return None
def key_to_di_b(self, k):
if k == self.pglc.K_f or k == self.pglc.K_j:
return 0
elif k == self.pglc.K_k:
return 1
elif k == self.pglc.K_SPACE or k == self.pglc.K_v or k == self.pglc.K_n:
return 2
elif k == self.pglc.K_d:
return 3
else:
return None
def check_key(self, events):
#print id(events)
r_events = []
e_keys_up = []
e_keys_dn = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_up.append(di)
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_dn.append(di)
else:
r_events.append(event)
else:
r_events.append(event)
self.e_keys_up = e_keys_up
self.e_keys_dn = e_keys_dn
return r_events
def refresh__1(self, fps_clock, *args, **kwargs):
#print '>>> refresh'
#'''
if self.player_di == 3: # <
self.player_x -= 9
if self.player_x < -1000:
self.player_di = 1
elif self.player_di == 1:
self.player_x += 19
if self.player_x > 1000:
self.player_di = 3
#'''
#'''
self.position += 10.0#5.0#1.0
self.position += random.randint(2, 10)
if self.position > self.track_len:
self.position -= self.track_len
#'''
self.draw_on()
self.rd_seg_render()
def refresh(self, fps_clock, *args, **kwargs):
self.check_player_di(self.e_keys_dn, self.e_keys_up)
self.draw_on()
self.rd_seg_render()
self.update_world()
self.check_if_car_out_road()
self.check_score()
self.check_tm()
self.update_bg()
def check_player_di(self, e_keys_dn, e_keys_up):
if 0 in e_keys_dn:
self.player_go = 1
elif 2 in e_keys_dn:
self.player_go = 2
if 1 in e_keys_dn:
self.player_di = 1
elif 3 in e_keys_dn:
self.player_di = 3
if 0 in e_keys_up:
if self.player_go != 2:
self.player_go = 0
if 2 in e_keys_up:
if self.player_go != 1:
self.player_go = 0
if 1 in e_keys_up:
if self.player_di != 3:
self.player_di = 0
if 3 in e_keys_up:
if self.player_di != 1:
self.player_di = 0
def update_world(self):
if self.player_go == 1:
self.speed += self.speed_dt_up
elif self.player_go == 2:
self.speed -= self.speed_dt_dn
else:
self.speed -= self.speed_dt_na
# if on the grass, slow down
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
self.speed -= 10
if self.speed < 0.0:
self.speed = 0.0
elif self.speed > self.speed_max:
self.speed = self.speed_max
self.position += self.speed
if self.position > self.track_len:
self.position -= self.track_len
            # reset so check_score() starts over on the new lap
self.last_seg_i = 0
self.game_over = True
self.game_score = 1.0
if self.player_di == 1:
#self.player_x += self.player_x_dt
self.player_x += self.speed / 5 + 20
elif self.player_di == 3:
#self.player_x -= self.player_x_dt
self.player_x -= self.speed / 5 + 20
else:
pass
p_curve = self.player_seg.get('curve', 0.0)
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#print p_dt
#self.player_x -= p_dt
self.player_x += p_dt
def check_if_car_out_road(self):
        # decrease the score when the car goes off the road
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
if self.score > 0:
self.score -= 1
#self.score -= 1
#if self.score < 0:
# self.score = 0
self.game_over = True
self.game_score = -1.0
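    # Scoring sketch: once per newly entered segment, the sprite sitting on the
    # segment just under the car is compared against the car's x position; if the
    # horizontal extents overlap, that sprite's score is added (coins/health are
    # positive, rocks/enemies negative, see FP_ROAD_SPRTS).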
def check_score(self):
# make sure we check score once for a segment
seg_i = self.player_seg['index']
if seg_i > self.last_seg_i:
self.last_seg_i = seg_i
else:
return
# NOTE: here we should use the segment just under the car
#sprts = self.player_seg['sprites']
sprts = self.base_seg['sprites']
if not sprts:
return
# NOTE: we now only use the first sprite !
sprt = sprts[0]
x_i = sprt.get('x_i')
if x_i is None:
return
scr = sprt.get('score')
if not scr: # None or 0
return
obj = sprt.get('obj')
if not obj: # None or 0
return
#rd_w_half = self.road_w / 2
#x_pos = [rd_w_half + self.lane_w,
# rd_w_half - self.lane_w]
sprt_x = obj.rect.left
sprt_w = obj.rect.width
car_x = self.player_x
car_w = self.car.rect.width * 2
sprt_at = 10000
if x_i == 0:
sprt_at = 40
elif x_i == 1:
sprt_at = -40
elif x_i == 2:
sprt_at = 580
elif x_i == 3:
sprt_at = -580
elif x_i == 4:
sprt_at = 1100
elif x_i == 5:
sprt_at = -1100
#print 'sprt_x', sprt_x
#print 'car_x', car_x
#print 'car_w', car_w
#print 'sprt_at', (car_x - car_w / 2), sprt_at, (car_x + car_w / 2)
#print '-' * 40
w_half = car_w / 2 + sprt_w / 2
#if (car_x + car_w / 2) < sprt_x < (car_x + car_w / 2):
if (car_x - w_half) < sprt_at < (car_x + w_half):
self.score += scr
def check_tm(self):
if self.position > self.seg_len * 2:
if self.tm_start == 0.0:
self.tm_start = time.time()
self.tm_end = self.tm_start
else:
self.tm_end = time.time()
self.tm_last_once = self.tm_end - self.tm_start
else:
self.tm_start = 0.0
#self.tm_end = 0.0
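    # Parallax scrolling: the sky, hill and tree layers drift sideways at
    # increasing speeds (sky_speed < hill_speed < tree_speed) in proportion to
    # speed * curve, each layer wrapping around by using its two copies; the sky
    # also drifts slowly all the time so the clouds keep moving.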
def update_bg(self):
# always move the cloud
for sky in self.bg_sky:
sky.rect.left -= 1#self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
if self.speed <= 0.0:
return
p_curve = self.player_seg.get('curve', 0.0)
#p_curve = 3
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#p_dt = 40
#p_dt = -40
#p_dt = random.randint(-100, 100)
#print p_dt
for sky in self.bg_sky:
#print sky
sky.rect.left += int(self.sky_speed * p_dt)
# always move the cloud
#sky.rect.left -= self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
for hill in self.bg_hills:
hill.rect.left += int(self.hill_speed * p_dt)
if hill.rect.left + hill.rect.width < 0:
hill.rect.left += hill.rect.width * 2
if hill.rect.left - hill.rect.width > 0:
hill.rect.left -= hill.rect.width * 2
for trees in self.bg_trees:
trees.rect.left += int(self.tree_speed * p_dt)
if trees.rect.left + trees.rect.width < 0:
trees.rect.left += trees.rect.width * 2
if trees.rect.left - trees.rect.width > 0:
trees.rect.left -= trees.rect.width * 2
class FPSptRoadMap(sptdraw.SptDrawBase):
def __init__(self, size, segs, rad, *args, **kwargs):
super(FPSptRoadMap, self).__init__(size)
self.segs = segs
self.rad = rad
#self.fill(consts.WHITE)
self.draw_segs(self.segs, self.rad)
def xy_to_cntr(self, x, y):
return [self.size[0] / 2 + x, self.size[1] / 2 - y]
def cv_to_engl(self, curve, rad):
a = float(curve) / rad
#a *= 10.0
#print a
s = 1.0
if a < 0.0:
s = -1.0
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a)
return tht_d
def get_segs_pnts(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
cv_s = 0
cv_l = 0.0
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
if cv_s:
tht_d = self.cv_to_engl(cv_l, rad)
#tht += tht_d
tht -= tht_d
rad_m = 20.0#10.0#50.0#
cv_s = 0
cv_l = 0.0
else:
rad_m = 0.5#1.0#0.1#
else:
if cv_s:
cv_l += curve
else:
cv_s = 1
continue
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def get_segs_pnts_1(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
rad_m = 1.0#0.1#
else:
a = float(curve) / rad
a *= 10.0
#print a
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a) # TODO:
tht += tht_d
rad_m = 10.0#50.0#
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def draw_segs(self, segs, rad):
pnts = self.get_segs_pnts(segs, rad)
#print pnts
if len(pnts) <= 1:
return
#if len(pnts) > 0:
# pnts.append(pnts[0])
cpnts = [self.xy_to_cntr(p[0], p[1]) for p in pnts]
c = utils.clr_from_str(FP_COLOR_BLUE)
#self.pygm.draw.polygon(self.surf, c, cpnts)
self.pygm.draw.lines(self.surf, c, False, cpnts, 3)
class FPSptProgress(sptdraw.SptDrawBase):
def __init__(self, size, c_bg=consts.BLUE, c_prog=consts.GREEN):
super(FPSptProgress, self).__init__(size)
self.c_bg = c_bg
self.c_prog = c_prog
self.progress(0.0)
def progress(self, prog):
y = self.size[1] * prog
self.fill(self.c_bg)
#self.pygm.draw.rect(self.surf, consts.GREEN,
# [1, 0, self.size[0] - 2, y])
# from down to up
self.pygm.draw.rect(self.surf, self.c_prog,
[1, self.size[1] - y,
self.size[0] - 2, y])
class FPStraight(pygm.PyGMSprite):
def __init__(self, cfg, *args, **kwargs):
super(FPStraight, self).__init__()
self.cfg = cfg
self.bg_sky1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky1.rect.top = 0
self.bg_sky1.rect.left = 0
self.disp_add(self.bg_sky1)
self.bg_sky2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky2.rect.top = 0
self.bg_sky2.rect.left = self.bg_sky1.rect.width
self.disp_add(self.bg_sky2)
self.bg_hills1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills1.rect.top = 0
self.bg_hills1.rect.left = 0
self.disp_add(self.bg_hills1)
self.bg_hills2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills2.rect.top = 0
self.bg_hills2.rect.left = self.bg_hills1.rect.width
self.disp_add(self.bg_hills2)
self.bg_trees1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees1.rect.top = 0
self.bg_trees1.rect.left = 0
self.disp_add(self.bg_trees1)
self.bg_trees2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees2.rect.top = 0
self.bg_trees2.rect.left = self.bg_trees1.rect.width
self.disp_add(self.bg_trees2)
self.car = FPSptSprts('img_flatpath/images/sprites.png',
IMG_POS_SPRITES['PLAYER_STRAIGHT'])
#print self.road.cameraDepth/self.road.playerZ
#self.car.scale(self.road.cameraDepth/self.road.playerZ)
self.car.scale(2)
self.car.rect.top = 400
self.car.rect.left = (640 - self.car.rect.width) / 2
##self.disp_add(self.car) # car disp add after road
#self.road = FPSptRoad((640, 240), self.cfg)
self.road = FPSptRoadB((640, 240), self.cfg,
car=self.car,
bg_sky=[self.bg_sky1, self.bg_sky2],
bg_hills=[self.bg_hills1, self.bg_hills2],
bg_trees=[self.bg_trees1, self.bg_trees2])
self.road.rect.top = 240
self.road.rect.left = 0
self.disp_add(self.road)
self.disp_add(self.car)
self.rdmap = FPSptRoadMap((480, 480),
self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rect.top = 0
self.rdmap.rect.left = 80
self.rdmap.rotate(90)
self.disp_add(self.rdmap)
self.rdpsd = pygm.SptLbl(str(int(self.road.speed)),
c=consts.GREEN, font_size=12)
self.rdpsd.rect.top = 456
self.rdpsd.rect.left = 312
self.disp_add(self.rdpsd)
self.scr = pygm.SptLbl(str(int(self.road.score)),
c=consts.RED, font_size=16)
self.scr.rect.top = 40#454
self.scr.rect.left = 600
self.disp_add(self.scr)
self.tm_once = pygm.SptLbl(str(int(self.road.tm_last_once)),
c=consts.YELLOW, font_size=16)
self.tm_once.rect.top = 20#454
self.tm_once.rect.left = 600
self.disp_add(self.tm_once)
self.prog = FPSptProgress((4, 100), c_prog=consts.YELLOW)
self.prog.rect.top = 70#340
self.prog.rect.left = 610
#self.prog.rotate(180)
self.disp_add(self.prog)
self.spd = FPSptProgress((4, 100), c_prog=consts.GREEN)
self.spd.rect.top = 70#340
self.spd.rect.left = 602
#self.spd.rotate(180)
self.disp_add(self.spd)
def rdmap_hide(self):
self.rdmap.hide()
def rdmap_reset(self):
self.rdmap.clear()
self.rdmap.draw_segs(self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rotate(90)
def road_reset(self):
self.road.rd_reset()
self.rdmap_reset()
def road_reset_keep_segs(self):
self.road.rd_reset(init=False, keep_segs=True)
def road_reset_from_file(self, segs_file='sr_roads/sr_road.txt'):
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_reset(init=False, keep_segs=False,
segs_file=segs_file)
self.rdmap_reset()
def road_segs_to_file(self, segs_file=None):
if not segs_file:
segs_file = 'sr_roads/sr_road_' + str(int(time.time())) + '.txt'
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_seg_json_save(segs_file)
def handle_event(self, events, *args, **kwargs):
#return events
r_events = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
k = event.key
if k == self.pglc.K_SPACE:
# hide / show road map
self.rdmap_hide()
elif k == self.pglc.K_RETURN:
self.road_reset()
elif k == self.pglc.K_TAB:
self.road_reset_keep_segs()
elif k == self.pglc.K_BACKSPACE:
self.road_reset_from_file()
elif k == self.pglc.K_SLASH:
self.road_segs_to_file()
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
r_events.append(event)
else:
r_events.append(event)
return r_events
def refresh(self, fps_clock, *args, **kwargs):
self.rdpsd.lbl_set(str(int(self.road.speed)))
self.scr.lbl_set(str(int(self.road.score)))
self.tm_once.lbl_set(str(int(self.road.tm_last_once)))
prg = self.road.position / self.road.track_len
self.prog.progress(prg)
spdc = self.road.speed / self.road.speed_max
self.spd.progress(spdc)
class FPSceneA(pygm.PyGMScene):
def __init__(self, *args, **kwargs):
super(FPSceneA, self).__init__(*args, **kwargs)
self.straight = FPStraight({})
self.straight.rect.top = 0
self.straight.rect.left = 0
self.disp_add(self.straight)
        '''
self.sn1 = SptTmpx((200, 200))
self.sn1.rect.top = 100
self.sn1.rect.left = 100
self.disp_add(self.sn1)
'''
'''
self.lb1 = pygm.SptLbl('hello,', c=consts.GREEN, font_size=32)
self.lb1.rect.top = 200
self.lb1.rect.left = 100
self.disp_add(self.lb1)
'''
def handle_event(self, events, *args, **kwargs):
return events
def refresh(self, fps_clock, *args, **kwargs):
pass
class GMFlatpath(pygm.PyGMGame):
def __init__(self, title, winw, winh, *args, **kwargs):
super(GMFlatpath, self).__init__(title, winw, winh)
bk_im = utils.dir_abs('starfish/data/img_bk_1.jpg', __file__)
#self.bk = pygm.SptImg('data/img_bk_1.jpg')
self.bk = pygm.SptImg(bk_im)
self.bk.rect.top = -230
self.bk.rect.left = -230
#self.disp_add(self.bk)
self.scn1 = FPSceneA()
self.disp_add(self.scn1)
road_file = kwargs.get('road_file')
if road_file:
self.scn1.straight.road_reset_from_file(segs_file=road_file)
def main():
#sf = GMFlatpath('flatpath <:::>', 640, 480)
sf = GMFlatpath('flatpath <:::>', 640, 480, road_file='sr_road.txt')
sf.mainloop()
if __name__ == '__main__':
main()
| [] |
laetrid/learning | First_course/test5_base.py | b28312c34db2118fb7d5691834b8f7e628117642 | #!/usr/bin/env python
sw1_show_cdp_neighbors = '''
SW1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone
Device ID Local Intrfce Holdtme Capability Platform Port ID
R1 Fas 0/11 153 R S I 881 Fas 1
R2 Fas 0/12 123 R S I 881 Fas 1
R3 Fas 0/13 129 R S I 881 Fas 1
R4 Fas 0/14 173 R S I 881 Fas 1
R5 Fas 0/15 144 R S I 881 Fas 1
'''
sw1_show_cdp_neighbors_detail = '''
SW1> show cdp neighbors detail
--------------------------
Device ID: R1
Entry address(es):
IP address: 10.1.1.1
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/11, Port ID (outgoing port): FastEthernet1
Holdtime: 153 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R2
Entry address(es):
IP address: 10.1.1.2
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/12, Port ID (outgoing port): FastEthernet1
Holdtime: 123 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R3
Entry address(es):
IP address: 10.1.1.3
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/13, Port ID (outgoing port): FastEthernet1
Holdtime: 129 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R4
Entry address(es):
IP address: 10.1.1.4
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/14, Port ID (outgoing port): FastEthernet1
Holdtime: 173 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R5
Entry address(es):
IP address: 10.1.1.5
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/15, Port ID (outgoing port): FastEthernet1
Holdtime: 144 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
'''
r1_show_cdp_neighbors = '''
R1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/11
'''
r1_show_cdp_neighbors_detail = '''
R1>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/11
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r2_show_cdp_neighbors = '''
R2>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/12
'''
r2_show_cdp_neighbors_detail = '''
R2>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/12
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r3_show_cdp_neighbors = '''
R3>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/13
'''
r3_show_cdp_neighbors_detail = '''
R3>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/13
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r4_show_cdp_neighbors = '''
R4>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/14
'''
r4_show_cdp_neighbors_detail = '''
R4>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/14
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r5_show_cdp_neighbors = '''
R5>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/15
'''
r5_show_cdp_neighbors_detail = '''
R5>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/15
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
| [] |
jeremiedbb/scipy | scipy/optimize/_numdiff.py | 2bea64c334b18fd445a7945b350d7ace2dc22913 | """Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. Step size decreases only if a sign flip or
        switching to a one-sided scheme doesn't allow taking a full step.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
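# A worked example of the adjustment above: with lb = 0, ub = 1, x0 = 0.9,
# h = 0.2 and a '1-sided' scheme, x0 + h would leave the feasible region, but
# |h| still fits on the other side (0.2 <= max(0.9, 0.1)), so the sign of the
# step is flipped to h = -0.2 instead of shrinking the step.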
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
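# A quick sketch of the step sizes this produces: for method='2-point' the
# default relative step is EPS**0.5 ~ 1.5e-8, so at x0 = 1000.0 the absolute
# step is about 1.5e-8 * max(1, 1000) = 1.5e-5, while for |x0| <= 1 it stays
# near 1.5e-8 and carries the sign of x0.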
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is, use a random permutation but
guarantee repeatability.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is an index of the group to
        which the ith column is assigned. The procedure is helpful only if
        n_groups is significantly less than n.
References
----------
.. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
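# A minimal sketch of the grouping (the exact group labels depend on the
# random permutation used):
#
#     >>> A = np.array([[1, 0, 1, 0],
#     ...               [0, 1, 0, 1]])
#     >>> group_columns(A)  # columns 0/2 and 1/3 clash, so two groups suffice
#
# Here columns 0 and 1 never share a non-zero row (likewise 2 and 3), so the
# four columns are covered by two groups and the Jacobian can be estimated
# with two perturbations instead of four.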
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
called the Jacobian, where an element (i, j) is a partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
        it's possible to estimate several of its columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that a corresponding element of the Jacobian
identically equals to zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
a 2-D structure, for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
    If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
For dense differencing when m=1 Jacobian is returned with a shape (n,),
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
    Our motivation is the following: a) It handles the case of gradient
    computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get 2-D
Jacobian with correct dimensions.
References
----------
    .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] B. Fornberg, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
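# A sketch of how the sparse path is driven from `approx_derivative`: for an
# element-wise function the Jacobian is diagonal, so every column fits in one
# group and a single extra evaluation recovers the whole matrix, e.g.
#
#     >>> structure = np.eye(3)
#     >>> groups = group_columns(structure)
#     >>> J = approx_derivative(np.sin, np.ones(3),
#     ...                       sparsity=(structure, groups))
#
# `J` is a 3x3 csr_matrix whose diagonal approximates cos(1).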
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| [((310, 330), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (318, 330), True, 'import numpy as np\n'), ((1858, 1898), 'numpy.all', 'np.all', (['((lb == -np.inf) & (ub == np.inf))'], {}), '((lb == -np.inf) & (ub == np.inf))\n', (1864, 1898), True, 'import numpy as np\n'), ((13371, 13388), 'numpy.atleast_1d', 'np.atleast_1d', (['x0'], {}), '(x0)\n', (13384, 13388), True, 'import numpy as np\n'), ((14296, 14325), 'numpy.any', 'np.any', (['((x0 < lb) | (x0 > ub))'], {}), '((x0 < lb) | (x0 > ub))\n', (14302, 14325), True, 'import numpy as np\n'), ((16791, 16821), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(m, n)', 'matvec'], {}), '((m, n), matvec)\n', (16805, 16821), False, 'from scipy.sparse.linalg import LinearOperator\n'), ((16937, 16953), 'numpy.empty', 'np.empty', (['(n, m)'], {}), '((n, m))\n', (16945, 16953), True, 'import numpy as np\n'), ((16967, 16977), 'numpy.diag', 'np.diag', (['h'], {}), '(h)\n', (16974, 16977), True, 'import numpy as np\n'), ((20310, 20332), 'numpy.hstack', 'np.hstack', (['row_indices'], {}), '(row_indices)\n', (20319, 20332), True, 'import numpy as np\n'), ((20351, 20373), 'numpy.hstack', 'np.hstack', (['col_indices'], {}), '(col_indices)\n', (20360, 20373), True, 'import numpy as np\n'), ((20390, 20410), 'numpy.hstack', 'np.hstack', (['fractions'], {}), '(fractions)\n', (20399, 20410), True, 'import numpy as np\n'), ((1638, 1665), 'numpy.ones_like', 'np.ones_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1650, 1665), True, 'import numpy as np\n'), ((3669, 3695), 'numpy.asarray', 'np.asarray', (['b'], {'dtype': 'float'}), '(b, dtype=float)\n', (3679, 3695), True, 'import numpy as np\n'), ((3747, 3770), 'numpy.resize', 'np.resize', (['lb', 'x0.shape'], {}), '(lb, x0.shape)\n', (3756, 3770), True, 'import numpy as np\n'), ((3806, 3829), 'numpy.resize', 'np.resize', (['ub', 'x0.shape'], {}), '(ub, x0.shape)\n', (3815, 3829), True, 'import numpy as np\n'), ((5187, 5203), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (5200, 5203), True, 'import numpy as np\n'), ((5363, 5381), 'numpy.isscalar', 'np.isscalar', (['order'], {}), '(order)\n', (5374, 5381), True, 'import numpy as np\n'), ((5397, 5425), 'numpy.random.RandomState', 'np.random.RandomState', (['order'], {}), '(order)\n', (5418, 5425), True, 'import numpy as np\n'), ((5487, 5504), 'numpy.asarray', 'np.asarray', (['order'], {}), '(order)\n', (5497, 5504), True, 'import numpy as np\n'), ((14175, 14192), 'numpy.atleast_1d', 'np.atleast_1d', (['f0'], {}), '(f0)\n', (14188, 14192), True, 'import numpy as np\n'), ((17914, 17936), 'numpy.ravel', 'np.ravel', (['J_transposed'], {}), '(J_transposed)\n', (17922, 17936), True, 'import numpy as np\n'), ((18180, 18194), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (18186, 18194), True, 'import numpy as np\n'), ((18317, 18340), 'numpy.equal', 'np.equal', (['group', 'groups'], {}), '(group, groups)\n', (18325, 18340), True, 'import numpy as np\n'), ((23642, 23668), 'numpy.abs', 'np.abs', (['(J_to_test - J_diff)'], {}), '(J_to_test - J_diff)\n', (23648, 23668), True, 'import numpy as np\n'), ((1708, 1717), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (1714, 1717), True, 'import numpy as np\n'), ((1742, 1770), 'numpy.zeros_like', 'np.zeros_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1755, 1770), True, 'import numpy as np\n'), ((2149, 2164), 'numpy.abs', 'np.abs', (['h_total'], {}), '(h_total)\n', (2155, 2164), True, 'import numpy as np\n'), ((2168, 2202), 'numpy.maximum', 'np.maximum', 
(['lower_dist', 'upper_dist'], {}), '(lower_dist, upper_dist)\n', (2178, 2202), True, 'import numpy as np\n'), ((2673, 2734), 'numpy.minimum', 'np.minimum', (['h[forward]', '(0.5 * upper_dist[forward] / num_steps)'], {}), '(h[forward], 0.5 * upper_dist[forward] / num_steps)\n', (2683, 2734), True, 'import numpy as np\n'), ((3608, 3618), 'numpy.abs', 'np.abs', (['x0'], {}), '(x0)\n', (3614, 3618), True, 'import numpy as np\n'), ((15573, 15594), 'numpy.atleast_1d', 'np.atleast_1d', (['groups'], {}), '(groups)\n', (15586, 15594), True, 'import numpy as np\n'), ((18598, 18611), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (18608, 18611), True, 'import numpy as np\n'), ((2875, 2938), 'numpy.minimum', 'np.minimum', (['h[backward]', '(0.5 * lower_dist[backward] / num_steps)'], {}), '(h[backward], 0.5 * lower_dist[backward] / num_steps)\n', (2885, 2938), True, 'import numpy as np\n'), ((3011, 3045), 'numpy.minimum', 'np.minimum', (['upper_dist', 'lower_dist'], {}), '(upper_dist, lower_dist)\n', (3021, 3045), True, 'import numpy as np\n'), ((15526, 15550), 'numpy.atleast_2d', 'np.atleast_2d', (['structure'], {}), '(structure)\n', (15539, 15550), True, 'import numpy as np\n'), ((15951, 15967), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (15964, 15967), True, 'import numpy as np\n'), ((15993, 16004), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16001, 16004), True, 'import numpy as np\n'), ((16026, 16033), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (16030, 16033), False, 'from numpy.linalg import norm\n'), ((19271, 19282), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (19279, 19282), True, 'import numpy as np\n'), ((19453, 19466), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19463, 19466), True, 'import numpy as np\n'), ((19592, 19603), 'numpy.empty', 'np.empty', (['m'], {}), '(m)\n', (19600, 19603), True, 'import numpy as np\n'), ((23358, 23382), 'numpy.asarray', 'np.asarray', (['J_diff[i, j]'], {}), '(J_diff[i, j])\n', (23368, 23382), True, 'import numpy as np\n'), ((23413, 23433), 'numpy.abs', 'np.abs', (['abs_err_data'], {}), '(abs_err_data)\n', (23419, 23433), True, 'import numpy as np\n'), ((3098, 3116), 'numpy.abs', 'np.abs', (['h_adjusted'], {}), '(h_adjusted)\n', (3104, 3116), True, 'import numpy as np\n'), ((13685, 13697), 'numpy.isinf', 'np.isinf', (['lb'], {}), '(lb)\n', (13693, 13697), True, 'import numpy as np\n'), ((13745, 13757), 'numpy.isinf', 'np.isinf', (['ub'], {}), '(ub)\n', (13753, 13757), True, 'import numpy as np\n'), ((16203, 16219), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16216, 16219), True, 'import numpy as np\n'), ((16245, 16256), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16253, 16256), True, 'import numpy as np\n'), ((16280, 16287), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (16284, 16287), False, 'from numpy.linalg import norm\n'), ((19901, 19914), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19911, 19914), True, 'import numpy as np\n'), ((23472, 23491), 'numpy.abs', 'np.abs', (['J_diff_data'], {}), '(J_diff_data)\n', (23478, 23491), True, 'import numpy as np\n'), ((23715, 23729), 'numpy.abs', 'np.abs', (['J_diff'], {}), '(J_diff)\n', (23721, 23729), True, 'import numpy as np\n'), ((16534, 16550), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16547, 16550), True, 'import numpy as np\n'), ((16576, 16587), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16584, 16587), True, 'import numpy as np\n'), ((16609, 16616), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', 
(16613, 16616), False, 'from numpy.linalg import norm\n')] |
abhinavg97/pytorch-lightning | tests/models/test_hparams.py | 0d54cf25a2dba33e4640ac52768a83406e7a0a94 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from argparse import Namespace
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import OmegaConf, Container
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml
from pytorch_lightning.utilities import AttributeDict, is_picklable
from tests.base import EvalModelTemplate, TrialMNIST, BoringModel
class SaveHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object """
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
class AssignHparamsModel(EvalModelTemplate):
""" Tests that a model can take an object with explicit setter """
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests for the existence of an arg 'test_arg=14'
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_namespace_hparams(tmpdir, cls):
# init model
model = cls(hparams=Namespace(test_arg=14))
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_dict_hparams(tmpdir, cls):
# init model
model = cls(hparams={'test_arg': 14})
# run standard test suite
_run_standard_hparams_test(tmpdir, model, cls)
@pytest.mark.parametrize("cls", [SaveHparamsModel, AssignHparamsModel])
def test_omega_conf_hparams(tmpdir, cls):
# init model
conf = OmegaConf.create(dict(test_arg=14, mylist=[15.4, dict(a=1, b=2)]))
model = cls(hparams=conf)
assert isinstance(model.hparams, Container)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, cls)
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert isinstance(model2.hparams, Container)
# config specific tests
assert model2.hparams.test_arg == 14
assert model2.hparams.mylist[0] == 15.4
def test_explicit_args_hparams(tmpdir):
"""
Tests that a model can take implicit args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg', 'test_arg2')
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters()
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
class LocalModel(EvalModelTemplate):
def __init__(self, test_arg, test_arg2):
super().__init__()
self.save_hyperparameters('test_arg')
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert 'test_arg2' not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def test_class_nesting():
class MyModule(LightningModule):
def forward(self):
...
# make sure PL modules are always nn.Module
a = MyModule()
assert isinstance(a, torch.nn.Module)
def test_outside():
a = MyModule()
_ = a.hparams
class A:
def test(self):
a = MyModule()
_ = a.hparams
def test2(self):
test_outside()
test_outside()
A().test2()
A().test()
class SubClassEvalModel(EvalModelTemplate):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class SubSubClassEvalModel(SubClassEvalModel):
pass
class AggSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, my_loss=torch.nn.CrossEntropyLoss(), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
class UnconventionalArgsEvalModel(EvalModelTemplate):
""" A model that has unconventional names for "self", "*args" and "**kwargs". """
def __init__(obj, *more_args, other_arg=300, **more_kwargs):
# intentionally named obj
super().__init__(*more_args, **more_kwargs)
obj.save_hyperparameters()
class DictConfSubClassEvalModel(SubClassEvalModel):
def __init__(self, *args, dict_conf=OmegaConf.create(dict(my_param='something')), **kwargs):
super().__init__(*args, **kwargs)
self.save_hyperparameters()
@pytest.mark.parametrize("cls", [
EvalModelTemplate,
SubClassEvalModel,
SubSubClassEvalModel,
AggSubClassEvalModel,
UnconventionalArgsEvalModel,
DictConfSubClassEvalModel,
])
def test_collect_init_arguments(tmpdir, cls):
""" Test that the model automatically saves the arguments passed into the constructor """
extra_args = {}
if cls is AggSubClassEvalModel:
extra_args.update(my_loss=torch.nn.CosineEmbeddingLoss())
elif cls is DictConfSubClassEvalModel:
extra_args.update(dict_conf=OmegaConf.create(dict(my_param='anything')))
model = cls(**extra_args)
assert model.hparams.batch_size == 32
model = cls(batch_size=179, **extra_args)
assert model.hparams.batch_size == 179
if isinstance(model, SubClassEvalModel):
assert model.hparams.subclass_arg == 1200
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['batch_size'] == 179
# verify that model loads correctly
model = cls.load_from_checkpoint(raw_checkpoint_path)
assert model.hparams.batch_size == 179
if isinstance(model, AggSubClassEvalModel):
assert isinstance(model.hparams.my_loss, torch.nn.CosineEmbeddingLoss)
if isinstance(model, DictConfSubClassEvalModel):
assert isinstance(model.hparams.dict_conf, Container)
assert model.hparams.dict_conf['my_param'] == 'anything'
# verify that we can overwrite whatever we want
model = cls.load_from_checkpoint(raw_checkpoint_path, batch_size=99)
assert model.hparams.batch_size == 99
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if '.ckpt' in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
class LocalVariableModelSuperLast(EvalModelTemplate):
""" This model has the super().__init__() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
super().__init__(*args, **kwargs) # this is intentionally here at the end
class LocalVariableModelSuperFirst(EvalModelTemplate):
""" This model has the _auto_collect_arguments() call at the end. """
def __init__(self, arg1, arg2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.argument1 = arg1 # arg2 intentionally not set
arg1 = 'overwritten'
local_var = 1234
self.save_hyperparameters() # this is intentionally here at the end
@pytest.mark.parametrize("cls", [
LocalVariableModelSuperFirst,
# LocalVariableModelSuperLast,
])
def test_collect_init_arguments_with_local_vars(cls):
""" Tests that only the arguments are collected and not local variables. """
model = cls(arg1=1, arg2=2)
assert 'local_var' not in model.hparams
assert model.hparams['arg1'] == 'overwritten'
assert model.hparams['arg2'] == 2
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
class AnotherArgModel(EvalModelTemplate):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(EvalModelTemplate):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize("cls,config", [
(AnotherArgModel, dict(arg1=42)),
(OtherArgsModel, dict(arg1=3.14, arg2='abc')),
])
def test_single_config_models_fail(tmpdir, cls, config):
""" Test fail on passing unsupported config type. """
with pytest.raises(ValueError):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ['module_arguments'])
def test_load_past_checkpoint(tmpdir, past_key):
model = EvalModelTemplate()
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint['hparams_type'] = 'Namespace'
raw_checkpoint[past_key]['batch_size'] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = EvalModelTemplate.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmpdir):
ad = AttributeDict({'key1': 1, 'key2': 'abc'})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsEvalModel(EvalModelTemplate):
""" A model that has an attribute that cannot be pickled. """
def __init__(self, foo='bar', pickle_me=(lambda x: x + 1), **kwargs):
super().__init__(**kwargs)
assert not is_picklable(pickle_me)
self.save_hyperparameters()
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsEvalModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert 'pickle_me' not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(batch_size=32, learning_rate=0.001, data_root='./any/path/here',
nasted=dict(any_num=123, anystr='abcd'))
path_yaml = os.path.join(tmpdir, 'testing-hparams.yaml')
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassEvalModel(EvalModelTemplate):
def __init__(self):
super().__init__()
class SimpleNoArgsModel(LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def test_step(self, batch, batch_nb):
x, y = batch
loss = F.cross_entropy(self(x), y)
return {'loss': loss, 'log': {'train_loss': loss}}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
@pytest.mark.parametrize("cls", [
SimpleNoArgsModel,
NoArgsSubClassEvalModel,
])
def test_model_nohparams_train_test(tmpdir, cls):
"""Test models that do not tae any argument in init."""
model = cls()
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
)
train_loader = DataLoader(TrialMNIST(os.getcwd(), train=True, download=True), batch_size=32)
trainer.fit(model, train_loader)
test_loader = DataLoader(TrialMNIST(os.getcwd(), train=False, download=True), batch_size=32)
trainer.test(test_dataloaders=test_loader)
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
class LocalModel(EvalModelTemplate):
def __init__(self, batch_size=15):
super().__init__(batch_size=batch_size)
self.save_hyperparameters()
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert 'non_exist_kwarg' not in model.hparams
class SuperClassPositionalArgs(EvalModelTemplate):
def __init__(self, hparams):
super().__init__()
self._hparams = None # pretend EvalModelTemplate did not call self.save_hyperparameters()
self.hparams = hparams
class SubClassVarArgs(SuperClassPositionalArgs):
""" Loading this model should accept hparams and init in the super class """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_args(tmpdir):
""" Test for inheritance: super class takes positional arg, subclass takes varargs. """
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match="__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
class RuntimeParamChangeModelSaving(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters()
class RuntimeParamChangeModelAssign(BoringModel):
def __init__(self, **kwargs):
super().__init__()
self.hparams = kwargs
@pytest.mark.parametrize("cls", [RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign])
def test_init_arg_with_runtime_change(tmpdir, cls):
"""Test that we save/export only the initial hparams, no other runtime change allowed"""
model = cls(running_arg=123)
assert model.hparams.running_arg == 123
model.hparams.running_arg = -1
assert model.hparams.running_arg == -1
model.hparams = Namespace(abc=42)
assert model.hparams.abc == 42
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
path_yaml = os.path.join(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(path_yaml)
assert hparams.get('running_arg') == 123
class UnsafeParamModel(BoringModel):
def __init__(self, my_path, any_param=123):
super().__init__()
self.save_hyperparameters()
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
trainer.test()
| [((2764, 2834), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (2787, 2834), False, 'import pytest\n'), ((3026, 3096), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (3049, 3096), False, 'import pytest\n'), ((3277, 3347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SaveHparamsModel, AssignHparamsModel]'], {}), "('cls', [SaveHparamsModel, AssignHparamsModel])\n", (3300, 3347), False, 'import pytest\n'), ((7932, 8110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[EvalModelTemplate, SubClassEvalModel, SubSubClassEvalModel,\n AggSubClassEvalModel, UnconventionalArgsEvalModel,\n DictConfSubClassEvalModel]'], {}), "('cls', [EvalModelTemplate, SubClassEvalModel,\n SubSubClassEvalModel, AggSubClassEvalModel, UnconventionalArgsEvalModel,\n DictConfSubClassEvalModel])\n", (7955, 8110), False, 'import pytest\n'), ((11162, 11224), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[LocalVariableModelSuperFirst]'], {}), "('cls', [LocalVariableModelSuperFirst])\n", (11185, 11224), False, 'import pytest\n'), ((13180, 13237), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""past_key"""', "['module_arguments']"], {}), "('past_key', ['module_arguments'])\n", (13203, 13237), False, 'import pytest\n'), ((16412, 16488), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[SimpleNoArgsModel, NoArgsSubClassEvalModel]'], {}), "('cls', [SimpleNoArgsModel, NoArgsSubClassEvalModel])\n", (16435, 16488), False, 'import pytest\n'), ((18985, 19083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[RuntimeParamChangeModelSaving, RuntimeParamChangeModelAssign]'], {}), "('cls', [RuntimeParamChangeModelSaving,\n RuntimeParamChangeModelAssign])\n", (19008, 19083), False, 'import pytest\n'), ((1923, 1988), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)', 'overfit_batches': '(2)'}), '(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)\n', (1930, 1988), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((2146, 2177), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (2156, 2177), False, 'import torch\n'), ((5614, 5681), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(2)', 'overfit_batches': '(0.5)'}), '(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)\n', (5621, 5681), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((5839, 5870), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (5849, 5870), False, 'import torch\n'), ((6940, 6967), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (6965, 6967), False, 'import torch\n'), ((8975, 9042), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(2)', 'overfit_batches': '(0.5)'}), '(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)\n', (8982, 9042), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((9145, 9176), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (9155, 9176), False, 'import torch\n'), ((10032, 10079), 'os.listdir', 'os.listdir', 
(['trainer.checkpoint_callback.dirpath'], {}), '(trainer.checkpoint_callback.dirpath)\n', (10042, 10079), False, 'import os\n'), ((10265, 10335), 'os.path.join', 'os.path.join', (['trainer.checkpoint_callback.dirpath', 'raw_checkpoint_path'], {}), '(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)\n', (10277, 10335), False, 'import os\n'), ((13299, 13318), 'tests.base.EvalModelTemplate', 'EvalModelTemplate', ([], {}), '()\n', (13316, 13318), False, 'from tests.base import EvalModelTemplate, TrialMNIST, BoringModel\n'), ((13360, 13406), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (13367, 13406), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((13564, 13595), 'torch.load', 'torch.load', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (13574, 13595), False, 'import torch\n'), ((13888, 13935), 'torch.save', 'torch.save', (['raw_checkpoint', 'raw_checkpoint_path'], {}), '(raw_checkpoint, raw_checkpoint_path)\n', (13898, 13935), False, 'import torch\n'), ((13990, 14049), 'tests.base.EvalModelTemplate.load_from_checkpoint', 'EvalModelTemplate.load_from_checkpoint', (['raw_checkpoint_path'], {}), '(raw_checkpoint_path)\n', (14028, 14049), False, 'from tests.base import EvalModelTemplate, TrialMNIST, BoringModel\n'), ((14138, 14179), 'pytorch_lightning.utilities.AttributeDict', 'AttributeDict', (["{'key1': 1, 'key2': 'abc'}"], {}), "({'key1': 1, 'key2': 'abc'})\n", (14151, 14179), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((14190, 14206), 'pickle.dumps', 'pickle.dumps', (['ad'], {}), '(ad)\n', (14202, 14206), False, 'import pickle\n'), ((14252, 14273), 'cloudpickle.dumps', 'cloudpickle.dumps', (['ad'], {}), '(ad)\n', (14269, 14273), False, 'import cloudpickle\n'), ((14715, 14760), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_steps': '(1)'}), '(default_root_dir=tmpdir, max_steps=1)\n', (14722, 14760), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((15147, 15191), 'os.path.join', 'os.path.join', (['tmpdir', '"""testing-hparams.yaml"""'], {}), "(tmpdir, 'testing-hparams.yaml')\n", (15159, 15191), False, 'import os\n'), ((15197, 15237), 'pytorch_lightning.core.saving.save_hparams_to_yaml', 'save_hparams_to_yaml', (['path_yaml', 'hparams'], {}), '(path_yaml, hparams)\n', (15217, 15237), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((16643, 16689), 'pytorch_lightning.Trainer', 'Trainer', ([], {'max_epochs': '(1)', 'default_root_dir': 'tmpdir'}), '(max_epochs=1, default_root_dir=tmpdir)\n', (16650, 16689), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((17430, 17476), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (17437, 17476), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((18398, 18444), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, max_epochs=1)\n', (18405, 18444), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((19400, 19417), 'argparse.Namespace', 'Namespace', ([], {'abc': '(42)'}), '(abc=42)\n', (19409, 19417), False, 'from argparse import Namespace\n'), ((19468, 19584), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'limit_train_batches': 
'(2)', 'limit_val_batches': '(2)', 'limit_test_batches': '(2)', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2,\n limit_test_batches=2, max_epochs=1)\n', (19475, 19584), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((19668, 19738), 'os.path.join', 'os.path.join', (['trainer.logger.log_dir', 'trainer.logger.NAME_HPARAMS_FILE'], {}), '(trainer.logger.log_dir, trainer.logger.NAME_HPARAMS_FILE)\n', (19680, 19738), False, 'import os\n'), ((19753, 19786), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (19775, 19786), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((20101, 20217), 'pytorch_lightning.Trainer', 'Trainer', ([], {'default_root_dir': 'tmpdir', 'limit_train_batches': '(2)', 'limit_val_batches': '(2)', 'limit_test_batches': '(2)', 'max_epochs': '(1)'}), '(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2,\n limit_test_batches=2, max_epochs=1)\n', (20108, 20217), False, 'from pytorch_lightning import Trainer, LightningModule\n'), ((7253, 7280), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (7278, 7280), False, 'import torch\n'), ((13124, 13149), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13137, 13149), False, 'import pytest\n'), ((14224, 14241), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (14236, 14241), False, 'import pickle\n'), ((14291, 14308), 'pickle.loads', 'pickle.loads', (['pkl'], {}), '(pkl)\n', (14303, 14308), False, 'import pickle\n'), ((14770, 14881), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""attribute \'pickle_me\' removed from hparams because it cannot be pickled"""'}), '(UserWarning, match=\n "attribute \'pickle_me\' removed from hparams because it cannot be pickled")\n', (14782, 14881), False, 'import pytest\n'), ((15249, 15282), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15271, 15282), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15331, 15351), 'argparse.Namespace', 'Namespace', ([], {}), '(**hparams)\n', (15340, 15351), False, 'from argparse import Namespace\n'), ((15364, 15397), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15386, 15397), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15446, 15468), 'pytorch_lightning.utilities.AttributeDict', 'AttributeDict', (['hparams'], {}), '(hparams)\n', (15459, 15468), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((15481, 15514), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15503, 15514), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15563, 15588), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (['hparams'], {}), '(hparams)\n', (15579, 15588), False, 'from omegaconf import OmegaConf, Container\n'), ((15601, 15634), 'pytorch_lightning.core.saving.load_hparams_from_yaml', 'load_hparams_from_yaml', (['path_yaml'], {}), '(path_yaml)\n', (15623, 15634), False, 'from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml\n'), ((15862, 15890), 'torch.nn.Linear', 
'torch.nn.Linear', (['(28 * 28)', '(10)'], {}), '(28 * 28, 10)\n', (15877, 15890), False, 'import torch\n'), ((18534, 18629), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""__init__\\\\(\\\\) got an unexpected keyword argument \'test\'"""'}), '(TypeError, match=\n "__init__\\\\(\\\\) got an unexpected keyword argument \'test\'")\n', (18547, 18629), False, 'import pytest\n'), ((20062, 20085), 'fsspec.implementations.local.LocalFileSystem', 'LocalFileSystem', (['tmpdir'], {}), '(tmpdir)\n', (20077, 20085), False, 'from fsspec.implementations.local import LocalFileSystem\n'), ((2917, 2939), 'argparse.Namespace', 'Namespace', ([], {'test_arg': '(14)'}), '(test_arg=14)\n', (2926, 2939), False, 'from argparse import Namespace\n'), ((14558, 14581), 'pytorch_lightning.utilities.is_picklable', 'is_picklable', (['pickle_me'], {}), '(pickle_me)\n', (14570, 14581), False, 'from pytorch_lightning.utilities import AttributeDict, is_picklable\n'), ((16755, 16766), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16764, 16766), False, 'import os\n'), ((16889, 16900), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16898, 16900), False, 'import os\n'), ((8360, 8390), 'torch.nn.CosineEmbeddingLoss', 'torch.nn.CosineEmbeddingLoss', ([], {}), '()\n', (8388, 8390), False, 'import torch\n')] |
hadrianmontes/jax-md | tests/space_test.py | cea1cc6b22db6044a502eeeab4bddde35ac15d94 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_md.space."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config as jax_config
from jax import random
import jax.numpy as jnp
from jax import grad, jit, jacfwd
from jax import test_util as jtu
from jax_md import space, test_util, quantity, energy
from jax_md.util import *
from functools import partial
from unittest import SkipTest
test_util.update_test_tolerance(5e-5, 5e-13)
jax_config.parse_flags_with_absl()
jax_config.enable_omnistaging()
FLAGS = jax_config.FLAGS
PARTICLE_COUNT = 10
STOCHASTIC_SAMPLES = 10
SHIFT_STEPS = 10
SPATIAL_DIMENSION = [2, 3]
BOX_FORMATS = ['scalar', 'vector', 'matrix']
if FLAGS.jax_enable_x64:
POSITION_DTYPE = [f32, f64]
else:
POSITION_DTYPE = [f32]
def make_periodic_general_test_system(N, dim, dtype, box_format):
assert box_format in BOX_FORMATS
box_size = quantity.box_size_at_number_density(N, 1.0, dim)
box = dtype(box_size)
if box_format == 'vector':
box = jnp.array(jnp.ones(dim) * box_size, dtype)
elif box_format == 'matrix':
box = jnp.array(jnp.eye(dim) * box_size, dtype)
d, s = space.periodic(jnp.diag(box) if box_format == 'matrix' else box)
d_gf, s_gf = space.periodic_general(box)
d_g, s_g = space.periodic_general(box, fractional_coordinates=False)
key = random.PRNGKey(0)
R_f = random.uniform(key, (N, dim), dtype=dtype)
R = space.transform(box, R_f)
E = jit(energy.soft_sphere_pair(d))
E_gf = jit(energy.soft_sphere_pair(d_gf))
E_g = jit(energy.soft_sphere_pair(d_g))
return R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g)
# pylint: disable=invalid-name
class SpaceTest(jtu.JaxTestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
R_prime_exact = jnp.array(jnp.einsum('ij,kj->ki', T, R), dtype=dtype)
R_prime = space.transform(T, R)
self.assertAllClose(R_prime_exact, R_prime)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}'.format(dim),
'spatial_dimension': dim
} for dim in SPATIAL_DIMENSION))
def test_transform_grad(self, spatial_dimension):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(split1, (PARTICLE_COUNT, spatial_dimension))
T = random.normal(split2, (spatial_dimension, spatial_dimension))
R_prime = space.transform(T, R)
energy_direct = lambda R: jnp.sum(R ** 2)
energy_indirect = lambda T, R: jnp.sum(space.transform(T, R) ** 2)
grad_direct = grad(energy_direct)(R_prime)
grad_indirect = grad(energy_indirect, 1)(T, R)
self.assertAllClose(grad_direct, grad_indirect)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform_inverse(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
T_inv = space.inverse(T)
R_test = space.transform(T_inv, space.transform(T, R))
self.assertAllClose(R, R_test)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_canonicalize_displacement_or_metric(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
displacement, _ = space.periodic_general(jnp.eye(spatial_dimension))
metric = space.metric(displacement)
test_metric = space.canonicalize_displacement_or_metric(displacement)
metric = space.map_product(metric)
test_metric = space.map_product(test_metric)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(metric(R, R), test_metric(R, R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_displacement(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split = random.split(key)
R = random.uniform(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = space.map_product(space.pairwise_displacement)(R, R)
dR_wrapped = space.periodic_displacement(f32(1.0), dR)
dR_direct = dR
dr_direct = space.distance(dR)
dr_direct = jnp.reshape(dr_direct, dr_direct.shape + (1,))
if spatial_dimension == 2:
for i in range(-1, 2):
for j in range(-1, 2):
dR_shifted = dR + jnp.array([i, j], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(dr_shifted < dr_direct, dr_shifted, dr_direct)
elif spatial_dimension == 3:
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
dR_shifted = dR + jnp.array([i, j, k], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(
dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(
dr_shifted < dr_direct, dr_shifted, dr_direct)
dR_direct = jnp.array(dR_direct, dtype=dR.dtype)
assert dR_wrapped.dtype == dtype
self.assertAllClose(dR_wrapped, dR_direct)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_shift(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.uniform(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.sqrt(f32(0.1)) * random.normal(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.where(dR > 0.49, f32(0.49), dR)
dR = jnp.where(dR < -0.49, f32(-0.49), dR)
R_shift = space.periodic_shift(f32(1.0), R, dR)
assert R_shift.dtype == R.dtype
assert jnp.all(R_shift < 1.0)
assert jnp.all(R_shift > 0.0)
dR_after = space.periodic_displacement(f32(1.0), R_shift - R)
assert dR_after.dtype == R.dtype
self.assertAllClose(dR_after, dR)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
self.assertAllClose(disp_fn(R_scaled, R_scaled), general_disp_fn(R, R))
assert disp_fn(R_scaled, R_scaled).dtype == dtype
self.assertAllClose(
shift_fn(R_scaled, dR), general_shift_fn(R, dR) * box_size)
assert shift_fn(R_scaled, dR).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general_grad(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
grad_fn = grad(lambda R: jnp.sum(disp_fn(R, R) ** 2))
general_grad_fn = grad(lambda R: jnp.sum(general_disp_fn(R, R) ** 2))
self.assertAllClose(grad_fn(R_scaled), general_grad_fn(R))
assert general_grad_fn(R).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_dynamic(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension)
for _ in range(STOCHASTIC_SAMPLES):
key, split_T0_scale, split_T0_dT = random.split(key, 3)
key, split_T1_scale, split_T1_dT = random.split(key, 3)
key, split_t, split_R, split_dR = random.split(key, 4)
size_0 = 10.0 * random.uniform(split_T0_scale, ())
dtransform_0 = 0.5 * random.normal(
split_T0_dT, (spatial_dimension, spatial_dimension))
T_0 = jnp.array(size_0 * (eye + dtransform_0), dtype=dtype)
size_1 = 10.0 * random.uniform(split_T1_scale, (), dtype=dtype)
dtransform_1 = 0.5 * random.normal(
split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype)
T_1 = jnp.array(size_1 * (eye + dtransform_1), dtype=dtype)
disp_fn, shift_fn = space.periodic_general(T_0)
true_disp_fn, true_shift_fn = space.periodic_general(T_1)
disp_fn = partial(disp_fn, box=T_1)
disp_fn = space.map_product(disp_fn)
true_disp_fn = space.map_product(true_disp_fn)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = random.normal(
split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(
disp_fn(R, R), jnp.array(true_disp_fn(R, R), dtype=dtype))
self.assertAllClose(
shift_fn(R, dR, box=T_1), jnp.array(true_shift_fn(R, dR), dtype=dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_wrapped_vs_unwrapped(
self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension, dtype=dtype)
tol = 1e-13
if dtype is f32:
tol = 2e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split_R, split_T = random.split(key, 3)
dT = random.normal(
split_T, (spatial_dimension, spatial_dimension), dtype=dtype)
T = eye + dT + jnp.transpose(dT)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R0 = R
unwrapped_R = R
displacement, shift = space.periodic_general(T)
_, unwrapped_shift = space.periodic_general(T, wrapped=False)
displacement = space.map_product(displacement)
for _ in range(SHIFT_STEPS):
key, split = random.split(key)
dR = random.normal(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R = shift(R, dR)
unwrapped_R = unwrapped_shift(unwrapped_R, dR)
self.assertAllClose(
displacement(R, R0),
displacement(unwrapped_R, R0))
assert not (jnp.all(unwrapped_R > 0) and jnp.all(unwrapped_R < 1))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_energy(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(E(R), E_gf(R_f))
self.assertAllClose(E(R), E_g(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_force(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(grad(E)(R), grad(E_gf)(R_f))
self.assertAllClose(grad(E)(R), grad(E_g)(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_shift(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
R_new = s(R, grad(E)(R))
R_gf_new = s_gf(R_f, grad(E_gf)(R_f))
R_g_new = s_g(R, grad(E_g)(R))
self.assertAllClose(R_new, space.transform(box, R_gf_new))
self.assertAllClose(R_new, R_g_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(E_gf(R_f, box=deformed_box),
E_g(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_grad(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(grad(E_gf)(R_f, box=deformed_box),
grad(E_g)(R, new_box=deformed_box))
self.assertAllClose(jacfwd(E_gf)(R_f, box=deformed_box),
jacfwd(E_g)(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_shift(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
R_new = s_g(R, grad(E_g)(R), new_box=deformed_box)
R_gf_new = space.transform(deformed_box, s_gf(R_f, grad(E_gf)(R_f)))
self.assertAllClose(R_new, R_gf_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_grad_box(self, spatial_dimension, dtype, box_format):
if box_format == 'scalar':
raise SkipTest('Scalar case fails due to JAX Issue #5849.')
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
@grad
def box_energy_g_fn(box):
return E_g(R, new_box=box)
@grad
def box_energy_gf_fn(box):
return E_gf(R_f, box=box)
self.assertAllClose(box_energy_g_fn(box), box_energy_gf_fn(box))
if __name__ == '__main__':
absltest.main()
| [((984, 1029), 'jax_md.test_util.update_test_tolerance', 'test_util.update_test_tolerance', (['(5e-05)', '(5e-13)'], {}), '(5e-05, 5e-13)\n', (1015, 1029), False, 'from jax_md import space, test_util, quantity, energy\n'), ((1030, 1064), 'jax.config.config.parse_flags_with_absl', 'jax_config.parse_flags_with_absl', ([], {}), '()\n', (1062, 1064), True, 'from jax.config import config as jax_config\n'), ((1065, 1096), 'jax.config.config.enable_omnistaging', 'jax_config.enable_omnistaging', ([], {}), '()\n', (1094, 1096), True, 'from jax.config import config as jax_config\n'), ((1460, 1508), 'jax_md.quantity.box_size_at_number_density', 'quantity.box_size_at_number_density', (['N', '(1.0)', 'dim'], {}), '(N, 1.0, dim)\n', (1495, 1508), False, 'from jax_md import space, test_util, quantity, energy\n'), ((1788, 1815), 'jax_md.space.periodic_general', 'space.periodic_general', (['box'], {}), '(box)\n', (1810, 1815), False, 'from jax_md import space, test_util, quantity, energy\n'), ((1829, 1886), 'jax_md.space.periodic_general', 'space.periodic_general', (['box'], {'fractional_coordinates': '(False)'}), '(box, fractional_coordinates=False)\n', (1851, 1886), False, 'from jax_md import space, test_util, quantity, energy\n'), ((1896, 1913), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1910, 1913), False, 'from jax import random\n'), ((1923, 1965), 'jax.random.uniform', 'random.uniform', (['key', '(N, dim)'], {'dtype': 'dtype'}), '(key, (N, dim), dtype=dtype)\n', (1937, 1965), False, 'from jax import random\n'), ((1972, 1997), 'jax_md.space.transform', 'space.transform', (['box', 'R_f'], {}), '(box, R_f)\n', (1987, 1997), False, 'from jax_md import space, test_util, quantity, energy\n'), ((20126, 20141), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (20139, 20141), False, 'from absl.testing import absltest\n'), ((2009, 2035), 'jax_md.energy.soft_sphere_pair', 'energy.soft_sphere_pair', (['d'], {}), '(d)\n', (2032, 2035), False, 'from jax_md import space, test_util, quantity, energy\n'), ((2050, 2079), 'jax_md.energy.soft_sphere_pair', 'energy.soft_sphere_pair', (['d_gf'], {}), '(d_gf)\n', (2073, 2079), False, 'from jax_md import space, test_util, quantity, energy\n'), ((2093, 2121), 'jax_md.energy.soft_sphere_pair', 'energy.soft_sphere_pair', (['d_g'], {}), '(d_g)\n', (2116, 2121), False, 'from jax_md import space, test_util, quantity, energy\n'), ((2621, 2638), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (2635, 2638), False, 'from jax import random\n'), ((3331, 3348), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3345, 3348), False, 'from jax import random\n'), ((4239, 4256), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (4253, 4256), False, 'from jax import random\n'), ((5077, 5094), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (5091, 5094), False, 'from jax import random\n'), ((5182, 5208), 'jax_md.space.metric', 'space.metric', (['displacement'], {}), '(displacement)\n', (5194, 5208), False, 'from jax_md import space, test_util, quantity, energy\n'), ((5227, 5282), 'jax_md.space.canonicalize_displacement_or_metric', 'space.canonicalize_displacement_or_metric', (['displacement'], {}), '(displacement)\n', (5268, 5282), False, 'from jax_md import space, test_util, quantity, energy\n'), ((5297, 5322), 'jax_md.space.map_product', 'space.map_product', (['metric'], {}), '(metric)\n', (5314, 5322), False, 'from jax_md import space, test_util, quantity, energy\n'), ((5341, 5371), 
'jax_md.space.map_product', 'space.map_product', (['test_metric'], {}), '(test_metric)\n', (5358, 5371), False, 'from jax_md import space, test_util, quantity, energy\n'), ((5957, 5974), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (5971, 5974), False, 'from jax import random\n'), ((7864, 7881), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (7878, 7881), False, 'from jax import random\n'), ((8947, 8964), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (8961, 8964), False, 'from jax import random\n'), ((10376, 10393), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (10390, 10393), False, 'from jax import random\n'), ((11761, 11778), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (11775, 11778), False, 'from jax import random\n'), ((11790, 11816), 'jax.numpy.eye', 'jnp.eye', (['spatial_dimension'], {}), '(spatial_dimension)\n', (11797, 11816), True, 'import jax.numpy as jnp\n'), ((13541, 13558), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (13555, 13558), False, 'from jax import random\n'), ((13570, 13609), 'jax.numpy.eye', 'jnp.eye', (['spatial_dimension'], {'dtype': 'dtype'}), '(spatial_dimension, dtype=dtype)\n', (13577, 13609), True, 'import jax.numpy as jnp\n'), ((14658, 14929), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (14677, 14929), True, 'from jax import test_util as jtu\n'), ((15318, 15589), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (15337, 15589), True, 'from jax import test_util as jtu\n'), ((16001, 16272), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (16020, 16272), True, 'from jax import test_util as jtu\n'), ((16792, 17063), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), 
"({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (16811, 17063), True, 'from jax import test_util as jtu\n'), ((17509, 17780), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (17528, 17780), True, 'from jax import test_util as jtu\n'), ((18407, 18678), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (18426, 18678), True, 'from jax import test_util as jtu\n'), ((19234, 19505), 'jax.test_util.cases_from_list', 'jtu.cases_from_list', (["({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)"], {}), "({'testcase_name':\n f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',\n 'spatial_dimension': dim, 'dtype': dtype, 'box_format': box_format} for\n dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE for box_format in\n BOX_FORMATS)\n", (19253, 19505), True, 'from jax import test_util as jtu\n'), ((1723, 1736), 'jax.numpy.diag', 'jnp.diag', (['box'], {}), '(box)\n', (1731, 1736), True, 'import jax.numpy as jnp\n'), ((2708, 2728), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (2720, 2728), False, 'from jax import random\n'), ((2740, 2811), 'jax.random.normal', 'random.normal', (['split1', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (2753, 2811), False, 'from jax import random\n'), ((2831, 2905), 'jax.random.normal', 'random.normal', (['split2', '(spatial_dimension, spatial_dimension)'], {'dtype': 'dtype'}), '(split2, (spatial_dimension, spatial_dimension), dtype=dtype)\n', (2844, 2905), False, 'from jax import random\n'), ((3008, 3029), 'jax_md.space.transform', 'space.transform', (['T', 'R'], {}), '(T, R)\n', (3023, 3029), False, 'from jax_md import space, test_util, quantity, energy\n'), ((3418, 3438), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (3430, 3438), False, 'from jax import random\n'), ((3450, 3508), 'jax.random.normal', 'random.normal', (['split1', '(PARTICLE_COUNT, spatial_dimension)'], {}), '(split1, (PARTICLE_COUNT, spatial_dimension))\n', (3463, 3508), False, 'from jax import random\n'), ((3519, 3580), 'jax.random.normal', 'random.normal', 
(['split2', '(spatial_dimension, spatial_dimension)'], {}), '(split2, (spatial_dimension, spatial_dimension))\n', (3532, 3580), False, 'from jax import random\n'), ((3598, 3619), 'jax_md.space.transform', 'space.transform', (['T', 'R'], {}), '(T, R)\n', (3613, 3619), False, 'from jax_md import space, test_util, quantity, energy\n'), ((4381, 4401), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (4393, 4401), False, 'from jax import random\n'), ((4413, 4484), 'jax.random.normal', 'random.normal', (['split1', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (4426, 4484), False, 'from jax import random\n'), ((4505, 4579), 'jax.random.normal', 'random.normal', (['split2', '(spatial_dimension, spatial_dimension)'], {'dtype': 'dtype'}), '(split2, (spatial_dimension, spatial_dimension), dtype=dtype)\n', (4518, 4579), False, 'from jax import random\n'), ((4603, 4619), 'jax_md.space.inverse', 'space.inverse', (['T'], {}), '(T)\n', (4616, 4619), False, 'from jax_md import space, test_util, quantity, energy\n'), ((5141, 5167), 'jax.numpy.eye', 'jnp.eye', (['spatial_dimension'], {}), '(spatial_dimension)\n', (5148, 5167), True, 'import jax.numpy as jnp\n'), ((5441, 5461), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (5453, 5461), False, 'from jax import random\n'), ((5473, 5544), 'jax.random.normal', 'random.normal', (['split1', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (5486, 5544), False, 'from jax import random\n'), ((6035, 6052), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (6047, 6052), False, 'from jax import random\n'), ((6064, 6135), 'jax.random.uniform', 'random.uniform', (['split', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (6078, 6135), False, 'from jax import random\n'), ((6311, 6329), 'jax_md.space.distance', 'space.distance', (['dR'], {}), '(dR)\n', (6325, 6329), False, 'from jax_md import space, test_util, quantity, energy\n'), ((6348, 6394), 'jax.numpy.reshape', 'jnp.reshape', (['dr_direct', '(dr_direct.shape + (1,))'], {}), '(dr_direct, dr_direct.shape + (1,))\n', (6359, 6394), True, 'import jax.numpy as jnp\n'), ((7403, 7439), 'jax.numpy.array', 'jnp.array', (['dR_direct'], {'dtype': 'dR.dtype'}), '(dR_direct, dtype=dR.dtype)\n', (7412, 7439), True, 'import jax.numpy as jnp\n'), ((7951, 7971), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (7963, 7971), False, 'from jax import random\n'), ((7983, 8055), 'jax.random.uniform', 'random.uniform', (['split1', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (7997, 8055), False, 'from jax import random\n'), ((8384, 8406), 'jax.numpy.all', 'jnp.all', (['(R_shift < 1.0)'], {}), '(R_shift < 1.0)\n', (8391, 8406), True, 'import jax.numpy as jnp\n'), ((8420, 8442), 'jax.numpy.all', 'jnp.all', (['(R_shift > 0.0)'], {}), '(R_shift > 0.0)\n', (8427, 8442), True, 'import jax.numpy as jnp\n'), ((9097, 9117), 'jax.random.split', 'random.split', (['key', '(4)'], {}), '(key, 4)\n', (9109, 9117), False, 'from jax import random\n'), ((9267, 9285), 'jax.numpy.diag', 'jnp.diag', (['box_size'], {}), '(box_size)\n', (9275, 9285), True, 'import jax.numpy as jnp\n'), ((9297, 9369), 'jax.random.uniform', 'random.uniform', (['split2', '(PARTICLE_COUNT, 
spatial_dimension)'], {'dtype': 'dtype'}), '(split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (9311, 9369), False, 'from jax import random\n'), ((9421, 9492), 'jax.random.normal', 'random.normal', (['split3', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (9434, 9492), False, 'from jax import random\n'), ((9529, 9553), 'jax_md.space.periodic', 'space.periodic', (['box_size'], {}), '(box_size)\n', (9543, 9553), False, 'from jax_md import space, test_util, quantity, energy\n'), ((9596, 9629), 'jax_md.space.periodic_general', 'space.periodic_general', (['transform'], {}), '(transform)\n', (9618, 9629), False, 'from jax_md import space, test_util, quantity, energy\n'), ((9647, 9673), 'jax_md.space.map_product', 'space.map_product', (['disp_fn'], {}), '(disp_fn)\n', (9664, 9673), False, 'from jax_md import space, test_util, quantity, energy\n'), ((9698, 9732), 'jax_md.space.map_product', 'space.map_product', (['general_disp_fn'], {}), '(general_disp_fn)\n', (9715, 9732), False, 'from jax_md import space, test_util, quantity, energy\n'), ((10526, 10546), 'jax.random.split', 'random.split', (['key', '(4)'], {}), '(key, 4)\n', (10538, 10546), False, 'from jax import random\n'), ((10696, 10714), 'jax.numpy.diag', 'jnp.diag', (['box_size'], {}), '(box_size)\n', (10704, 10714), True, 'import jax.numpy as jnp\n'), ((10726, 10798), 'jax.random.uniform', 'random.uniform', (['split2', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (10740, 10798), False, 'from jax import random\n'), ((10850, 10921), 'jax.random.normal', 'random.normal', (['split3', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (10863, 10921), False, 'from jax import random\n'), ((10958, 10982), 'jax_md.space.periodic', 'space.periodic', (['box_size'], {}), '(box_size)\n', (10972, 10982), False, 'from jax_md import space, test_util, quantity, energy\n'), ((11025, 11058), 'jax_md.space.periodic_general', 'space.periodic_general', (['transform'], {}), '(transform)\n', (11047, 11058), False, 'from jax_md import space, test_util, quantity, energy\n'), ((11076, 11102), 'jax_md.space.map_product', 'space.map_product', (['disp_fn'], {}), '(disp_fn)\n', (11093, 11102), False, 'from jax_md import space, test_util, quantity, energy\n'), ((11127, 11161), 'jax_md.space.map_product', 'space.map_product', (['general_disp_fn'], {}), '(general_disp_fn)\n', (11144, 11161), False, 'from jax_md import space, test_util, quantity, energy\n'), ((11899, 11919), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (11911, 11919), False, 'from jax import random\n'), ((11961, 11981), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (11973, 11981), False, 'from jax import random\n'), ((12022, 12042), 'jax.random.split', 'random.split', (['key', '(4)'], {}), '(key, 4)\n', (12034, 12042), False, 'from jax import random\n'), ((12216, 12269), 'jax.numpy.array', 'jnp.array', (['(size_0 * (eye + dtransform_0))'], {'dtype': 'dtype'}), '(size_0 * (eye + dtransform_0), dtype=dtype)\n', (12225, 12269), True, 'import jax.numpy as jnp\n'), ((12471, 12524), 'jax.numpy.array', 'jnp.array', (['(size_1 * (eye + dtransform_1))'], {'dtype': 'dtype'}), '(size_1 * (eye + dtransform_1), dtype=dtype)\n', (12480, 12524), True, 'import jax.numpy as jnp\n'), ((12552, 12579), 
'jax_md.space.periodic_general', 'space.periodic_general', (['T_0'], {}), '(T_0)\n', (12574, 12579), False, 'from jax_md import space, test_util, quantity, energy\n'), ((12616, 12643), 'jax_md.space.periodic_general', 'space.periodic_general', (['T_1'], {}), '(T_1)\n', (12638, 12643), False, 'from jax_md import space, test_util, quantity, energy\n'), ((12661, 12686), 'functools.partial', 'partial', (['disp_fn'], {'box': 'T_1'}), '(disp_fn, box=T_1)\n', (12668, 12686), False, 'from functools import partial\n'), ((12704, 12730), 'jax_md.space.map_product', 'space.map_product', (['disp_fn'], {}), '(disp_fn)\n', (12721, 12730), False, 'from jax_md import space, test_util, quantity, energy\n'), ((12752, 12783), 'jax_md.space.map_product', 'space.map_product', (['true_disp_fn'], {}), '(true_disp_fn)\n', (12769, 12783), False, 'from jax_md import space, test_util, quantity, energy\n'), ((12795, 12868), 'jax.random.uniform', 'random.uniform', (['split_R', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (12809, 12868), False, 'from jax import random\n'), ((12889, 12962), 'jax.random.normal', 'random.normal', (['split_dR', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (12902, 12962), False, 'from jax import random\n'), ((13736, 13756), 'jax.random.split', 'random.split', (['key', '(3)'], {}), '(key, 3)\n', (13748, 13756), False, 'from jax import random\n'), ((13769, 13844), 'jax.random.normal', 'random.normal', (['split_T', '(spatial_dimension, spatial_dimension)'], {'dtype': 'dtype'}), '(split_T, (spatial_dimension, spatial_dimension), dtype=dtype)\n', (13782, 13844), False, 'from jax import random\n'), ((13904, 13977), 'jax.random.uniform', 'random.uniform', (['split_R', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (13918, 13977), False, 'from jax import random\n'), ((14051, 14076), 'jax_md.space.periodic_general', 'space.periodic_general', (['T'], {}), '(T)\n', (14073, 14076), False, 'from jax_md import space, test_util, quantity, energy\n'), ((14104, 14144), 'jax_md.space.periodic_general', 'space.periodic_general', (['T'], {'wrapped': '(False)'}), '(T, wrapped=False)\n', (14126, 14144), False, 'from jax_md import space, test_util, quantity, energy\n'), ((14167, 14198), 'jax_md.space.map_product', 'space.map_product', (['displacement'], {}), '(displacement)\n', (14184, 14198), False, 'from jax_md import space, test_util, quantity, energy\n'), ((16685, 16715), 'jax_md.space.transform', 'space.transform', (['box', 'R_gf_new'], {}), '(box, R_gf_new)\n', (16700, 16715), False, 'from jax_md import space, test_util, quantity, energy\n'), ((19679, 19732), 'unittest.SkipTest', 'SkipTest', (['"""Scalar case fails due to JAX Issue #5849."""'], {}), "('Scalar case fails due to JAX Issue #5849.')\n", (19687, 19732), False, 'from unittest import SkipTest\n'), ((1582, 1595), 'jax.numpy.ones', 'jnp.ones', (['dim'], {}), '(dim)\n', (1590, 1595), True, 'import jax.numpy as jnp\n'), ((2948, 2977), 'jax.numpy.einsum', 'jnp.einsum', (['"""ij,kj->ki"""', 'T', 'R'], {}), "('ij,kj->ki', T, R)\n", (2958, 2977), True, 'import jax.numpy as jnp\n'), ((3653, 3668), 'jax.numpy.sum', 'jnp.sum', (['(R ** 2)'], {}), '(R ** 2)\n', (3660, 3668), True, 'import jax.numpy as jnp\n'), ((3763, 3782), 'jax.grad', 'grad', (['energy_direct'], {}), '(energy_direct)\n', (3767, 3782), False, 'from jax 
import grad, jit, jacfwd\n'), ((3814, 3838), 'jax.grad', 'grad', (['energy_indirect', '(1)'], {}), '(energy_indirect, 1)\n', (3818, 3838), False, 'from jax import grad, jit, jacfwd\n'), ((4659, 4680), 'jax_md.space.transform', 'space.transform', (['T', 'R'], {}), '(T, R)\n', (4674, 4680), False, 'from jax_md import space, test_util, quantity, energy\n'), ((6156, 6202), 'jax_md.space.map_product', 'space.map_product', (['space.pairwise_displacement'], {}), '(space.pairwise_displacement)\n', (6173, 6202), False, 'from jax_md import space, test_util, quantity, energy\n'), ((8097, 8168), 'jax.random.normal', 'random.normal', (['split2', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (8110, 8168), False, 'from jax import random\n'), ((9182, 9239), 'jax.random.uniform', 'random.uniform', (['split1', '(spatial_dimension,)'], {'dtype': 'dtype'}), '(split1, (spatial_dimension,), dtype=dtype)\n', (9196, 9239), False, 'from jax import random\n'), ((10611, 10668), 'jax.random.uniform', 'random.uniform', (['split1', '(spatial_dimension,)'], {'dtype': 'dtype'}), '(split1, (spatial_dimension,), dtype=dtype)\n', (10625, 10668), False, 'from jax import random\n'), ((12066, 12100), 'jax.random.uniform', 'random.uniform', (['split_T0_scale', '()'], {}), '(split_T0_scale, ())\n', (12080, 12100), False, 'from jax import random\n'), ((12128, 12194), 'jax.random.normal', 'random.normal', (['split_T0_dT', '(spatial_dimension, spatial_dimension)'], {}), '(split_T0_dT, (spatial_dimension, spatial_dimension))\n', (12141, 12194), False, 'from jax import random\n'), ((12293, 12340), 'jax.random.uniform', 'random.uniform', (['split_T1_scale', '()'], {'dtype': 'dtype'}), '(split_T1_scale, (), dtype=dtype)\n', (12307, 12340), False, 'from jax import random\n'), ((12368, 12447), 'jax.random.normal', 'random.normal', (['split_T1_dT', '(spatial_dimension, spatial_dimension)'], {'dtype': 'dtype'}), '(split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype)\n', (12381, 12447), False, 'from jax import random\n'), ((13875, 13892), 'jax.numpy.transpose', 'jnp.transpose', (['dT'], {}), '(dT)\n', (13888, 13892), True, 'import jax.numpy as jnp\n'), ((14256, 14273), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (14268, 14273), False, 'from jax import random\n'), ((14287, 14357), 'jax.random.normal', 'random.normal', (['split', '(PARTICLE_COUNT, spatial_dimension)'], {'dtype': 'dtype'}), '(split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)\n', (14300, 14357), False, 'from jax import random\n'), ((15887, 15894), 'jax.grad', 'grad', (['E'], {}), '(E)\n', (15891, 15894), False, 'from jax import grad, jit, jacfwd\n'), ((15899, 15909), 'jax.grad', 'grad', (['E_gf'], {}), '(E_gf)\n', (15903, 15909), False, 'from jax import grad, jit, jacfwd\n'), ((15940, 15947), 'jax.grad', 'grad', (['E'], {}), '(E)\n', (15944, 15947), False, 'from jax import grad, jit, jacfwd\n'), ((15952, 15961), 'jax.grad', 'grad', (['E_g'], {}), '(E_g)\n', (15956, 15961), False, 'from jax import grad, jit, jacfwd\n'), ((16564, 16571), 'jax.grad', 'grad', (['E'], {}), '(E)\n', (16568, 16571), False, 'from jax import grad, jit, jacfwd\n'), ((16601, 16611), 'jax.grad', 'grad', (['E_gf'], {}), '(E_gf)\n', (16605, 16611), False, 'from jax import grad, jit, jacfwd\n'), ((16639, 16648), 'jax.grad', 'grad', (['E_g'], {}), '(E_g)\n', (16643, 16648), False, 'from jax import grad, jit, jacfwd\n'), ((18153, 18163), 'jax.grad', 'grad', (['E_gf'], {}), '(E_gf)\n', (18157, 
18163), False, 'from jax import grad, jit, jacfwd\n'), ((18212, 18221), 'jax.grad', 'grad', (['E_g'], {}), '(E_g)\n', (18216, 18221), False, 'from jax import grad, jit, jacfwd\n'), ((18273, 18285), 'jax.jacfwd', 'jacfwd', (['E_gf'], {}), '(E_gf)\n', (18279, 18285), False, 'from jax import grad, jit, jacfwd\n'), ((18334, 18345), 'jax.jacfwd', 'jacfwd', (['E_g'], {}), '(E_g)\n', (18340, 18345), False, 'from jax import grad, jit, jacfwd\n'), ((19048, 19057), 'jax.grad', 'grad', (['E_g'], {}), '(E_g)\n', (19052, 19057), False, 'from jax import grad, jit, jacfwd\n'), ((1666, 1678), 'jax.numpy.eye', 'jnp.eye', (['dim'], {}), '(dim)\n', (1673, 1678), True, 'import jax.numpy as jnp\n'), ((14568, 14592), 'jax.numpy.all', 'jnp.all', (['(unwrapped_R > 0)'], {}), '(unwrapped_R > 0)\n', (14575, 14592), True, 'import jax.numpy as jnp\n'), ((14597, 14621), 'jax.numpy.all', 'jnp.all', (['(unwrapped_R < 1)'], {}), '(unwrapped_R < 1)\n', (14604, 14621), True, 'import jax.numpy as jnp\n'), ((19139, 19149), 'jax.grad', 'grad', (['E_gf'], {}), '(E_gf)\n', (19143, 19149), False, 'from jax import grad, jit, jacfwd\n'), ((3714, 3735), 'jax_md.space.transform', 'space.transform', (['T', 'R'], {}), '(T, R)\n', (3729, 3735), False, 'from jax_md import space, test_util, quantity, energy\n'), ((6582, 6608), 'jax_md.space.distance', 'space.distance', (['dR_shifted'], {}), '(dR_shifted)\n', (6596, 6608), False, 'from jax_md import space, test_util, quantity, energy\n'), ((6634, 6682), 'jax.numpy.reshape', 'jnp.reshape', (['dr_shifted', '(dr_shifted.shape + (1,))'], {}), '(dr_shifted, dr_shifted.shape + (1,))\n', (6645, 6682), True, 'import jax.numpy as jnp\n'), ((6708, 6764), 'jax.numpy.where', 'jnp.where', (['(dr_shifted < dr_direct)', 'dR_shifted', 'dR_direct'], {}), '(dr_shifted < dr_direct, dR_shifted, dR_direct)\n', (6717, 6764), True, 'import jax.numpy as jnp\n'), ((6789, 6845), 'jax.numpy.where', 'jnp.where', (['(dr_shifted < dr_direct)', 'dr_shifted', 'dr_direct'], {}), '(dr_shifted < dr_direct, dr_shifted, dr_direct)\n', (6798, 6845), True, 'import jax.numpy as jnp\n'), ((6523, 6555), 'jax.numpy.array', 'jnp.array', (['[i, j]'], {'dtype': 'R.dtype'}), '([i, j], dtype=R.dtype)\n', (6532, 6555), True, 'import jax.numpy as jnp\n'), ((7076, 7102), 'jax_md.space.distance', 'space.distance', (['dR_shifted'], {}), '(dR_shifted)\n', (7090, 7102), False, 'from jax_md import space, test_util, quantity, energy\n'), ((7130, 7178), 'jax.numpy.reshape', 'jnp.reshape', (['dr_shifted', '(dr_shifted.shape + (1,))'], {}), '(dr_shifted, dr_shifted.shape + (1,))\n', (7141, 7178), True, 'import jax.numpy as jnp\n'), ((7206, 7262), 'jax.numpy.where', 'jnp.where', (['(dr_shifted < dr_direct)', 'dR_shifted', 'dR_direct'], {}), '(dr_shifted < dr_direct, dR_shifted, dR_direct)\n', (7215, 7262), True, 'import jax.numpy as jnp\n'), ((7308, 7364), 'jax.numpy.where', 'jnp.where', (['(dr_shifted < dr_direct)', 'dr_shifted', 'dr_direct'], {}), '(dr_shifted < dr_direct, dr_shifted, dr_direct)\n', (7317, 7364), True, 'import jax.numpy as jnp\n'), ((7012, 7047), 'jax.numpy.array', 'jnp.array', (['[i, j, k]'], {'dtype': 'R.dtype'}), '([i, j, k], dtype=R.dtype)\n', (7021, 7047), True, 'import jax.numpy as jnp\n')] |
TrollPursePublishing/trollpurse-trollops | functions/batch-custom-action/status-api/lambda.py | 27e54cfd1ba1eed27097e2e3038dfab56691cf49 | import boto3
batch_client = boto3.client('batch')
def lambda_handler(event, context):
describe_response = batch_client.describe_jobs(
jobs=[ event.get('jobId', '')]
)
return describe_response.get('jobs', [{}])[0].get('status', '')
| [((29, 50), 'boto3.client', 'boto3.client', (['"""batch"""'], {}), "('batch')\n", (41, 50), False, 'import boto3\n')] |
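
For reference, a short sketch of the same AWS Batch status lookup performed outside the Lambda handler above. It assumes valid AWS credentials and a real job id; "example-job-id" is only a placeholder.

import boto3

batch_client = boto3.client("batch")
response = batch_client.describe_jobs(jobs=["example-job-id"])
# The handler above reduces this response to a single status string:
status = response.get("jobs", [{}])[0].get("status", "")
print(status)  # e.g. "RUNNABLE", "RUNNING", "SUCCEEDED" or "FAILED"
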
ifaraag/app | app/auth/views.py | d952f0dc58fd703074c19ed3235c1520119baf5f | from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask.ext.login import login_required, login_user, logout_user
from werkzeug import check_password_hash, generate_password_hash
from app import db, login_manager, pubnub, app, _callback
from .models import User
from .forms import LoginForm, SignupForm
mod_auth = Blueprint('auth', __name__)
@mod_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
error = None
print(request.method)
if request.method == 'POST':
user = db.users.find_one({'username': request.form['username']})
if not user:
error = 'User does not exist'
elif not check_password_hash(user['password'], request.form['password']):
error = 'Invalid credentials. Please try again.'
else:
user_obj = User(user['username'])
login_user(user_obj)
return redirect(url_for('devices.list_devices'))
return render_template('auth/login.html',
title='Log In to Hydrosmart',
form=form,
error=error)
@mod_auth.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm(request.form)
error = None
if request.method == 'POST':
existing_user = db.users.find_one({'username' :
request.form['username']})
if existing_user:
error = 'Username already exists'
else:
new_user = {'username' : request.form['username'],
'email' : request.form['email'],
'zip' : request.form['zip'],
'password' : generate_password_hash(request.form['password'])}
db.users.insert_one(new_user)
user = db.users.find_one({'username': request.form['username']})
pubnub.channel_group_add_channel(channel_group=app.config['PUBNUB_CHANNEL_GRP'], channel=user['username'])
pubnub.grant(channel=user['username'], auth_key=app.config['PUBNUB_AUTH_KEY'], read=True, write=True, manage=True, ttl=0)
return redirect(url_for('dashboard.dashboard'))
return render_template('auth/signup.html', form=form,
title='Sign Up for Hydrosmart', error=error)
# @mod_auth.route('/googlelogin', methods=['GET', 'POST'])
@mod_auth.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.")
return redirect('/login')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@login_manager.user_loader
def load_user(username):
u = db.users.find_one({'username': username})
if not u:
return None
return User(u['username'])
def callback(message, channel):
db.data.insert_one(message)
def error(message):
db.data.insert_one(message)
| [((350, 377), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (359, 377), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((998, 1090), 'flask.render_template', 'render_template', (['"""auth/login.html"""'], {'title': '"""Log In to Hydrosmart"""', 'form': 'form', 'error': 'error'}), "('auth/login.html', title='Log In to Hydrosmart', form=form,\n error=error)\n", (1013, 1090), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((2236, 2332), 'flask.render_template', 'render_template', (['"""auth/signup.html"""'], {'form': 'form', 'title': '"""Sign Up for Hydrosmart"""', 'error': 'error'}), "('auth/signup.html', form=form, title=\n 'Sign Up for Hydrosmart', error=error)\n", (2251, 2332), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((2477, 2490), 'flask.ext.login.logout_user', 'logout_user', ([], {}), '()\n', (2488, 2490), False, 'from flask.ext.login import login_required, login_user, logout_user\n'), ((2495, 2515), 'flask.flash', 'flash', (['"""Logged out."""'], {}), "('Logged out.')\n", (2500, 2515), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((2527, 2545), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (2535, 2545), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((2621, 2639), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (2629, 2639), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((2699, 2740), 'app.db.users.find_one', 'db.users.find_one', (["{'username': username}"], {}), "({'username': username})\n", (2716, 2740), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((2835, 2862), 'app.db.data.insert_one', 'db.data.insert_one', (['message'], {}), '(message)\n', (2853, 2862), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((2886, 2913), 'app.db.data.insert_one', 'db.data.insert_one', (['message'], {}), '(message)\n', (2904, 2913), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((569, 626), 'app.db.users.find_one', 'db.users.find_one', (["{'username': request.form['username']}"], {}), "({'username': request.form['username']})\n", (586, 626), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((1345, 1402), 'app.db.users.find_one', 'db.users.find_one', (["{'username': request.form['username']}"], {}), "({'username': request.form['username']})\n", (1362, 1402), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((1805, 1834), 'app.db.users.insert_one', 'db.users.insert_one', (['new_user'], {}), '(new_user)\n', (1824, 1834), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((1854, 1911), 'app.db.users.find_one', 'db.users.find_one', (["{'username': request.form['username']}"], {}), "({'username': request.form['username']})\n", (1871, 1911), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((1924, 2035), 'app.pubnub.channel_group_add_channel', 'pubnub.channel_group_add_channel', ([], {'channel_group': "app.config['PUBNUB_CHANNEL_GRP']", 'channel': "user['username']"}), "(channel_group=app.config[\n 'PUBNUB_CHANNEL_GRP'], channel=user['username'])\n", (1956, 2035), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((2043, 2169), 'app.pubnub.grant', 
'pubnub.grant', ([], {'channel': "user['username']", 'auth_key': "app.config['PUBNUB_AUTH_KEY']", 'read': '(True)', 'write': '(True)', 'manage': '(True)', 'ttl': '(0)'}), "(channel=user['username'], auth_key=app.config[\n 'PUBNUB_AUTH_KEY'], read=True, write=True, manage=True, ttl=0)\n", (2055, 2169), False, 'from app import db, login_manager, pubnub, app, _callback\n'), ((707, 770), 'werkzeug.check_password_hash', 'check_password_hash', (["user['password']", "request.form['password']"], {}), "(user['password'], request.form['password'])\n", (726, 770), False, 'from werkzeug import check_password_hash, generate_password_hash\n'), ((905, 925), 'flask.ext.login.login_user', 'login_user', (['user_obj'], {}), '(user_obj)\n', (915, 925), False, 'from flask.ext.login import login_required, login_user, logout_user\n'), ((1743, 1791), 'werkzeug.generate_password_hash', 'generate_password_hash', (["request.form['password']"], {}), "(request.form['password'])\n", (1765, 1791), False, 'from werkzeug import check_password_hash, generate_password_hash\n'), ((2193, 2223), 'flask.url_for', 'url_for', (['"""dashboard.dashboard"""'], {}), "('dashboard.dashboard')\n", (2200, 2223), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n'), ((954, 985), 'flask.url_for', 'url_for', (['"""devices.list_devices"""'], {}), "('devices.list_devices')\n", (961, 985), False, 'from flask import Blueprint, render_template, redirect, url_for, request, flash\n')] |
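
As an aside, the password handling in the login() and signup() views above boils down to a Werkzeug hash round-trip. A minimal sketch (recent Werkzeug versions expose these helpers under werkzeug.security rather than the top-level import used in this older file):

from werkzeug.security import generate_password_hash, check_password_hash

hashed = generate_password_hash("s3cret")         # stored at signup
print(check_password_hash(hashed, "s3cret"))    # True  -> login succeeds
print(check_password_hash(hashed, "wrong"))     # False -> "Wrong email or password"
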
xingjianpan/news_reader_backend | economist/migrations/0003_auto_20170406_1402.py | c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 06:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('economist', '0002_auto_20170406_1153'),
]
operations = [
migrations.AlterField(
model_name='article',
name='alternativename',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='fly_title',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='headline',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='project',
field=models.TextField(editable=False),
),
migrations.AlterField(
model_name='article',
name='source',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='source_url',
field=models.URLField(editable=False),
),
migrations.AlterField(
model_name='article',
name='spider',
field=models.TextField(editable=False),
),
]
| [((413, 452), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (429, 452), False, 'from django.db import migrations, models\n'), ((577, 616), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (593, 616), False, 'from django.db import migrations, models\n'), ((742, 781), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (758, 781), False, 'from django.db import migrations, models\n'), ((906, 945), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (922, 945), False, 'from django.db import migrations, models\n'), ((1069, 1101), 'django.db.models.TextField', 'models.TextField', ([], {'editable': '(False)'}), '(editable=False)\n', (1085, 1101), False, 'from django.db import migrations, models\n'), ((1224, 1263), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1240, 1263), False, 'from django.db import migrations, models\n'), ((1390, 1421), 'django.db.models.URLField', 'models.URLField', ([], {'editable': '(False)'}), '(editable=False)\n', (1405, 1421), False, 'from django.db import migrations, models\n'), ((1544, 1576), 'django.db.models.TextField', 'models.TextField', ([], {'editable': '(False)'}), '(editable=False)\n', (1560, 1576), False, 'from django.db import migrations, models\n')] |
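
A hedged sketch of the model these AlterField operations converge on; the field names and types are taken directly from the migration, while the class name and any additional fields in the real economist/models.py are assumptions.

from django.db import models

class Article(models.Model):
    alternativename = models.TextField(blank=True, null=True)
    category = models.TextField(blank=True, null=True)
    fly_title = models.TextField(blank=True, null=True)
    headline = models.TextField(blank=True, null=True)
    project = models.TextField(editable=False)
    source = models.TextField(blank=True, null=True)
    source_url = models.URLField(editable=False)
    spider = models.TextField(editable=False)
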
coinplus-sa/coinplus-solo | test/test_ethereum.py | e4f385a3d9eb7b72e14e397761fd9a113938917a | import unittest
from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum
class TestEthereum(unittest.TestCase):
"""test of the bitcoin conversion from private key to wif"""
def setUp(self):
self.test_add_vector = [("03cb3e5f30245658e1e3615f1620e5b40f7d9016c0edb3611dd786327dd5e40caa", "0xfd965bB8907566c550D8C0325207a1cB744f2fc2"),
("03c2773e19b0cd4175832d781d521390e5aac7b0841904f93211bf114786f5a145", "0xDB1F8a8B668F15B9e696dDfF30Ce233703f9eC97"),
("0277c3757e791426b7fa43cf64197bfd5c2fe277ece721b12558a52729f6b68b8a", "0x6C4DCd1f900d89a7A70C9A5bA9F7a24a4Bd70878"),
("02d93dfcd93a76d7bac5b0fa394ad4bfd6cd92d10a64728b4b5f707d87db9cd2aa", "0x42F7C7ccD753055c219B85ddc5F05512b3f94528"),
("037049004c5ad576beb518dcc74506df3faf520109a489886b7d1435a63b9b0b88", "0x0af4DbEf58063AEd75e6fF57610348E55954E8FB"),
("0260bbacc03555af21f062ff04e9fbde36bcf0ae7396812d336e7f2e5292306f2b", "0xd13AA41456549AAf4F00C681e014E8CEd8c04d60"),
("0343710601de0710dd81a0b7102bf1b794809a330caf4e1b4ae6567923c00df6a5", "0x011934E5d9EE8C230BBFccF33Ab83c62E5486d91"),
("028c48ff458287f34cc1ad5c58a441500f8f315e9cabe34ff1601a5a0f791e4d0a", "0x98447B7aC721BDeb197a7e72780f6f41BECA2919"),
("0258cdabe1dad468dda6a7d62bee9e0cddadfe87d664e62df9143e769c017dd651", "0xaA5EacE5be0D09B09BAf66df62b0D85EA20b4ee4"),
("0289a6d2272382ceec291674530eebb1b05dadab88ebf1bc45569ba612a4e3973a", "0x79B4044CeB2DFAa123FbE5B4da43BF7cFF01718c")]
def test_address_testvector(self):
for publickey_hex, address_expected in self.test_add_vector:
publickey = bytearray.fromhex(publickey_hex)
address = address_from_publickey_ethereum(publickey)
self.assertEqual(address, address_expected)
| [((1952, 1994), 'coinplus_solo_redeem.common.address_from_publickey_ethereum', 'address_from_publickey_ethereum', (['publickey'], {}), '(publickey)\n', (1983, 1994), False, 'from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum\n')] |
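
A quick manual spot-check of the first test vector above, outside the unittest runner; it assumes the coinplus_solo_redeem package is importable.

from coinplus_solo_redeem.common import address_from_publickey_ethereum

publickey = bytearray.fromhex(
    "03cb3e5f30245658e1e3615f1620e5b40f7d9016c0edb3611dd786327dd5e40caa")
print(address_from_publickey_ethereum(publickey))
# expected: 0xfd965bB8907566c550D8C0325207a1cB744f2fc2
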
youaresherlock/PythonPractice | python97/chapter05/list_gen.py | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | #!usr/bin/python
# -*- coding:utf8 -*-
# 列表生成式(列表推导式)
# 1. 提取出1-20之间的奇数
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. A case with more complex logic: square the value if it is odd
# A list comprehension is faster than building the list with explicit append calls
def handle_item(item):
return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dict comprehension
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
| [] |
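
A small follow-up sketch showing why the generator expression above differs from the list comprehension: the generator holds no elements until iterated, which the standard library's sys.getsizeof makes visible.

import sys

squares_list = [i * i for i in range(100000)]
squares_gen = (i * i for i in range(100000))
print(sys.getsizeof(squares_list))             # hundreds of kilobytes
print(sys.getsizeof(squares_gen))              # a small constant-size object
print(next(squares_gen), next(squares_gen))   # values are produced lazily: 0 1
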
isqad/streamlink | src/streamlink/plugin/plugin.py | f6708f1d38d056177ac3d614ebbb740d956d46f0 | import ast
import operator
import re
from collections import OrderedDict
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# FIXME: This is a crude attempt at making a bitrate's
# weight end up similar to the weight of a resolution.
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
ALT_WEIGHT_MOD = 0.01
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
},
"tv": {
"hd": 1080,
"sd": 576,
},
"quality": {
"ehq": 720,
"hq": 576,
"sq": 360,
},
}
FILTER_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
HIGH_PRIORITY = 30
NORMAL_PRIORITY = 20
LOW_PRIORITY = 10
NO_PRIORITY = 0
def stream_weight(stream):
for group, weights in QUALITY_WEIGTHS_EXTRA.items():
if stream in weights:
return weights[stream], group
match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?(?:_(alt)(\d)?)?$", stream)
if match:
weight = 0
if match.group(6):
if match.group(7):
weight -= ALT_WEIGHT_MOD * int(match.group(7))
else:
weight -= ALT_WEIGHT_MOD
name_type = match.group(2)
if name_type == "k": # bit rate
bitrate = int(match.group(1))
weight += bitrate / BIT_RATE_WEIGHT_RATIO
return weight, "bitrate"
elif name_type == "p": # resolution
weight += int(match.group(1))
if match.group(3): # fps eg. 60p or 50p
weight += int(match.group(3))
if match.group(4) == "+":
weight += 1
if match.group(5): # bit rate classifier for resolution
weight += int(match.group(5)) / BIT_RATE_WEIGHT_RATIO
return weight, "pixels"
return 0, "none"
def iterate_streams(streams):
for name, stream in streams:
if isinstance(stream, list):
for sub_stream in stream:
yield (name, sub_stream)
else:
yield (name, stream)
def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except ValueError:
try:
prio = stream_types.index("*")
except ValueError:
prio = 99
return prio
def stream_sorting_filter(expr, stream_weight):
match = re.match(r"(?P<op><=|>=|<|>)?(?P<value>[\w+]+)", expr)
if not match:
raise PluginError("Invalid filter expression: {0}".format(expr))
op, value = match.group("op", "value")
op = FILTER_OPERATORS.get(op, operator.eq)
filter_weight, filter_group = stream_weight(value)
def func(quality):
weight, group = stream_weight(quality)
if group == filter_group:
return not op(weight, filter_weight)
return True
return func
def parse_url_params(url):
split = url.split(" ", 1)
url = split[0]
params = split[1] if len(split) > 1 else ''
return url, parse_params(params)
def parse_params(params):
rval = {}
matches = re.findall(PARAMS_REGEX, params)
for key, value in matches:
try:
value = ast.literal_eval(value)
except Exception:
pass
rval[key] = value
return rval
class Plugin(object):
"""A plugin can retrieve stream information from the URL specified.
:param url: URL that the plugin will operate on
"""
cache = None
logger = None
module = "unknown"
options = Options()
session = None
@classmethod
def bind(cls, session, module):
cls.cache = Cache(filename="plugin-cache.json",
key_prefix=module)
cls.logger = session.logger.new_module("plugin." + module)
cls.module = module
cls.session = session
def __init__(self, url):
self.url = url
@classmethod
def can_handle_url(cls, url):
raise NotImplementedError
@classmethod
def set_option(cls, key, value):
cls.options.set(key, value)
@classmethod
def get_option(cls, key):
return cls.options.get(key)
@classmethod
def stream_weight(cls, stream):
return stream_weight(stream)
@classmethod
def default_stream_types(cls, streams):
stream_types = ["rtmp", "hls", "hds", "http"]
for name, stream in iterate_streams(streams):
stream_type = type(stream).shortname()
if stream_type not in stream_types:
stream_types.append(stream_type)
return stream_types
@classmethod
def broken(cls, issue=None):
def func(*args, **kwargs):
msg = (
"This plugin has been marked as broken. This is likely due to "
"changes to the service preventing a working implementation. "
)
if issue:
msg += "More info: https://github.com/streamlink/streamlink/issues/{0}".format(issue)
raise PluginError(msg)
def decorator(*args, **kwargs):
return func
return decorator
@classmethod
def priority(cls, url):
"""
Return the plugin priority for a given URL, by default it returns
NORMAL priority.
:return: priority level
"""
return NORMAL_PRIORITY
def streams(self, stream_types=None, sorting_excludes=None):
"""Attempts to extract available streams.
Returns a :class:`dict` containing the streams, where the key is
the name of the stream, most commonly the quality and the value
is a :class:`Stream` object.
        The result can contain the synonyms **best** and **worst**, which
        point to the streams that are likely to be of highest and
lowest quality respectively.
If multiple streams with the same name are found, the order of
streams specified in *stream_types* will determine which stream
gets to keep the name while the rest will be renamed to
"<name>_<stream type>".
The synonyms can be fine tuned with the *sorting_excludes*
parameter. This can be either of these types:
- A list of filter expressions in the format
*[operator]<value>*. For example the filter ">480p" will
exclude streams ranked higher than "480p" from the list
used in the synonyms ranking. Valid operators are >, >=, <
and <=. If no operator is specified then equality will be
tested.
- A function that is passed to filter() with a list of
stream names as input.
:param stream_types: A list of stream types to return.
:param sorting_excludes: Specify which streams to exclude from
the best/worst synonyms.
.. versionchanged:: 1.4.2
Added *priority* parameter.
.. versionchanged:: 1.5.0
Renamed *priority* to *stream_types* and changed behaviour
slightly.
.. versionchanged:: 1.5.0
Added *sorting_excludes* parameter.
.. versionchanged:: 1.6.0
*sorting_excludes* can now be a list of filter expressions
or a function that is passed to filter().
"""
try:
ostreams = self._get_streams()
if isinstance(ostreams, dict):
ostreams = ostreams.items()
# Flatten the iterator to a list so we can reuse it.
if ostreams:
ostreams = list(ostreams)
except NoStreamsError:
return {}
except (IOError, OSError, ValueError) as err:
raise PluginError(err)
if not ostreams:
return {}
if stream_types is None:
stream_types = self.default_stream_types(ostreams)
# Add streams depending on stream type and priorities
sorted_streams = sorted(iterate_streams(ostreams),
key=partial(stream_type_priority,
stream_types))
streams = {}
for name, stream in sorted_streams:
stream_type = type(stream).shortname()
# Use * as wildcard to match other stream types
if "*" not in stream_types and stream_type not in stream_types:
continue
# drop _alt from any stream names
if name.endswith("_alt"):
name = name[:-len("_alt")]
existing = streams.get(name)
if existing:
existing_stream_type = type(existing).shortname()
if existing_stream_type != stream_type:
name = "{0}_{1}".format(name, stream_type)
if name in streams:
name = "{0}_alt".format(name)
num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys())))
# We shouldn't need more than 2 alt streams
if num_alts >= 2:
continue
elif num_alts > 0:
name = "{0}{1}".format(name, num_alts + 1)
# Validate stream name and discard the stream if it's bad.
match = re.match("([A-z0-9_+]+)", name)
if match:
name = match.group(1)
else:
self.logger.debug("The stream '{0}' has been ignored "
"since it is badly named.", name)
continue
# Force lowercase name and replace space with underscore.
streams[name.lower()] = stream
        # Create the best/worst synonyms
def stream_weight_only(s):
return (self.stream_weight(s)[0] or
(len(streams) == 1 and 1))
stream_names = filter(stream_weight_only, streams.keys())
sorted_streams = sorted(stream_names, key=stream_weight_only)
if isinstance(sorting_excludes, list):
for expr in sorting_excludes:
filter_func = stream_sorting_filter(expr, self.stream_weight)
sorted_streams = list(filter(filter_func, sorted_streams))
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
final_sorted_streams = OrderedDict()
for stream_name in sorted(streams, key=stream_weight_only):
final_sorted_streams[stream_name] = streams[stream_name]
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
final_sorted_streams["worst"] = streams[worst]
final_sorted_streams["best"] = streams[best]
return final_sorted_streams
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
Has been renamed to :func:`Plugin.streams`, this is an alias
for backwards compatibility.
"""
return self.streams(*args, **kwargs)
def _get_streams(self):
raise NotImplementedError
__all__ = ["Plugin"]
| [((1082, 1159), 're.match', 're.match', (['"""^(\\\\d+)(k|p)?(\\\\d+)?(\\\\+)?(?:_(\\\\d+)k)?(?:_(alt)(\\\\d)?)?$"""', 'stream'], {}), "('^(\\\\d+)(k|p)?(\\\\d+)?(\\\\+)?(?:_(\\\\d+)k)?(?:_(alt)(\\\\d)?)?$', stream)\n", (1090, 1159), False, 'import re\n'), ((2624, 2678), 're.match', 're.match', (['"""(?P<op><=|>=|<|>)?(?P<value>[\\\\w+]+)"""', 'expr'], {}), "('(?P<op><=|>=|<|>)?(?P<value>[\\\\w+]+)', expr)\n", (2632, 2678), False, 'import re\n'), ((3329, 3361), 're.findall', 're.findall', (['PARAMS_REGEX', 'params'], {}), '(PARAMS_REGEX, params)\n', (3339, 3361), False, 'import re\n'), ((10628, 10641), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10639, 10641), False, 'from collections import OrderedDict\n'), ((3427, 3450), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (3443, 3450), False, 'import ast\n'), ((9540, 9571), 're.match', 're.match', (['"""([A-z0-9_+]+)"""', 'name'], {}), "('([A-z0-9_+]+)', name)\n", (9548, 9571), False, 'import re\n'), ((8276, 8319), 'functools.partial', 'partial', (['stream_type_priority', 'stream_types'], {}), '(stream_type_priority, stream_types)\n', (8283, 8319), False, 'from functools import partial\n')] |
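
To make the weighting and URL-parameter helpers above concrete, a few illustrative calls, assuming the module's functions are imported; the outputs shown as comments follow from the definitions in this module:

print(stream_weight("720p"))      # (720, 'pixels')
print(stream_weight("720p60"))    # (780, 'pixels'): the fps suffix adds to the weight
print(stream_weight("1500k"))     # (~535.7, 'bitrate'): bitrate scaled by BIT_RATE_WEIGHT_RATIO
print(parse_url_params("http://example.com/stream key=value"))
# ('http://example.com/stream', {'key': 'value'})
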
Aahbree/reference-data-repository | tests/cli/conftest.py | f318c0532aaf941ec4f00c8375c9dea45c56f186 | # This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Fixtures for testing the command-line interface."""
import os
import pytest
from click.testing import CliRunner
from refdata.db import DB
import refdata.config as config
@pytest.fixture
def refdata_cli(tmpdir):
"""Initialize the environment and the database for the local store."""
basedir = os.path.abspath(str(tmpdir))
connect_url = 'sqlite:///{}'.format(os.path.join(basedir, 'test.db'))
DB(connect_url=connect_url).init()
os.environ[config.ENV_BASEDIR] = basedir
os.environ[config.ENV_URL] = connect_url
# Make sure to reset the database.
yield CliRunner()
# Clear environment variables that were set for the test runner.
del os.environ[config.ENV_BASEDIR]
del os.environ[config.ENV_URL]
| [((631, 663), 'os.path.join', 'os.path.join', (['basedir', '"""test.db"""'], {}), "(basedir, 'test.db')\n", (643, 663), False, 'import os\n'), ((843, 854), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (852, 854), False, 'from click.testing import CliRunner\n'), ((669, 696), 'refdata.db.DB', 'DB', ([], {'connect_url': 'connect_url'}), '(connect_url=connect_url)\n', (671, 696), False, 'from refdata.db import DB\n')] |
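
A minimal sketch of how a test would consume the refdata_cli fixture above. The toy click command is a hypothetical stand-in for the real refdata CLI entry point, which is not shown here; only the CliRunner invocation pattern is the point.

import click

@click.command()
@click.argument("name")
def hello(name):
    click.echo(f"hello {name}")

def test_hello(refdata_cli):
    result = refdata_cli.invoke(hello, ["world"])
    assert result.exit_code == 0
    assert result.output.strip() == "hello world"
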
lhoestq/DeDLOC | swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py | 36f5a6d043c3d727f9d098a35fba94aa351a5cd4 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import Any, Dict
import numpy as np
from classy_vision.dataset.transforms import register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
@register_transform("ImgPatchesFromTensor")
class ImgPatchesFromTensor(ClassyTransform):
"""
Create image patches from a torch Tensor or numpy array.
This transform was proposed in Jigsaw - https://arxiv.org/abs/1603.09246
Args:
num_patches (int): how many image patches to create
patch_jitter (int): space to leave between patches
"""
def __init__(self, num_patches=9, patch_jitter=21):
self.num_patches = num_patches
self.patch_jitter = patch_jitter
assert self.patch_jitter > 0, "Negative jitter not supported"
self.grid_side_len = int(math.sqrt(self.num_patches)) # usually = 3
logging.info(
f"ImgPatchesFromTensor: num_patches: {num_patches} "
f"patch_jitter: {patch_jitter}"
)
def __call__(self, image):
"""
Input image which is a torch.Tensor object of shape 3 x H x W
"""
data = []
grid_size = int(image.shape[1] / self.grid_side_len)
patch_size = grid_size - self.patch_jitter
jitter = np.random.randint(
0, self.patch_jitter, (2, self.grid_side_len, self.grid_side_len)
)
for i in range(self.grid_side_len):
for j in range(self.grid_side_len):
x_offset = i * grid_size
y_offset = j * grid_size
grid_cell = image[
:, y_offset : y_offset + grid_size, x_offset : x_offset + grid_size
]
patch = grid_cell[
:,
jitter[1, i, j] : jitter[1, i, j] + patch_size,
jitter[0, i, j] : jitter[0, i, j] + patch_size,
]
assert patch.shape[1] == patch_size, "Image not cropped properly"
assert patch.shape[2] == patch_size, "Image not cropped properly"
# copy patch data so that all patches are different in underlying memory
data.append(np.copy(patch))
return data
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ImgPatchesFromTensor":
"""
Instantiates ImgPatchesFromTensor from configuration.
Args:
config (Dict): arguments for for the transform
Returns:
ImgPatchesFromTensor instance.
"""
num_patches = config.get("num_patches", 9)
patch_jitter = config.get("patch_jitter", 21)
logging.info(f"ImgPatchesFromTensor | Using num_patches: {num_patches}")
logging.info(f"ImgPatchesFromTensor | Using patch_jitter: {patch_jitter}")
return cls(num_patches=num_patches, patch_jitter=patch_jitter)
| [((293, 335), 'classy_vision.dataset.transforms.register_transform', 'register_transform', (['"""ImgPatchesFromTensor"""'], {}), "('ImgPatchesFromTensor')\n", (311, 335), False, 'from classy_vision.dataset.transforms import register_transform\n'), ((957, 1061), 'logging.info', 'logging.info', (['f"""ImgPatchesFromTensor: num_patches: {num_patches} patch_jitter: {patch_jitter}"""'], {}), "(\n f'ImgPatchesFromTensor: num_patches: {num_patches} patch_jitter: {patch_jitter}'\n )\n", (969, 1061), False, 'import logging\n'), ((1363, 1452), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.patch_jitter', '(2, self.grid_side_len, self.grid_side_len)'], {}), '(0, self.patch_jitter, (2, self.grid_side_len, self.\n grid_side_len))\n', (1380, 1452), True, 'import numpy as np\n'), ((2744, 2816), 'logging.info', 'logging.info', (['f"""ImgPatchesFromTensor | Using num_patches: {num_patches}"""'], {}), "(f'ImgPatchesFromTensor | Using num_patches: {num_patches}')\n", (2756, 2816), False, 'import logging\n'), ((2825, 2899), 'logging.info', 'logging.info', (['f"""ImgPatchesFromTensor | Using patch_jitter: {patch_jitter}"""'], {}), "(f'ImgPatchesFromTensor | Using patch_jitter: {patch_jitter}')\n", (2837, 2899), False, 'import logging\n'), ((905, 932), 'math.sqrt', 'math.sqrt', (['self.num_patches'], {}), '(self.num_patches)\n', (914, 932), False, 'import math\n'), ((2280, 2294), 'numpy.copy', 'np.copy', (['patch'], {}), '(patch)\n', (2287, 2294), True, 'import numpy as np\n')] |
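
A small usage sketch for the transform above on a dummy CHW array; the shapes follow from the arithmetic in __call__ (an 85 px grid cell minus 21 px of jitter for a 255x255 input).

import numpy as np

transform = ImgPatchesFromTensor(num_patches=9, patch_jitter=21)
image = np.zeros((3, 255, 255), dtype=np.float32)
patches = transform(image)
print(len(patches))        # 9 patches, one per grid cell
print(patches[0].shape)    # (3, 64, 64)
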
Jittor/Jittor | python/jittor/utils/publish.py | bc945bae94bded917214b0afe12be6bf5b919dbe | #!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Publish steps:
# 1. build,push,upload docker image[jittor/jittor]
# 2. build,push,upload docker image[jittor/jittor-cuda]
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
import os
def run_cmd(cmd):
print("[run cmd]", cmd)
assert os.system(cmd) == 0
def upload_file(path):
run_cmd(f"rsync -avPu {path} jittor-web:Documents/jittor-blog/assets/build/")
def docker_task(name, build_cmd):
run_cmd(build_cmd)
run_cmd(f"sudo docker push {name}")
bname = os.path.basename(name)
run_cmd(f"sudo docker save {name}:latest -o /tmp/{bname}.tgz && sudo chmod 666 /tmp/{bname}.tgz")
upload_file(f"/tmp/{bname}.tgz")
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update") | [((915, 937), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (931, 937), False, 'import os\n'), ((679, 693), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (688, 693), False, 'import os\n')] |
DineshDevaraj/interview_answers | prodapt_solutions/config/cliargs.py | 8d3d631dc96dc97ebef80604d6455c2c57c8823d |
import argparse
from helper.metaclasses_definition import Singleton
class CliArgs(metaclass=Singleton):
LogLevel = None
BankName = None
InputFilepath = None
@staticmethod
def init():
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--bank-name', required=True)
my_parser.add_argument('--input-filepath')
my_parser.add_argument('--log-level')
args = my_parser.parse_args()
CliArgs.BankName = args.bank_name
CliArgs.InputFilepath = args.input_filepath
CliArgs.LogLevel = args.log_level
| [((229, 254), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (252, 254), False, 'import argparse\n')] |
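
A short usage sketch for the singleton above, assuming config.cliargs and its helper module are importable; the argument values are placeholders and sys.argv is patched only to simulate a command line in-process.

import sys

sys.argv = ["prog", "--bank-name", "ExampleBank",
            "--input-filepath", "data/input.csv", "--log-level", "DEBUG"]
CliArgs.init()
print(CliArgs.BankName)        # ExampleBank
print(CliArgs.InputFilepath)   # data/input.csv
print(CliArgs.LogLevel)        # DEBUG
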
TeoZosa/flytekit | plugins/flytekit-papermill/setup.py | c4f33c6deaf36a3feaf397cfc6de3bd62e986733 | from setuptools import setup
PLUGIN_NAME = "papermill"
microlib_name = f"flytekitplugins-{PLUGIN_NAME}"
plugin_requires = [
"flytekit>=0.16.0b0,<1.0.0",
"flytekitplugins-spark>=0.16.0b0,<1.0.0,!=0.24.0b0",
"papermill>=1.2.0",
"nbconvert>=6.0.7",
"ipykernel>=5.0.0",
]
__version__ = "0.0.0+develop"
setup(
name=microlib_name,
version=__version__,
author="flyteorg",
author_email="[email protected]",
description="This is the flytekit papermill plugin",
namespace_packages=["flytekitplugins"],
packages=[f"flytekitplugins.{PLUGIN_NAME}"],
install_requires=plugin_requires,
license="apache2",
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| [((323, 1161), 'setuptools.setup', 'setup', ([], {'name': 'microlib_name', 'version': '__version__', 'author': '"""flyteorg"""', 'author_email': '"""[email protected]"""', 'description': '"""This is the flytekit papermill plugin"""', 'namespace_packages': "['flytekitplugins']", 'packages': "[f'flytekitplugins.{PLUGIN_NAME}']", 'install_requires': 'plugin_requires', 'license': '"""apache2"""', 'python_requires': '""">=3.7"""', 'classifiers': "['Intended Audience :: Science/Research', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules']"}), "(name=microlib_name, version=__version__, author='flyteorg',\n author_email='[email protected]', description=\n 'This is the flytekit papermill plugin', namespace_packages=[\n 'flytekitplugins'], packages=[f'flytekitplugins.{PLUGIN_NAME}'],\n install_requires=plugin_requires, license='apache2', python_requires=\n '>=3.7', classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules'])\n", (328, 1161), False, 'from setuptools import setup\n')] |
vla3089/adventofcode | 2017/third.py | 0aefb5509e9f816f89eeab703393be7222632e02 | #!/usr/bin/env python
input = 368078
size = 1
s_size = size * size # squared size
while (s_size < input):
size += 2
s_size = size * size
bottom_right = s_size
bottom_left = s_size - size + 1
top_left = s_size - 2 * size + 2
top_right = s_size - 3 * size + 3
input_x = -1
input_y = -1
# bottom horizontal line
if (input > bottom_left):
input_x = size - 1
input_y = input - bottom_left
elif (input > top_left):
input_y = input - top_left
input_x = 0
elif (input > top_right):
input_x = 0
input_y = size - input + top_right - 1
else:
input_x = top_right - input
input_y = size - 1
ap_x = size / 2
ap_y = ap_x
print abs(ap_x - input_x) + abs(ap_y - input_y)
| [] |
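
The script above is Python 2 (note the print statement) and walks the spiral's edges explicitly. For reference, a Python 3 sketch of the same Advent of Code 2017 day 3 computation using the ring/midpoint closed form; it produces the same answer for this input.

import math

def spiral_distance(square: int) -> int:
    """Manhattan distance from square 1 to `square` on the number spiral."""
    if square == 1:
        return 0
    ring = math.ceil((math.sqrt(square) - 1) / 2)   # which ring the square sits on
    side = 2 * ring                             # distance between side midpoints
    corner = (2 * ring + 1) ** 2                # value at the ring's bottom-right corner
    nearest_mid = min(abs(square - (corner - ring - k * side)) for k in range(4))
    return ring + nearest_mid

print(spiral_distance(368078))  # 371, matching the script above
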
max-eth/racer | racer/methods/genetic_programming/parameterized.py | 952991aedec5d8229bb1126c9c066613f5c30146 | import copy
import numpy as np
from racer.utils import load_pickle
from racer.methods.genetic_programming.program_tree import ProgramTree
class ParameterizedTree(ProgramTree):
# This makes the assumption that all children of the underlying tree are in a field .children and that the underlying tree has the field .name
def __init__(self, underlying_tree, init_fct=None, _copy=True):
if _copy:
underlying_tree = copy.deepcopy(underlying_tree) # safety first
if hasattr(underlying_tree, "children"):
underlying_tree.children = [
ParameterizedTree(underlying_tree=child, _copy=False)
for child in underlying_tree.children
]
self.underlying_tree = underlying_tree
if init_fct is None:
self.set_params([1, 0])
else:
self.set_params(init_fct())
def set_params(self, params):
self.weight, self.bias = params
self.name = self.underlying_tree.name + " * {} + {}".format(
self.weight, self.bias
)
def get_params(self):
return [self.weight, self.bias]
def __call__(self, *x):
return self.underlying_tree(*x) * self.weight + self.bias
def __len__(self):
return len(self.underlying_tree)
def display(self, prefix):
res = prefix + self.name + "\n"
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
res += child.display(prefix=" " + prefix)
return res
def _set_dirty(self):
raise Exception("Parameterized trees should not be mutated")
def in_order(self):
yield self
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
for node in child.in_order():
yield node
class ParameterizedIndividual:
def __init__(self, parameterized_trees):
self.parameterized_trees = parameterized_trees
@staticmethod
def from_individual(ind):
return ParameterizedIndividual(
parameterized_trees=[ParameterizedTree(tree) for tree in ind.trees]
)
@staticmethod
def from_pickled_individual(fname):
return ParameterizedIndividual.from_individual(load_pickle(fname))
def __call__(self, *x):
return [tree(*x) for tree in self.parameterized_trees]
def __len__(self):
return sum(len(tree) for tree in self.parameterized_trees)
def set_flat_parameters(self, params):
n_used = 0
for tree in self.parameterized_trees:
for node in tree.in_order():
node.set_params(list(params[n_used : n_used + 2]))
n_used += 2
def get_flat_parameters(self):
params = []
for tree in self.parameterized_trees:
for node in tree.in_order():
params += node.get_params()
return np.array(params)
| [((2964, 2980), 'numpy.array', 'np.array', (['params'], {}), '(params)\n', (2972, 2980), True, 'import numpy as np\n'), ((441, 471), 'copy.deepcopy', 'copy.deepcopy', (['underlying_tree'], {}), '(underlying_tree)\n', (454, 471), False, 'import copy\n'), ((2314, 2332), 'racer.utils.load_pickle', 'load_pickle', (['fname'], {}), '(fname)\n', (2325, 2332), False, 'from racer.utils import load_pickle\n')] |
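
A minimal usage sketch for ParameterizedTree, assuming the module above is importable. ConstNode is a hypothetical stand-in for a ProgramTree leaf (the wrapper only needs .name, __call__ and __len__), since the real tree classes live elsewhere in the package.

class ConstNode:
    def __init__(self, value):
        self.name = "const({})".format(value)
        self.value = value

    def __call__(self, *x):
        return self.value

    def __len__(self):
        return 1

tree = ParameterizedTree(ConstNode(2.0))
tree.set_params([3.0, 1.0])
print(tree())             # 2.0 * 3.0 + 1.0 = 7.0
print(tree.get_params())  # [3.0, 1.0]
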
danielecook/upvote.pub | base/frontends/views.py | fdda3c0895427ddc76f4680d0d63f2d4bac59da0 | # -*- coding: utf-8 -*-
"""
"""
import os
import markdown2
from flask import (Blueprint,
request,
render_template,
flash, g,
session,
redirect,
url_for,
abort,
Markup)
from werkzeug import check_password_hash, generate_password_hash
from logzero import logger
from base import db, app
from base import search as search_module # don't override function name
from base.users.forms import RegisterForm, LoginForm
from base.users.models import User
from base.threads.models import Thread, Publication
from base.subreddits.models import Subreddit
from base.users.decorators import requires_login
from base.utils.user_utils import get_school
from base.subreddits.forms import subreddit_subs, sub_form
from base.utils.email import send_email
from base.utils.misc import random_string, validate_sort_type
mod = Blueprint('frontends', __name__, url_prefix='')
@mod.before_request
def before_request():
g.user = None
if session.get('user_id'):
g.user = User.query.get(session['user_id'])
def home_subreddit():
logger.info(g.user)
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness)) \
.filter(Subreddit.name.in_(subreddit_subs))
else:
subs = Thread.query.order_by(db.desc(Thread.hotness), db.desc(Thread.hotness))
return subs
def get_subreddits():
"""
Fetch user subreddits otherwise fetch a list of defaults
"""
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subreddits = Subreddit.query.filter(Subreddit.name.in_(subreddit_subs))
else:
# Default set of subreddits
subreddits = Subreddit.query.all()
return subreddits
def process_thread_paginator(trending=False, rs=None, subreddit=None, sort_type='hot'):
"""
abstracted because many sources pull from a thread listing
source (subreddit permalink, homepage, etc)
"""
threads_per_page = 15
cur_page = request.args.get('page') or 1
cur_page = int(cur_page)
thread_paginator = None
# if we are passing in a resultset, that means we are just looking to
# quickly paginate some arbitrary data, no sorting
if rs:
thread_paginator = rs.paginate(cur_page,
per_page=threads_per_page,
error_out=True)
return thread_paginator
# sexy line of code :)
base_query = subreddit.threads if subreddit else Thread.query
# Filter by user subs
logger.info(g.user)
if g.user:
subreddit_subs = g.user.subreddit_subs.get('subs')
base_query = base_query.join(Subreddit).filter(Subreddit.name.in_(subreddit_subs))
# Sorting
if sort_type == 'hot':
base_query = base_query.order_by(db.desc(Thread.hotness))
elif sort_type == 'top':
base_query = base_query.order_by(db.desc(Thread.votes))
elif sort_type == 'comments':
base_query = base_query.order_by(db.desc(Thread.n_comments))
elif sort_type == 'new':
base_query = base_query.order_by(db.desc(Thread.created_on))
elif sort_type == 'publication_date':
base_query = base_query.join(Publication).order_by(db.desc(Publication.pub_date))
thread_paginator = base_query.paginate(cur_page, per_page=threads_per_page, error_out=True)
return thread_paginator
@mod.route('/')
def home(sort_type='hot'):
"""
If not trending we order by creation date
"""
atom_url = url_for('subreddits.atom_feed', subreddit_name='frontpage', _external=True)
trending = True if request.path.endswith('trending') else False
page_title = "Trending" if trending else "Frontpage"
thread_paginator = process_thread_paginator(trending=trending)
return render_template('home.html',
atom_url=atom_url,
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator)
@mod.route('/.atom')
@mod.route('/.xml')
@mod.route('/.rss')
def atom_redirect():
return redirect(url_for("subreddits.atom_feed", subreddit_name="frontpage"))
@mod.route('/h/<string:page>')
def render_markdown(page):
page_md = f"base/markdown/{page}.md"
if not os.path.exists(page_md):
abort(404)
with open(page_md, 'r') as f:
content = f.read()
md = markdown2.markdown(content,
extras = ['fenced-code-blocks',
'nofollow',
'target-blank-links',
'toc',
'tables',
'footnotes',
'metadata',
'markdown-in-html'])
return render_template('markdown.html',
page=md,
**md.metadata)
@mod.route('/search/', methods=['GET'])
def search():
"""
Allows users to search threads and comments
"""
query = request.args.get('query')
page_title=f"Search results for '{query}'"
rs = search_module.search(query, orderby='creation', search_title=True,
search_text=True)
thread_paginator = process_thread_paginator(rs=rs)
#rs = rs.all()
num_searches = rs.count()
subreddits = get_subreddits()
return render_template('home.html',
page_title=page_title,
cur_subreddit=home_subreddit(),
thread_paginator=thread_paginator,
num_searches=num_searches)
@mod.route('/login/', methods=['GET', 'POST'])
def login():
"""
We had to do some extra work to route the user back to
his or her original place before logging in
"""
if g.user:
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = LoginForm(request.form)
# make sure data is valid, but doesn't validate password is right
if form.validate_on_submit():
# continue where we left off if so
user = User.query.filter_by(email=form.email.data).first()
# we use werzeug to validate user's password
if user and check_password_hash(user.password, form.password.data):
# the session can't be modified as it's signed,
# it's a safe place to store the user id
session['user_id'] = user.id
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
flash('Wrong email or password', 'danger')
return render_template("login.html", form=form, next=next)
@mod.route('/logout/', methods=['GET', 'POST'])
@requires_login
def logout():
session.pop('user_id', None)
return redirect(url_for('frontends.home'))
@mod.route('/confirm-email/<string:token>')
def confirm_email(token):
"""
Confirm user email
"""
user = User.query.filter_by(email_token=token).first()
if user.email_token == token:
user.email_verified = True
db.session.commit()
flash("Thank you for confirming your email! You can now submit and comment.", 'success')
return redirect(url_for('frontends.home'))
@mod.route('/register/', methods=['GET', 'POST'])
def register():
"""
Registration page
"""
if g.user:
# If the user is logged in send them home
return redirect(url_for('frontends.home'))
next = ''
if request.method == 'GET':
if 'next' in request.args:
next = request.args['next']
form = RegisterForm(request.form)
if form.validate_on_submit():
# create an user instance not yet stored in the database
user = User(username=form.username.data,
email=form.email.data, \
password=generate_password_hash(form.password.data),
university=get_school(form.email.data),
email_token=random_string())
# Insert the record in our database and commit it
db.session.add(user)
email_confirm_link = url_for('frontends.confirm_email', token = user.email_token)
email_response = send_email("Confirm upvote.pub email",
"""Please visit the link below to confirm your email:\n\n{}{}""".format(request.url_root.strip("/"), email_confirm_link),
user.email)
# Log the user in, as he now has an id
db.session.commit()
session['user_id'] = user.id
flash('Thanks for signing up! Please confirm your email by following the link sent in the confirmation email.', 'success')
if 'next' in request.form and request.form['next']:
return redirect(request.form['next'])
return redirect(url_for('frontends.home'))
return render_template("register.html", form=form, next=next)
@mod.route('/subs/', methods=['GET', 'POST'])
def view_all():
"""
"""
subreddit_list = Subreddit.query.all()
form = None
if g.user:
if request.form:
form = subreddit_subs(request.form)
if form.validate_on_submit():
form_subs = form.data.get('subs')
form_subs = list(set([x['sub_name'] for x in form_subs if x['value']]))
g.user.subreddit_subs = {'subs': form_subs}
flash("Updated Subs", 'success')
db.session.commit()
else:
form = subreddit_subs()
for subreddit in subreddit_list:
sform = sub_form()
sform.sub_name = subreddit.name
sform.sub_group = subreddit.group
if g.user:
sform.value=subreddit.name in g.user.subreddit_subs['subs']
form.subs.append_entry(sform)
return render_template('subreddits/subs.html',
cur_subreddit=None,
page_title='subs',
form=form,
subreddit_list=subreddit_list)
| [((957, 1004), 'flask.Blueprint', 'Blueprint', (['"""frontends"""', '__name__'], {'url_prefix': '""""""'}), "('frontends', __name__, url_prefix='')\n", (966, 1004), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1074, 1096), 'flask.session.get', 'session.get', (['"""user_id"""'], {}), "('user_id')\n", (1085, 1096), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1178, 1197), 'logzero.logger.info', 'logger.info', (['g.user'], {}), '(g.user)\n', (1189, 1197), False, 'from logzero import logger\n'), ((1205, 1218), 'flask.g.get', 'g.get', (['"""user"""'], {}), "('user')\n", (1210, 1218), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1660, 1673), 'flask.g.get', 'g.get', (['"""user"""'], {}), "('user')\n", (1665, 1673), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((2738, 2757), 'logzero.logger.info', 'logger.info', (['g.user'], {}), '(g.user)\n', (2749, 2757), False, 'from logzero import logger\n'), ((3705, 3780), 'flask.url_for', 'url_for', (['"""subreddits.atom_feed"""'], {'subreddit_name': '"""frontpage"""', '_external': '(True)'}), "('subreddits.atom_feed', subreddit_name='frontpage', _external=True)\n", (3712, 3780), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((5120, 5176), 'flask.render_template', 'render_template', (['"""markdown.html"""'], {'page': 'md'}), "('markdown.html', page=md, **md.metadata)\n", (5135, 5176), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((5363, 5388), 'flask.request.args.get', 'request.args.get', (['"""query"""'], {}), "('query')\n", (5379, 5388), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((5445, 5533), 'base.search.search', 'search_module.search', (['query'], {'orderby': '"""creation"""', 'search_title': '(True)', 'search_text': '(True)'}), "(query, orderby='creation', search_title=True,\n search_text=True)\n", (5465, 5533), True, 'from base import search as search_module\n'), ((6336, 6359), 'base.users.forms.LoginForm', 'LoginForm', (['request.form'], {}), '(request.form)\n', (6345, 6359), False, 'from base.users.forms import RegisterForm, LoginForm\n'), ((7094, 7145), 'flask.render_template', 'render_template', (['"""login.html"""'], {'form': 'form', 'next': 'next'}), "('login.html', form=form, next=next)\n", (7109, 7145), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((7230, 7258), 'flask.session.pop', 'session.pop', (['"""user_id"""', 'None'], {}), "('user_id', None)\n", (7241, 7258), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((7553, 7572), 'base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7570, 7572), False, 'from base import db, app\n'), ((7577, 7669), 'flask.flash', 'flash', (['"""Thank you for confirming your email! You can now submit and comment."""', '"""success"""'], {}), "('Thank you for confirming your email! 
You can now submit and comment.',\n 'success')\n", (7582, 7669), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((8073, 8099), 'base.users.forms.RegisterForm', 'RegisterForm', (['request.form'], {}), '(request.form)\n', (8085, 8099), False, 'from base.users.forms import RegisterForm, LoginForm\n'), ((9338, 9392), 'flask.render_template', 'render_template', (['"""register.html"""'], {'form': 'form', 'next': 'next'}), "('register.html', form=form, next=next)\n", (9353, 9392), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9494, 9515), 'base.subreddits.models.Subreddit.query.all', 'Subreddit.query.all', ([], {}), '()\n', (9513, 9515), False, 'from base.subreddits.models import Subreddit\n'), ((10339, 10464), 'flask.render_template', 'render_template', (['"""subreddits/subs.html"""'], {'cur_subreddit': 'None', 'page_title': '"""subs"""', 'form': 'form', 'subreddit_list': 'subreddit_list'}), "('subreddits/subs.html', cur_subreddit=None, page_title=\n 'subs', form=form, subreddit_list=subreddit_list)\n", (10354, 10464), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1115, 1149), 'base.users.models.User.query.get', 'User.query.get', (["session['user_id']"], {}), "(session['user_id'])\n", (1129, 1149), False, 'from base.users.models import User\n'), ((1245, 1278), 'flask.g.user.subreddit_subs.get', 'g.user.subreddit_subs.get', (['"""subs"""'], {}), "('subs')\n", (1270, 1278), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1700, 1733), 'flask.g.user.subreddit_subs.get', 'g.user.subreddit_subs.get', (['"""subs"""'], {}), "('subs')\n", (1725, 1733), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1881, 1902), 'base.subreddits.models.Subreddit.query.all', 'Subreddit.query.all', ([], {}), '()\n', (1900, 1902), False, 'from base.subreddits.models import Subreddit\n'), ((2183, 2207), 'flask.request.args.get', 'request.args.get', (['"""page"""'], {}), "('page')\n", (2199, 2207), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((2798, 2831), 'flask.g.user.subreddit_subs.get', 'g.user.subreddit_subs.get', (['"""subs"""'], {}), "('subs')\n", (2823, 2831), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((3804, 3837), 'flask.request.path.endswith', 'request.path.endswith', (['"""trending"""'], {}), "('trending')\n", (3825, 3837), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((4335, 4394), 'flask.url_for', 'url_for', (['"""subreddits.atom_feed"""'], {'subreddit_name': '"""frontpage"""'}), "('subreddits.atom_feed', subreddit_name='frontpage')\n", (4342, 4394), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((4508, 4531), 'os.path.exists', 'os.path.exists', (['page_md'], {}), '(page_md)\n', (4522, 4531), False, 'import os\n'), ((4541, 4551), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (4546, 4551), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((4626, 4788), 'markdown2.markdown', 
'markdown2.markdown', (['content'], {'extras': "['fenced-code-blocks', 'nofollow', 'target-blank-links', 'toc', 'tables',\n 'footnotes', 'metadata', 'markdown-in-html']"}), "(content, extras=['fenced-code-blocks', 'nofollow',\n 'target-blank-links', 'toc', 'tables', 'footnotes', 'metadata',\n 'markdown-in-html'])\n", (4644, 4788), False, 'import markdown2\n'), ((7040, 7082), 'flask.flash', 'flash', (['"""Wrong email or password"""', '"""danger"""'], {}), "('Wrong email or password', 'danger')\n", (7045, 7082), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((7279, 7304), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (7286, 7304), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((7686, 7711), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (7693, 7711), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((8541, 8561), 'base.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (8555, 8561), False, 'from base import db, app\n'), ((8591, 8649), 'flask.url_for', 'url_for', (['"""frontends.confirm_email"""'], {'token': 'user.email_token'}), "('frontends.confirm_email', token=user.email_token)\n", (8598, 8649), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((8977, 8996), 'base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (8994, 8996), False, 'from base import db, app\n'), ((9042, 9174), 'flask.flash', 'flash', (['"""Thanks for signing up! Please confirm your email by following the link sent in the confirmation email."""', '"""success"""'], {}), "(\n 'Thanks for signing up! 
Please confirm your email by following the link sent in the confirmation email.'\n , 'success')\n", (9047, 9174), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((1403, 1437), 'base.subreddits.models.Subreddit.name.in_', 'Subreddit.name.in_', (['subreddit_subs'], {}), '(subreddit_subs)\n', (1421, 1437), False, 'from base.subreddits.models import Subreddit\n'), ((1486, 1509), 'base.db.desc', 'db.desc', (['Thread.hotness'], {}), '(Thread.hotness)\n', (1493, 1509), False, 'from base import db, app\n'), ((1511, 1534), 'base.db.desc', 'db.desc', (['Thread.hotness'], {}), '(Thread.hotness)\n', (1518, 1534), False, 'from base import db, app\n'), ((1778, 1812), 'base.subreddits.models.Subreddit.name.in_', 'Subreddit.name.in_', (['subreddit_subs'], {}), '(subreddit_subs)\n', (1796, 1812), False, 'from base.subreddits.models import Subreddit\n'), ((2887, 2921), 'base.subreddits.models.Subreddit.name.in_', 'Subreddit.name.in_', (['subreddit_subs'], {}), '(subreddit_subs)\n', (2905, 2921), False, 'from base.subreddits.models import Subreddit\n'), ((3006, 3029), 'base.db.desc', 'db.desc', (['Thread.hotness'], {}), '(Thread.hotness)\n', (3013, 3029), False, 'from base import db, app\n'), ((6175, 6200), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (6182, 6200), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((6647, 6701), 'werkzeug.check_password_hash', 'check_password_hash', (['user.password', 'form.password.data'], {}), '(user.password, form.password.data)\n', (6666, 6701), False, 'from werkzeug import check_password_hash, generate_password_hash\n'), ((7432, 7471), 'base.users.models.User.query.filter_by', 'User.query.filter_by', ([], {'email_token': 'token'}), '(email_token=token)\n', (7452, 7471), False, 'from base.users.models import User\n'), ((7912, 7937), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (7919, 7937), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9244, 9274), 'flask.redirect', 'redirect', (["request.form['next']"], {}), "(request.form['next'])\n", (9252, 9274), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9299, 9324), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (9306, 9324), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9591, 9619), 'base.subreddits.forms.subreddit_subs', 'subreddit_subs', (['request.form'], {}), '(request.form)\n', (9605, 9619), False, 'from base.subreddits.forms import subreddit_subs, sub_form\n'), ((9978, 9994), 'base.subreddits.forms.subreddit_subs', 'subreddit_subs', ([], {}), '()\n', (9992, 9994), False, 'from base.subreddits.forms import subreddit_subs, sub_form\n'), ((3101, 3122), 'base.db.desc', 'db.desc', (['Thread.votes'], {}), '(Thread.votes)\n', (3108, 3122), False, 'from base import db, app\n'), ((6522, 6565), 'base.users.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'form.email.data'}), '(email=form.email.data)\n', (6542, 6565), False, 'from base.users.models import User\n'), ((6945, 6975), 'flask.redirect', 'redirect', (["request.form['next']"], {}), "(request.form['next'])\n", (6953, 6975), False, 'from flask import Blueprint, request, 
render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((7004, 7029), 'flask.url_for', 'url_for', (['"""frontends.home"""'], {}), "('frontends.home')\n", (7011, 7029), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((8322, 8364), 'werkzeug.generate_password_hash', 'generate_password_hash', (['form.password.data'], {}), '(form.password.data)\n', (8344, 8364), False, 'from werkzeug import check_password_hash, generate_password_hash\n'), ((8397, 8424), 'base.utils.user_utils.get_school', 'get_school', (['form.email.data'], {}), '(form.email.data)\n', (8407, 8424), False, 'from base.utils.user_utils import get_school\n'), ((8458, 8473), 'base.utils.misc.random_string', 'random_string', ([], {}), '()\n', (8471, 8473), False, 'from base.utils.misc import random_string, validate_sort_type\n'), ((8824, 8851), 'flask.request.url_root.strip', 'request.url_root.strip', (['"""/"""'], {}), "('/')\n", (8846, 8851), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9876, 9908), 'flask.flash', 'flash', (['"""Updated Subs"""', '"""success"""'], {}), "('Updated Subs', 'success')\n", (9881, 9908), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, abort, Markup\n'), ((9925, 9944), 'base.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9942, 9944), False, 'from base import db, app\n'), ((10064, 10074), 'base.subreddits.forms.sub_form', 'sub_form', ([], {}), '()\n', (10072, 10074), False, 'from base.subreddits.forms import subreddit_subs, sub_form\n'), ((1316, 1339), 'base.db.desc', 'db.desc', (['Thread.hotness'], {}), '(Thread.hotness)\n', (1323, 1339), False, 'from base import db, app\n'), ((1341, 1364), 'base.db.desc', 'db.desc', (['Thread.hotness'], {}), '(Thread.hotness)\n', (1348, 1364), False, 'from base import db, app\n'), ((3199, 3225), 'base.db.desc', 'db.desc', (['Thread.n_comments'], {}), '(Thread.n_comments)\n', (3206, 3225), False, 'from base import db, app\n'), ((3297, 3323), 'base.db.desc', 'db.desc', (['Thread.created_on'], {}), '(Thread.created_on)\n', (3304, 3323), False, 'from base import db, app\n'), ((3426, 3455), 'base.db.desc', 'db.desc', (['Publication.pub_date'], {}), '(Publication.pub_date)\n', (3433, 3455), False, 'from base import db, app\n')] |
vijayeshmt/Securitylock | Jarvis.py | 5877663a170a22ab8b5931dcef07c74f149cf9b8 | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# To use the female voice, change voices[0] above to voices[1].
def speak(audio):
engine.say(audio)
engine.runAndWait()
pass
def take_command():
"""
It takes microphone input from the user and returns a string
:return:
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
        r.pause_threshold = 1.5  # seconds of silence to wait before treating the phrase as complete
audio = r.listen(source)
    # Try to transcribe the captured audio using Google's speech recognition
try:
print("Recognizing")
query = r.recognize_google(audio,language='en-in')
print(f'user said : {query}\n')
except Exception as e:
#print(e)
print("Say that again please")
return "None"
return query
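# Attempt to send an email through Gmail's SMTP server; the login credentials below are placeholders.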
def sendEmail(to,content):
server =smtplib.SMTP('smtp.gmail.com',28)
# server.connect("smtp.gmail.com",465)
# server.ehlo()
server.login('[email protected]','########')
server.sendmail('[email protected]',to,content)
server.close()
def wish_me():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good morning")
elif hour >= 12 and hour < 18:
speak("Good afternoon")
else:
speak("Good night")
speak("I am JARVIS how can i help you")
if __name__ == '__main__':
wish_me()
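    # Main loop: listen for a voice command and dispatch on the keywords below.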
while True:
        query = take_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace('wikipedia','')
            results = wikipedia.summary(query,sentences=2)  # increase sentences to read more, decrease to read less
speak("According to wikipedia")
#print(results)
speak(results)
elif 'open youtube' in query:
# webbrowser.Chrome.open_new("youtube.com")
webbrowser.open("youtube.com")
elif "open google" in query:
webbrowser.open("google.com")
elif "play music" in query:
music_dir = "D:\\vijayesh\\music"
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {strtime}")
elif " open pycharm" in query:
pycharmpath ="C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021"
os.startfile(pycharmpath)
#elif "open command" in query:
# filelocation = "path of the particular file like above"
# os.startfile(filelocation)
elif " email to vijayesh" or "email to vijesh" in query:
try:
speak("What should i say")#error present
content = take_command()
to = "[email protected]"
sendEmail(to,content)
speak("Email has been sent")
exit()
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email")
exit()
| [((145, 166), 'pyttsx3.init', 'pyttsx3.init', (['"""sapi5"""'], {}), "('sapi5')\n", (157, 166), False, 'import pyttsx3\n'), ((495, 510), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (508, 510), True, 'import speech_recognition as sr\n'), ((961, 995), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(28)'], {}), "('smtp.gmail.com', 28)\n", (973, 995), False, 'import smtplib\n'), ((518, 533), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (531, 533), True, 'import speech_recognition as sr\n'), ((1211, 1234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1232, 1234), False, 'import datetime\n'), ((1640, 1677), 'wikipedia.summary', 'wikipedia.summary', (['query'], {'sentences': '(2)'}), '(query, sentences=2)\n', (1657, 1677), False, 'import wikipedia\n'), ((1910, 1940), 'webbrowser.open', 'webbrowser.open', (['"""youtube.com"""'], {}), "('youtube.com')\n", (1925, 1940), False, 'import webbrowser\n'), ((1977, 2006), 'webbrowser.open', 'webbrowser.open', (['"""google.com"""'], {}), "('google.com')\n", (1992, 2006), False, 'import webbrowser\n'), ((2092, 2113), 'os.listdir', 'os.listdir', (['music_dir'], {}), '(music_dir)\n', (2102, 2113), False, 'import os\n'), ((2148, 2181), 'os.path.join', 'os.path.join', (['music_dir', 'songs[1]'], {}), '(music_dir, songs[1])\n', (2160, 2181), False, 'import os\n'), ((2426, 2451), 'os.startfile', 'os.startfile', (['pycharmpath'], {}), '(pycharmpath)\n', (2438, 2451), False, 'import os\n'), ((2227, 2250), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2248, 2250), False, 'import datetime\n')] |
kolotaev/sdk | clients/kratos/python/test/test_v0alpha1_api.py | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | """
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection, while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha1_api import V0alpha1Api # noqa: E501
class TestV0alpha1Api(unittest.TestCase):
"""V0alpha1Api unit test stubs"""
def setUp(self):
self.api = V0alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_admin_create_identity(self):
"""Test case for admin_create_identity
Create an Identity # noqa: E501
"""
pass
def test_admin_create_self_service_recovery_link(self):
"""Test case for admin_create_self_service_recovery_link
Create a Recovery Link # noqa: E501
"""
pass
def test_admin_delete_identity(self):
"""Test case for admin_delete_identity
Delete an Identity # noqa: E501
"""
pass
def test_admin_get_identity(self):
"""Test case for admin_get_identity
Get an Identity # noqa: E501
"""
pass
def test_admin_list_identities(self):
"""Test case for admin_list_identities
List Identities # noqa: E501
"""
pass
def test_admin_update_identity(self):
"""Test case for admin_update_identity
Update an Identity # noqa: E501
"""
pass
def test_create_self_service_logout_flow_url_for_browsers(self):
"""Test case for create_self_service_logout_flow_url_for_browsers
Create a Logout URL for Browsers # noqa: E501
"""
pass
def test_get_json_schema(self):
"""Test case for get_json_schema
"""
pass
def test_get_self_service_error(self):
"""Test case for get_self_service_error
Get Self-Service Errors # noqa: E501
"""
pass
def test_get_self_service_login_flow(self):
"""Test case for get_self_service_login_flow
Get Login Flow # noqa: E501
"""
pass
def test_get_self_service_recovery_flow(self):
"""Test case for get_self_service_recovery_flow
Get Recovery Flow # noqa: E501
"""
pass
def test_get_self_service_registration_flow(self):
"""Test case for get_self_service_registration_flow
Get Registration Flow # noqa: E501
"""
pass
def test_get_self_service_settings_flow(self):
"""Test case for get_self_service_settings_flow
Get Settings Flow # noqa: E501
"""
pass
def test_get_self_service_verification_flow(self):
"""Test case for get_self_service_verification_flow
Get Verification Flow # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_for_browsers(self):
"""Test case for initialize_self_service_login_flow_for_browsers
Initialize Login Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_without_browser(self):
"""Test case for initialize_self_service_login_flow_without_browser
Initialize Login Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_for_browsers(self):
"""Test case for initialize_self_service_recovery_flow_for_browsers
Initialize Recovery Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_without_browser(self):
"""Test case for initialize_self_service_recovery_flow_without_browser
Initialize Recovery Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_for_browsers(self):
"""Test case for initialize_self_service_registration_flow_for_browsers
Initialize Registration Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_without_browser(self):
"""Test case for initialize_self_service_registration_flow_without_browser
Initialize Registration Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_for_browsers(self):
"""Test case for initialize_self_service_settings_flow_for_browsers
Initialize Settings Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_without_browser(self):
"""Test case for initialize_self_service_settings_flow_without_browser
Initialize Settings Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_for_browsers(self):
"""Test case for initialize_self_service_verification_flow_for_browsers
Initialize Verification Flow for Browser Clients # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_without_browser(self):
"""Test case for initialize_self_service_verification_flow_without_browser
Initialize Verification Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_login_flow(self):
"""Test case for submit_self_service_login_flow
Submit a Login Flow # noqa: E501
"""
pass
def test_submit_self_service_logout_flow(self):
"""Test case for submit_self_service_logout_flow
Complete Self-Service Logout # noqa: E501
"""
pass
def test_submit_self_service_logout_flow_without_browser(self):
"""Test case for submit_self_service_logout_flow_without_browser
Perform Logout for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_recovery_flow(self):
"""Test case for submit_self_service_recovery_flow
Complete Recovery Flow # noqa: E501
"""
pass
def test_submit_self_service_registration_flow(self):
"""Test case for submit_self_service_registration_flow
Submit a Registration Flow # noqa: E501
"""
pass
def test_submit_self_service_settings_flow(self):
"""Test case for submit_self_service_settings_flow
Complete Settings Flow # noqa: E501
"""
pass
def test_submit_self_service_verification_flow(self):
"""Test case for submit_self_service_verification_flow
Complete Verification Flow # noqa: E501
"""
pass
def test_to_session(self):
"""Test case for to_session
Check Who the Current HTTP Session Belongs To # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [((7307, 7322), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7320, 7322), False, 'import unittest\n'), ((844, 857), 'ory_kratos_client.api.v0alpha1_api.V0alpha1Api', 'V0alpha1Api', ([], {}), '()\n', (855, 857), False, 'from ory_kratos_client.api.v0alpha1_api import V0alpha1Api\n')] |
XaKingas/osrsapi | osrsapi/__init__.py | 14b93e0f6902724e57ebb1f50d817bd557e41c3d | from .grandexchange import GrandExchange, GameItemNotFound, GameItemParseError
from .item import Item
from .priceinfo import PriceInfo
from .pricetrend import PriceTrend
| [] |
dilum1995/DAugmentor | utils/data_loader.py | 6cc86dccf826415a88b8226265e16ae96b5cc05b | import pandas as pd
import os
import numpy as np
import cv2
from utils import constants as const
import matplotlib.pyplot as plt
class DataLoader:
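    # Loads the train/test images listed in the metadata CSV and returns them as NumPy arrays.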
    @staticmethod
    def load_data():
        '''
        This function handles data loading and pre-processing.
:return: (xtrain, ytrain), (xtest, ytest)
'''
print('**** Read data into DAugmentor ****')
x_train = []
y_train = []
x_test = []
y_test = []
# setting the path to metadata
path = const.PATH
metadata_csv_path = os.path.join(path, const.FILE_METADATA)
test_img_dir_path = os.path.join(path, const.DIR_TEST)
train_img_dir_path = os.path.join(path, const.DIR_TRAIN)
print(metadata_csv_path)
# setting the path to train data
x_train_path = os.path.join(path, const.DIR_TRAIN)
print(x_train_path)
        # setting the path to test data
x_test_path = os.path.join(path, const.DIR_TEST)
        # reading the metadata file as a dataframe
df = pd.read_csv(metadata_csv_path, delimiter=',')
# dataset format:
# image_name
# label
# data_type
data_type_row = df["data_type"].tolist()
image_row = df["image_name"].tolist()
label_row = df["label"].tolist()
data_rows = len(data_type_row)
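        # Load each image and route it to the train or test set based on its data_type value.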
for row in range(data_rows):
if (data_type_row[row] == "TRAIN"):
# setting the path of the current image
img_path = os.path.join(train_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_train.append(image)
print("Loaded: " + img_path)
# extracting labels
y_train.append(label_row[row])
if (data_type_row[row] == "TEST"):
# setting the path of the current image
img_path = os.path.join(test_img_dir_path, image_row[row])
# reading image
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                # downscaling image to 128x128
image = cv2.resize(image, (128, 128))
x_test.append(image)
print("Loaded: " + img_path)
# extracting labels
y_test.append(label_row[row])
xtrain = np.asarray(x_train)
ytrain = np.asarray(y_train)
xtest = np.asarray(x_test)
ytest = np.asarray(y_test)
print(x_train[0].shape)
print(x_train[0].shape)
print(xtrain[0].shape)
print(x_test[0].shape)
#(X_train, y_train), (X_test, y_test)
return (xtrain, ytrain), (xtest, ytest) | [((546, 585), 'os.path.join', 'os.path.join', (['path', 'const.FILE_METADATA'], {}), '(path, const.FILE_METADATA)\n', (558, 585), False, 'import os\n'), ((614, 648), 'os.path.join', 'os.path.join', (['path', 'const.DIR_TEST'], {}), '(path, const.DIR_TEST)\n', (626, 648), False, 'import os\n'), ((678, 713), 'os.path.join', 'os.path.join', (['path', 'const.DIR_TRAIN'], {}), '(path, const.DIR_TRAIN)\n', (690, 713), False, 'import os\n'), ((812, 847), 'os.path.join', 'os.path.join', (['path', 'const.DIR_TRAIN'], {}), '(path, const.DIR_TRAIN)\n', (824, 847), False, 'import os\n'), ((940, 974), 'os.path.join', 'os.path.join', (['path', 'const.DIR_TEST'], {}), '(path, const.DIR_TEST)\n', (952, 974), False, 'import os\n'), ((1035, 1080), 'pandas.read_csv', 'pd.read_csv', (['metadata_csv_path'], {'delimiter': '""","""'}), "(metadata_csv_path, delimiter=',')\n", (1046, 1080), True, 'import pandas as pd\n'), ((2489, 2508), 'numpy.asarray', 'np.asarray', (['x_train'], {}), '(x_train)\n', (2499, 2508), True, 'import numpy as np\n'), ((2526, 2545), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (2536, 2545), True, 'import numpy as np\n'), ((2562, 2580), 'numpy.asarray', 'np.asarray', (['x_test'], {}), '(x_test)\n', (2572, 2580), True, 'import numpy as np\n'), ((2597, 2615), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (2607, 2615), True, 'import numpy as np\n'), ((1517, 1565), 'os.path.join', 'os.path.join', (['train_img_dir_path', 'image_row[row]'], {}), '(train_img_dir_path, image_row[row])\n', (1529, 1565), False, 'import os\n'), ((1622, 1664), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(img_path, cv2.IMREAD_GRAYSCALE)\n', (1632, 1664), False, 'import cv2\n'), ((1734, 1763), 'cv2.resize', 'cv2.resize', (['image', '(128, 128)'], {}), '(image, (128, 128))\n', (1744, 1763), False, 'import cv2\n'), ((2061, 2108), 'os.path.join', 'os.path.join', (['test_img_dir_path', 'image_row[row]'], {}), '(test_img_dir_path, image_row[row])\n', (2073, 2108), False, 'import os\n'), ((2165, 2207), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(img_path, cv2.IMREAD_GRAYSCALE)\n', (2175, 2207), False, 'import cv2\n'), ((2277, 2306), 'cv2.resize', 'cv2.resize', (['image', '(128, 128)'], {}), '(image, (128, 128))\n', (2287, 2306), False, 'import cv2\n')] |
valternunez/Compiler | CompilerPython/LexerPython/main.py | 879cecbbeb1c21d9d19021664ace62442273d3ba | from lexer import *
import sys
if len(sys.argv) != 2:
print("usage: main.py file")
else:
lex = Lexer(sys.argv[1])
with open(sys.argv[1]) as f:
while True:
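            # Loop until end of input, printing each scanned token.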
c = f.read(1)
if not c:
break
print(lex.scan().toString())
| [] |