code (string, length 22–1.05M) | apis (list, 1–3.31k items) | extract_api (string, length 75–3.25M) |
---|---|---|
import os
from PIL import Image
from pycocotools.coco import COCO
from torch.utils import data
class COCODataset(data.Dataset):
"""Dataset yielding (image, COCO annotation list, image id) triplets."""
def __init__(self, images_path, ann_path, split='train', transform=None):
self.coco = COCO(ann_path)  # parse the COCO annotation file
self.image_path = images_path
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.split = split
def __getitem__(self, index):
img_id = self.ids[index]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
# print(ann_ids)
target = self.coco.loadAnns(ann_ids)
# print(target)
file_name = self.coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.image_path, file_name)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, target, img_id
def __len__(self):
return len(self.ids)
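# ---- Usage sketch (added example, not part of the original file) ----
# Shows one way this dataset could feed a DataLoader. It assumes torchvision
# is installed, and the dataset/annotation paths below are placeholders.
if __name__ == '__main__':
    from torchvision import transforms
    tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
    dataset = COCODataset('coco/train2017',
                          'coco/annotations/instances_train2017.json',
                          split='train', transform=tfm)
    # Annotation lists have variable length, so keep each sample as-is when batching.
    loader = data.DataLoader(dataset, batch_size=4, shuffle=True,
                            collate_fn=lambda batch: tuple(zip(*batch)))
    images, targets, img_ids = next(iter(loader))
    print(len(images), img_ids)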
|
[
"pycocotools.coco.COCO",
"os.path.join"
] |
[((229, 243), 'pycocotools.coco.COCO', 'COCO', (['ann_path'], {}), '(ann_path)\n', (233, 243), False, 'from pycocotools.coco import COCO\n'), ((695, 735), 'os.path.join', 'os.path.join', (['self.image_path', 'file_name'], {}), '(self.image_path, file_name)\n', (707, 735), False, 'import os\n')]
|
"""
This script gathers functions related to the SZ (Sunyaev-Zel'dovich) spectrum
"""
import numpy as np
import astropy.units as u
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
#===================================================
#========== CMB intensity
#===================================================
def get_I0_CMB():
"""
Compute the CMB intensity
Parameters
----------
Outputs
--------
- I0 (quantity): the CMB intensity (homogeneous to MJy/sr)
"""
I0 = 2*(const.k_B*cosmo.Tcmb0)**3/(const.h*const.c)**2*u.sr**-1
return I0.to('MJy sr-1')
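# Added note (not in the original file): with the Planck 2015 value
# T_CMB = 2.7255 K, get_I0_CMB() evaluates to roughly 270 MJy/sr.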
#===================================================
#========== Non relativistic tSZ spectrum
#===================================================
def tsz_spec(frequency):
"""
Compute the non relativistic SZ spectrum, f(nu)
as in delta I_nu = I0 f(nu) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
Outputs
--------
- SZ spectrum: f(nu)
"""
x = const.h * frequency / (const.k_B * cosmo.Tcmb0)
f_nu = x**4 * np.exp(x) / (np.exp(x)-1)**2 * (x*(np.exp(x)+1)/(np.exp(x)-1) - 4)
return f_nu
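# Added note (not in the original file): f(nu) is negative below the tSZ null
# at x ~ 3.83 (about 217 GHz for the current T_CMB) and positive above it,
# e.g. tsz_spec(150*u.GHz) < 0 while tsz_spec(353*u.GHz) > 0.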
#===================================================
#========== Relativistic tSZ spectrum
#===================================================
def tsz_spec_relativistic(frequency, kBT):
"""
Compute the relativistic SZ spectrum, f(nu, T)
as in delta I_nu = I0 f(nu, T) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
- kBT (quantity): electron temperature array (k_B T_e) homogeneous to keV
Outputs
--------
- SZ spectrum: f(nu, T)
"""
#========== Make sure that frequency and temperature are arrays
if frequency.isscalar:
frequency = np.array([frequency.to_value()]) * frequency.unit
if kBT.isscalar:
kBT = np.array([kBT.to_value()]) * kBT.unit
#========== Replicate to work with grids
f_grid = (np.tile(frequency, [len(kBT),1])).T
t_grid = (np.tile(kBT, [len(frequency),1]))
#========== Function variable
theta = t_grid.to_value('keV')/(const.m_e*const.c**2).to_value('keV')
x = (const.h*f_grid/(const.k_B*cosmo.Tcmb0)).to_value('')
#========== Region where x < 1.2
f1 = x**4 * np.exp(x)/(np.exp(x)-1)**2
xtil = x*(np.exp(x)+1)/(np.exp(x)-1)
s = 2*x/(np.exp(x/2)-np.exp(-x/2))
y0 = xtil-4.0
y1a = -10.+47./2.*xtil-42./5.*xtil**(2.)
y1b = 0.7*xtil**(3.)+s**(2.)*(-21./5.+7./5.*xtil)
y1 = y1a+y1b
y2a = -15/2.+1023./8.*xtil-868./5.*xtil**(2.)
y2b = 329./5.*xtil**(3.)-44./5.*xtil**(4.)
y2c = 11./30.*xtil**(5.)
y2d = -434./5.+658/5.*xtil-242./5.*xtil**(2.)+143./30.*xtil**(3.)
y2e = -44./5.+187./60.*xtil
y2 = y2a+y2b+y2c+s**(2.)*y2d+s**(4.)*y2e
y3a = 15./2.+2505./8.*xtil-7098./5.*xtil**(2.)
y3b = 1425.3*xtil**(3.)-18594./35.*xtil**(4.)
y3c = 12059./140.*xtil**(5.)-128./21.*xtil**(6.)+16./105.*xtil**(7.)
y3d1 = -709.8+14253/5.*xtil-102267./35.*xtil**(2.)
y3d2 = 156767./140.*xtil**(3.)-1216./7.*xtil**(4.)+64./7.*xtil**(5.)
y3d = s**(2.)*(y3d1+y3d2)
y3e1 = -18594./35.+205003./280.*xtil
y3e2 = -1920./7.*xtil**(2.)+1024./35.*xtil**(3.)
y3e = s**(4.)*(y3e1+y3e2)
y3f = s**(6.)*(-544./21.+922./105.*xtil)
y3 = y3a+y3b+y3c+y3d+y3e+y3f
y4a = -135./32.+30375./128.*xtil-6239.1*xtil**(2.)
y4b = 61472.7/4.*xtil**(3.)-12438.9*xtil**(4.)
y4c = 35570.3/8.*xtil**(5.)-16568./21.*xtil**(6.)
y4d = 7516./105.*xtil**(7.)-22./7.*xtil**(8.)+11./210.*xtil**(9.)
y4e1 = -62391./20.+614727./20.*xtil
y4e2 = -1368279./20.*xtil**(2.)+4624139./80.*xtil**(3.)
y4e3 = -157396./7.*xtil**(4.)+30064./7.*xtil**(5.)
y4e4 = -2717./7.*xtil**(6.)+2761./210.*xtil**(7.)
y4e = s**(2.)*(y4e1+y4e2+y4e3+y4e4)
y4f1 = -12438.9+6046951./160.*xtil
y4f2 = -248520./7.*xtil**(2.)+481024./35.*xtil**(3.)
y4f3 = -15972./7.*xtil**(4.)+18689./140.*xtil**(5.)
y4f = s**(4.)*(y4f1+y4f2+y4f3)
y4g1 = -70414./21.+465992./105.*xtil
y4g2 = -11792./7.*xtil**(2.)+19778./105.*xtil**(3.)
y4g = s**(6.)*(y4g1+y4g2)
y4h = s**(8.)*(-682./7.+7601./210.*xtil)
y4 = y4a+y4b+y4c+y4d+y4e+y4f+y4g+y4h
DI_over_tau_over_theta_lt12 = f1*(y0+theta*y1+theta**(2.)*y2+theta**(3.)*y3+theta**(4.)*y4)
#========== Region where x > 1.2 if T > 20.0 keV
Tlim = 20.0
x_0 = 3.830 * (1.0 + 1.1674*theta - 0.8533*theta**2.)
a_ij = np.array([
[[-1.81317E+1+x*0],[ 9.97038E+1+x*0],[-6.07438E+1+x*0],[ 1.05143E+3+x*0],[-2.86734E+3+x*0],[ 7.73353E+3+x*0],[-8.16644E+3+x*0],[-5.37712E+3+x*0],[ 1.52226E+4+x*0],[ 7.18726E+3+x*0],[-1.39548E+4+x*0],[-2.08464E+4+x*0],[ 1.79040E+4+x*0]],
[[ 1.68733E+2+x*0],[-6.07829E+2+x*0],[ 1.14933E+3+x*0],[-2.42382E+2+x*0],[-7.73030E+2+x*0],[ 5.33993E+3+x*0],[-4.03443E+3+x*0],[ 3.00692E+3+x*0],[ 9.58809E+3+x*0],[ 8.16574E+3+x*0],[-6.13322E+3+x*0],[-1.48117E+4+x*0],[ 3.43816E+4+x*0]],
[[-6.69883E+2+x*0],[ 1.59654E+3+x*0],[-3.33375E+3+x*0],[-2.13234E+3+x*0],[-1.80812E+2+x*0],[ 3.75605E+3+x*0],[-4.75180E+3+x*0],[-4.50495E+3+x*0],[ 5.38753E+3+x*0],[ 5.03355E+3+x*0],[-1.18396E+4+x*0],[-8.58473E+3+x*0],[ 3.96316E+4+x*0]],
[[ 1.56222E+3+x*0],[-1.78598E+3+x*0],[ 5.13747E+3+x*0],[ 4.10404E+3+x*0],[ 5.54775E+2+x*0],[-3.89994E+3+x*0],[-1.22455E+3+x*0],[ 1.03747E+3+x*0],[ 4.32237E+3+x*0],[ 1.03805E+3+x*0],[-1.47172E+4+x*0],[-1.23591E+4+x*0],[ 1.77290E+4+x*0]],
[[-2.34712E+3+x*0],[ 2.78197E+2+x*0],[-5.49648E+3+x*0],[-5.94988E+2+x*0],[-1.47060E+3+x*0],[-2.84032E+2+x*0],[-1.15352E+3+x*0],[-1.17893E+3+x*0],[ 7.01209E+3+x*0],[ 4.75631E+3+x*0],[-5.13807E+3+x*0],[-8.73615E+3+x*0],[ 9.41580E+3+x*0]],
[[ 1.92894E+3+x*0],[ 1.17970E+3+x*0],[ 3.13650E+3+x*0],[-2.91121E+2+x*0],[-1.15006E+3+x*0],[ 4.17375E+3+x*0],[-3.31788E+2+x*0],[ 1.37973E+3+x*0],[-2.48966E+3+x*0],[ 4.82005E+3+x*0],[-1.06121E+4+x*0],[-1.19394E+4+x*0],[ 1.34908E+4+x*0]],
[[ 6.40881E+2+x*0],[-6.81789E+2+x*0],[ 1.20037E+3+x*0],[-3.27298E+3+x*0],[ 1.02988E+2+x*0],[ 2.03514E+3+x*0],[-2.80502E+3+x*0],[ 8.83880E+2+x*0],[ 1.68409E+3+x*0],[ 4.26227E+3+x*0],[-6.37868E+3+x*0],[-1.11597E+4+x*0],[ 1.46861E+4+x*0]],
[[-4.02494E+3+x*0],[-1.37983E+3+x*0],[-1.65623E+3+x*0],[ 7.36120E+1+x*0],[ 2.66656E+3+x*0],[-2.30516E+3+x*0],[ 5.22182E+3+x*0],[-8.53317E+3+x*0],[ 3.75800E+2+x*0],[ 8.49249E+2+x*0],[-6.88736E+3+x*0],[-1.01475E+4+x*0],[ 4.75820E+3+x*0]],
[[ 4.59247E+3+x*0],[ 3.04203E+3+x*0],[-2.11039E+3+x*0],[ 1.32383E+3+x*0],[ 1.10646E+3+x*0],[-3.53827E+3+x*0],[-1.12073E+3+x*0],[-5.47633E+3+x*0],[ 9.85745E+3+x*0],[ 5.72138E+3+x*0],[ 6.86444E+3+x*0],[-5.72696E+3+x*0],[ 1.29053E+3+x*0]],
[[-1.61848E+3+x*0],[-1.83704E+3+x*0],[ 2.06738E+3+x*0],[ 4.00292E+3+x*0],[-3.72824E+1+x*0],[ 9.10086E+2+x*0],[ 3.72526E+3+x*0],[ 3.41895E+3+x*0],[ 1.31241E+3+x*0],[ 6.68089E+3+x*0],[-4.34269E+3+x*0],[-5.42296E+3+x*0],[ 2.83445E+3+x*0]],
[[-1.00239E+3+x*0],[-1.24281E+3+x*0],[ 2.46998E+3+x*0],[-4.25837E+3+x*0],[-1.83515E+2+x*0],[-6.47138E+2+x*0],[-7.35806E+3+x*0],[-1.50866E+3+x*0],[-2.47275E+3+x*0],[ 9.09399E+3+x*0],[-2.75851E+3+x*0],[-6.75104E+3+x*0],[ 7.00899E+2+x*0]],
[[ 1.04911E+3+x*0],[ 2.07475E+3+x*0],[-3.83953E+3+x*0],[ 7.79924E+2+x*0],[-4.08658E+3+x*0],[ 4.43432E+3+x*0],[ 3.23015E+2+x*0],[ 6.16180E+3+x*0],[-1.00851E+4+x*0],[ 7.65063E+3+x*0],[ 1.52880E+3+x*0],[-6.08330E+3+x*0],[ 1.23369E+3+x*0]],
[[-2.61041E+2+x*0],[-7.22803E+2+x*0],[ 1.34581E+3+x*0],[ 5.90851E+2+x*0],[ 3.32198E+2+x*0],[ 2.58340E+3+x*0],[-5.97604E+2+x*0],[-4.34018E+3+x*0],[-3.58925E+3+x*0],[ 2.59165E+3+x*0],[ 6.76140E+3+x*0],[-6.22138E+3+x*0],[ 4.40668E+3+x*0]]
])[:,:,0,:]
theta_ei = np.array([
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]]
])[:,:,0,:,:]
theta_ei = np.transpose(theta_ei, (1,0,2,3))
Zj = np.array([
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]]
])[:,:,0,:,:]
G_theta_x = np.sum(np.sum(a_ij*theta_ei*Zj, 1), 0)
DI_over_tau_over_theta_gt12 = x**2.0 * np.exp(-x) * (x-x_0) * G_theta_x
#========== Pick the region
f_nu = DI_over_tau_over_theta_lt12
w_gt12 = (x > 1.2) * (t_grid > Tlim*u.keV)
f_nu[w_gt12] = DI_over_tau_over_theta_gt12[w_gt12]
return f_nu
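# ---- Usage sketch (added example, not part of the original file) ----
# Compares the non-relativistic and relativistic spectra on an illustrative
# frequency grid; the 10 keV temperature and y = 1e-4 are placeholder values.
if __name__ == '__main__':
    frequencies = np.linspace(30.0, 1000.0, 200) * u.GHz
    f_nr = tsz_spec(frequencies)                           # f(nu)
    f_rel = tsz_spec_relativistic(frequencies, 10.0 * u.keV)  # f(nu, T), shape (n_nu, n_T)
    y = 1e-4                                                  # Compton parameter, illustrative
    dI_nr = get_I0_CMB() * f_nr * y
    dI_rel = get_I0_CMB() * f_rel[:, 0] * y
    print(dI_nr.to('MJy sr-1'))
    print(dI_rel.to('MJy sr-1'))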
|
[
"numpy.array",
"numpy.exp",
"numpy.transpose",
"numpy.sum"
] |
[((11551, 11587), 'numpy.transpose', 'np.transpose', (['theta_ei', '(1, 0, 2, 3)'], {}), '(theta_ei, (1, 0, 2, 3))\n', (11563, 11587), True, 'import numpy as np\n'), ((4546, 8066), 'numpy.array', 'np.array', (['[[[-18.1317 + x * 0], [99.7038 + x * 0], [-60.7438 + x * 0], [1051.43 + x *\n 0], [-2867.34 + x * 0], [7733.53 + x * 0], [-8166.44 + x * 0], [-\n 5377.12 + x * 0], [15222.6 + x * 0], [7187.26 + x * 0], [-13954.8 + x *\n 0], [-20846.4 + x * 0], [17904.0 + x * 0]], [[168.733 + x * 0], [-\n 607.829 + x * 0], [1149.33 + x * 0], [-242.382 + x * 0], [-773.03 + x *\n 0], [5339.93 + x * 0], [-4034.43 + x * 0], [3006.92 + x * 0], [9588.09 +\n x * 0], [8165.74 + x * 0], [-6133.22 + x * 0], [-14811.7 + x * 0], [\n 34381.6 + x * 0]], [[-669.883 + x * 0], [1596.54 + x * 0], [-3333.75 + \n x * 0], [-2132.34 + x * 0], [-180.812 + x * 0], [3756.05 + x * 0], [-\n 4751.8 + x * 0], [-4504.95 + x * 0], [5387.53 + x * 0], [5033.55 + x * \n 0], [-11839.6 + x * 0], [-8584.73 + x * 0], [39631.6 + x * 0]], [[\n 1562.22 + x * 0], [-1785.98 + x * 0], [5137.47 + x * 0], [4104.04 + x *\n 0], [554.775 + x * 0], [-3899.94 + x * 0], [-1224.55 + x * 0], [1037.47 +\n x * 0], [4322.37 + x * 0], [1038.05 + x * 0], [-14717.2 + x * 0], [-\n 12359.1 + x * 0], [17729.0 + x * 0]], [[-2347.12 + x * 0], [278.197 + x *\n 0], [-5496.48 + x * 0], [-594.988 + x * 0], [-1470.6 + x * 0], [-\n 284.032 + x * 0], [-1153.52 + x * 0], [-1178.93 + x * 0], [7012.09 + x *\n 0], [4756.31 + x * 0], [-5138.07 + x * 0], [-8736.15 + x * 0], [9415.8 +\n x * 0]], [[1928.94 + x * 0], [1179.7 + x * 0], [3136.5 + x * 0], [-\n 291.121 + x * 0], [-1150.06 + x * 0], [4173.75 + x * 0], [-331.788 + x *\n 0], [1379.73 + x * 0], [-2489.66 + x * 0], [4820.05 + x * 0], [-10612.1 +\n x * 0], [-11939.4 + x * 0], [13490.8 + x * 0]], [[640.881 + x * 0], [-\n 681.789 + x * 0], [1200.37 + x * 0], [-3272.98 + x * 0], [102.988 + x *\n 0], [2035.14 + x * 0], [-2805.02 + x * 0], [883.88 + x * 0], [1684.09 +\n x * 0], [4262.27 + x * 0], [-6378.68 + x * 0], [-11159.7 + x * 0], [\n 14686.1 + x * 0]], [[-4024.94 + x * 0], [-1379.83 + x * 0], [-1656.23 +\n x * 0], [73.612 + x * 0], [2666.56 + x * 0], [-2305.16 + x * 0], [\n 5221.82 + x * 0], [-8533.17 + x * 0], [375.8 + x * 0], [849.249 + x * 0\n ], [-6887.36 + x * 0], [-10147.5 + x * 0], [4758.2 + x * 0]], [[4592.47 +\n x * 0], [3042.03 + x * 0], [-2110.39 + x * 0], [1323.83 + x * 0], [\n 1106.46 + x * 0], [-3538.27 + x * 0], [-1120.73 + x * 0], [-5476.33 + x *\n 0], [9857.45 + x * 0], [5721.38 + x * 0], [6864.44 + x * 0], [-5726.96 +\n x * 0], [1290.53 + x * 0]], [[-1618.48 + x * 0], [-1837.04 + x * 0], [\n 2067.38 + x * 0], [4002.92 + x * 0], [-37.2824 + x * 0], [910.086 + x *\n 0], [3725.26 + x * 0], [3418.95 + x * 0], [1312.41 + x * 0], [6680.89 +\n x * 0], [-4342.69 + x * 0], [-5422.96 + x * 0], [2834.45 + x * 0]], [[-\n 1002.39 + x * 0], [-1242.81 + x * 0], [2469.98 + x * 0], [-4258.37 + x *\n 0], [-183.515 + x * 0], [-647.138 + x * 0], [-7358.06 + x * 0], [-\n 1508.66 + x * 0], [-2472.75 + x * 0], [9093.99 + x * 0], [-2758.51 + x *\n 0], [-6751.04 + x * 0], [700.899 + x * 0]], [[1049.11 + x * 0], [\n 2074.75 + x * 0], [-3839.53 + x * 0], [779.924 + x * 0], [-4086.58 + x *\n 0], [4434.32 + x * 0], [323.015 + x * 0], [6161.8 + x * 0], [-10085.1 +\n x * 0], [7650.63 + x * 0], [1528.8 + x * 0], [-6083.3 + x * 0], [\n 1233.69 + x * 0]], [[-261.041 + x * 0], [-722.803 + x * 0], [1345.81 + \n x * 0], [590.851 + x * 0], [332.198 + x * 0], [2583.4 + x * 0], [-\n 597.604 + x * 0], [-4340.18 + x * 0], [-3589.25 + x * 0], 
[2591.65 + x *\n 0], [6761.4 + x * 0], [-6221.38 + x * 0], [4406.68 + x * 0]]]'], {}), '([[[-18.1317 + x * 0], [99.7038 + x * 0], [-60.7438 + x * 0], [\n 1051.43 + x * 0], [-2867.34 + x * 0], [7733.53 + x * 0], [-8166.44 + x *\n 0], [-5377.12 + x * 0], [15222.6 + x * 0], [7187.26 + x * 0], [-13954.8 +\n x * 0], [-20846.4 + x * 0], [17904.0 + x * 0]], [[168.733 + x * 0], [-\n 607.829 + x * 0], [1149.33 + x * 0], [-242.382 + x * 0], [-773.03 + x *\n 0], [5339.93 + x * 0], [-4034.43 + x * 0], [3006.92 + x * 0], [9588.09 +\n x * 0], [8165.74 + x * 0], [-6133.22 + x * 0], [-14811.7 + x * 0], [\n 34381.6 + x * 0]], [[-669.883 + x * 0], [1596.54 + x * 0], [-3333.75 + \n x * 0], [-2132.34 + x * 0], [-180.812 + x * 0], [3756.05 + x * 0], [-\n 4751.8 + x * 0], [-4504.95 + x * 0], [5387.53 + x * 0], [5033.55 + x * \n 0], [-11839.6 + x * 0], [-8584.73 + x * 0], [39631.6 + x * 0]], [[\n 1562.22 + x * 0], [-1785.98 + x * 0], [5137.47 + x * 0], [4104.04 + x *\n 0], [554.775 + x * 0], [-3899.94 + x * 0], [-1224.55 + x * 0], [1037.47 +\n x * 0], [4322.37 + x * 0], [1038.05 + x * 0], [-14717.2 + x * 0], [-\n 12359.1 + x * 0], [17729.0 + x * 0]], [[-2347.12 + x * 0], [278.197 + x *\n 0], [-5496.48 + x * 0], [-594.988 + x * 0], [-1470.6 + x * 0], [-\n 284.032 + x * 0], [-1153.52 + x * 0], [-1178.93 + x * 0], [7012.09 + x *\n 0], [4756.31 + x * 0], [-5138.07 + x * 0], [-8736.15 + x * 0], [9415.8 +\n x * 0]], [[1928.94 + x * 0], [1179.7 + x * 0], [3136.5 + x * 0], [-\n 291.121 + x * 0], [-1150.06 + x * 0], [4173.75 + x * 0], [-331.788 + x *\n 0], [1379.73 + x * 0], [-2489.66 + x * 0], [4820.05 + x * 0], [-10612.1 +\n x * 0], [-11939.4 + x * 0], [13490.8 + x * 0]], [[640.881 + x * 0], [-\n 681.789 + x * 0], [1200.37 + x * 0], [-3272.98 + x * 0], [102.988 + x *\n 0], [2035.14 + x * 0], [-2805.02 + x * 0], [883.88 + x * 0], [1684.09 +\n x * 0], [4262.27 + x * 0], [-6378.68 + x * 0], [-11159.7 + x * 0], [\n 14686.1 + x * 0]], [[-4024.94 + x * 0], [-1379.83 + x * 0], [-1656.23 +\n x * 0], [73.612 + x * 0], [2666.56 + x * 0], [-2305.16 + x * 0], [\n 5221.82 + x * 0], [-8533.17 + x * 0], [375.8 + x * 0], [849.249 + x * 0\n ], [-6887.36 + x * 0], [-10147.5 + x * 0], [4758.2 + x * 0]], [[4592.47 +\n x * 0], [3042.03 + x * 0], [-2110.39 + x * 0], [1323.83 + x * 0], [\n 1106.46 + x * 0], [-3538.27 + x * 0], [-1120.73 + x * 0], [-5476.33 + x *\n 0], [9857.45 + x * 0], [5721.38 + x * 0], [6864.44 + x * 0], [-5726.96 +\n x * 0], [1290.53 + x * 0]], [[-1618.48 + x * 0], [-1837.04 + x * 0], [\n 2067.38 + x * 0], [4002.92 + x * 0], [-37.2824 + x * 0], [910.086 + x *\n 0], [3725.26 + x * 0], [3418.95 + x * 0], [1312.41 + x * 0], [6680.89 +\n x * 0], [-4342.69 + x * 0], [-5422.96 + x * 0], [2834.45 + x * 0]], [[-\n 1002.39 + x * 0], [-1242.81 + x * 0], [2469.98 + x * 0], [-4258.37 + x *\n 0], [-183.515 + x * 0], [-647.138 + x * 0], [-7358.06 + x * 0], [-\n 1508.66 + x * 0], [-2472.75 + x * 0], [9093.99 + x * 0], [-2758.51 + x *\n 0], [-6751.04 + x * 0], [700.899 + x * 0]], [[1049.11 + x * 0], [\n 2074.75 + x * 0], [-3839.53 + x * 0], [779.924 + x * 0], [-4086.58 + x *\n 0], [4434.32 + x * 0], [323.015 + x * 0], [6161.8 + x * 0], [-10085.1 +\n x * 0], [7650.63 + x * 0], [1528.8 + x * 0], [-6083.3 + x * 0], [\n 1233.69 + x * 0]], [[-261.041 + x * 0], [-722.803 + x * 0], [1345.81 + \n x * 0], [590.851 + x * 0], [332.198 + x * 0], [2583.4 + x * 0], [-\n 597.604 + x * 0], [-4340.18 + x * 0], [-3589.25 + x * 0], [2591.65 + x *\n 0], [6761.4 + x * 0], [-6221.38 + x * 0], [4406.68 + x * 0]]])\n', (4554, 8066), True, 'import numpy as 
np\n'), ((7777, 13393), 'numpy.array', 'np.array', (['[[[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (\n 10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 *\n theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) **\n 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x *\n 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 *\n theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta\n ) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0],\n [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + \n (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 *\n theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) **\n 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [\n x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 +\n (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 *\n theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) **\n 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x *\n 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 *\n theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) **\n 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0],\n [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + \n (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 
+ (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]]]'], {}), '([[[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x *\n 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 *\n theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) **\n 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x *\n 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 *\n theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta\n ) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0],\n [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + \n (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 *\n theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) **\n 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [\n x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 +\n (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 *\n theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) **\n 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x *\n 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 *\n theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) **\n 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0],\n [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + \n (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], 
[x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 
2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]]])\n', (7785, 13393), True, 'import numpy as np\n'), ((11599, 15475), 'numpy.array', 'np.array', (['[[[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 
* x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]]]'], {}), '([[[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [\n (0.05 * x) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 *\n x) ** 6.0], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** \n 9.0], [(0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]],\n [[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 *\n x) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** \n 6.0], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 
0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]]])\n', (11607, 15475), True, 'import numpy as np\n'), ((14384, 14415), 'numpy.sum', 'np.sum', (['(a_ij * theta_ei * Zj)', '(1)'], {}), '(a_ij * theta_ei * Zj, 1)\n', (14390, 14415), True, 'import numpy as np\n'), ((2365, 2374), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2371, 2374), True, 'import numpy as np\n'), ((2420, 2429), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2426, 2429), True, 'import numpy as np\n'), ((2446, 2459), 'numpy.exp', 'np.exp', (['(x / 2)'], {}), '(x / 2)\n', (2452, 2459), True, 'import numpy as np\n'), ((2458, 2472), 'numpy.exp', 'np.exp', (['(-x / 2)'], {}), '(-x / 2)\n', (2464, 2472), True, 'import numpy as np\n'), ((1118, 1127), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1124, 1127), True, 'import numpy as np\n'), ((2376, 2385), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2382, 2385), True, 'import numpy as np\n'), ((2406, 2415), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2412, 2415), True, 'import numpy as np\n'), ((14460, 14470), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (14466, 14470), True, 'import numpy as np\n'), ((1131, 1140), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1137, 1140), True, 'import numpy as np\n'), ((1167, 1176), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1173, 1176), True, 'import numpy as np\n'), ((1153, 1162), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1159, 1162), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from sae import storage
class SaeStorage(object):
"""Flask-extension-style wrapper around an SAE Storage bucket."""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
access_key = app.config.get('SAE_ACCESS_KEY', storage.ACCESS_KEY)
secret_key = app.config.get('SAE_SECRET_KEY', storage.SECRET_KEY)
app_name = app.config.get('SAE_APP_NAME', storage.APP_NAME)
bucket_name = app.config.get('SAE_BUCKET_NAME', '')
connection = storage.Connection(access_key, secret_key, app_name)
self._bucket = connection.get_bucket(bucket_name)
def save(self, data, filename):
return self._bucket.put_object(filename, data)
def delete(self, filename):
return self._bucket.delete_object(filename)
def url(self, filename):
return self._bucket.generate_url(filename)
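# ---- Usage sketch (added example, not part of the original file) ----
# Flask-extension style wiring; the bucket name and payload are placeholders
# and running it requires valid SAE credentials, so it is left commented out.
# from flask import Flask
# app = Flask(__name__)
# app.config['SAE_BUCKET_NAME'] = 'my-bucket'
# store = SaeStorage(app)
# store.save(b'hello world', 'hello.txt')
# print(store.url('hello.txt'))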
|
[
"sae.storage.Connection"
] |
[((520, 572), 'sae.storage.Connection', 'storage.Connection', (['access_key', 'secret_key', 'app_name'], {}), '(access_key, secret_key, app_name)\n', (538, 572), False, 'from sae import storage\n')]
|
# PhysiBoSS Tab
import os
from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, \
FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output
from collections import deque, Counter
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
import numpy as np
import glob
import platform
import csv
import itertools
import copy
import scipy.io  # scipy.io is needed explicitly for scipy.io.loadmat
# from debug import debug_view
class PhysiBoSSTab(object):
def __init__(self):
# tab_height = '520px'
# tab_layout = Layout(width='900px', # border='2px solid black',
# height=tab_height, overflow_y='scroll')
self.output_dir = '.'
self.figsize_width = 15.0 # allow extra for colormap
self.figsize_height = 8
constWidth = '180px'
# self.fig = plt.figure(figsize=(6, 6))
# self.fig = plt.figure(figsize=(7, 7))
config_file = "data/PhysiCell_settings.xml"
self.cell_lines = {}
self.cell_lines_by_name = {}
self.cell_lines_array = ["All"]
if os.path.isfile(config_file):
try:
tree = ET.parse(config_file)
except Exception:
print("Cannot parse",config_file, "- check it's XML syntax.")
return
root = tree.getroot()
uep = root.find('.//cell_definitions') # find unique entry point (uep)
for child in uep.findall('cell_definition'):
self.cell_lines[int(child.attrib["ID"])] = child.attrib["name"]
self.cell_lines_by_name[child.attrib["name"]] = int(child.attrib["ID"])
self.cell_lines_array.append(child.attrib["name"])
# print(child.attrib['name'])
else:
print("config.xml does not exist")
max_frames = 0
self.svg_plot = interactive(self.create_area_chart, frame=(0, max_frames), percentage=(0.0, 10.0), total=False, cell_line=self.cell_lines_array, continuous_update=False)
plot_size = '750px'  # controls the size of the tab height, not the plot (cf. figsize for that)
self.svg_plot.layout.width = '1000px'
self.svg_plot.layout.height = '700px'
self.use_defaults = True
self.axes_min = 0.0
self.axes_max = 2000 # hmm, this can change (TODO?)
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='Max',
layout=Layout(width='160px'),
# layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
items_auto = [Label('select slider: drag or left/right arrows'),
self.max_frames,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='900px')
row1 = Box(children=items_auto, layout=box_layout)
self.tab = VBox([row1, self.svg_plot])
self.count_dict = {}
self.file_dict = {}
self.cells_indexes = np.zeros((0))
self.up_to_frame = 0
def update(self, rdir=''):
# with debug_view:
# print("SVG: update rdir=", rdir)
if rdir:
self.output_dir = rdir
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snapshot*.svg')))
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
# self.create_dict(self.max_frames.value, self.output_dir)
# self.state_counter(self.max_frames.value)
# with debug_view:
# print("SVG: added %s files" % len(all_files))
def update_max_frames(self,_b):
self.svg_plot.children[0].max = self.max_frames.value
def create_dict(self, number_of_files, folder):
"create a dictionary with the states file in the folder 'output', half of the dict is used to calculate the percentage of the node, the other half is for the states"
if number_of_files > 0:
for i in range (0, number_of_files):
if "state_step{0}".format(i) not in self.file_dict.keys():
states_dict = {}
with open(os.path.join(self.output_dir, 'states_%08u.csv' % i), newline='') as csvfile:
states_reader = csv.reader(csvfile, delimiter=',')
for row in states_reader:
if row[0] != 'ID':
states_dict[int(row[0])] = row[1]
self.file_dict["state_step{0}".format(i)] = states_dict
def state_counter(self, number_of_files, percentage, cell_indexes, cell_line):
"create a dict with the states of the network, it can be used to print states pie chart"
self.count_dict = {}
temp_dict = {}
max_cell = 0
if number_of_files > 0:
for i in range (0, number_of_files):
state_list = []
for key in self.file_dict["state_step{0}".format(i)]:
if cell_line == 'All' or self.cells_indexes[key] == self.cell_lines_by_name[cell_line]:
state_list.append(self.file_dict["state_step{0}".format(i)][key])
state_counts = Counter(state_list)
max_cell = max_cell + sum(state_counts.values())
temp_dict["state_count{0}".format(i)] = state_counts
self.count_dict = self.filter_states(max_cell, temp_dict, percentage)
def create_cell_indexes(self, frame, cell_line):
for i in range(self.up_to_frame, frame):
fname = "output%08d_cells_physicell.mat" % i
full_fname = os.path.join(self.output_dir, fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
info_dict = {}
scipy.io.loadmat(full_fname, info_dict)
M = info_dict['cells'][[0,5], :].astype(int)
self.cells_indexes.resize((max(self.cells_indexes.shape[0], M[0, :].max(axis=0)+1)))
self.cells_indexes[M[0, :]] = M[1, :]
self.up_to_frame = frame
return self.cells_indexes
def create_area_chart(self, frame=None, total=False, percentage=(0.0, 100.0), cell_line="All"):
"plot an area chart with the evolution of the network states during the simulation"
cells_indexes = None
if cell_line != "All":
cells_indexes = self.create_cell_indexes(frame, cell_line)
if np.sum(cells_indexes == self.cell_lines_by_name[cell_line]) == 0:
print("There are no %s cells." % cell_line)
return
self.create_dict(frame, self.output_dir)
self.state_counter(frame, percentage, cells_indexes, cell_line)
state_list = []
all_state = []
a = []
for k in self.count_dict:
state_list.append([key for key, value in self.count_dict[k].items() if value > 0])
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
for state_count in self.count_dict:
b = []
for states in all_state:
try:
b.append(self.count_dict[state_count][states])
except KeyError:
b.append(0)
a.append(b)
a = np.array(a)
#print(a)
a = np.transpose(a)
if not total:
percent = a / a.sum(axis=0).astype(float) * 100
else:
percent = a
x = np.arange(len(self.count_dict))
self.fig = plt.figure(figsize=(self.figsize_width, self.figsize_height))
ax = self.fig.add_subplot(111)
ax.stackplot(x, percent, labels=all_state)
ax.legend(labels=all_state, loc='upper center', bbox_to_anchor=(0.5, -0.05),shadow=True, ncol=2)
# ax.legend(labels=all_state, bbox_to_anchor=(1.05, 1), loc='lower center', borderaxespad=0.)
if not total:
ax.set_ylabel('Percent (%)')
else:
ax.set_ylabel("Total")
ax.margins(0, 0) # Set margins to avoid "whitespace"
# plt.show()
def filter_states(self, max_cell, all_counts, percentage):
"""Drop states whose total count across all frames is below `percentage` percent of `max_cell`, and lump the dropped counts into an 'others' entry per frame."""
copy_all_counts = copy.deepcopy(all_counts)
state_list = []
all_state = []
for k in all_counts:
state_list.append(list(all_counts[k].keys()))
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
banned_list = []
for state in all_state:
a = 0
for i in all_counts.keys():
try:
a = a + all_counts[i][state]
except KeyError:
a = a + 0
if (a < (percentage/100) * max_cell):
banned_list.append(state)
for i in all_counts.keys():
del all_counts[i][state]
for i in all_counts.keys():
b = 0
for state in banned_list:
try:
b = b + copy_all_counts[i][state]
except KeyError:
b = b + 0
all_counts[i]["others"] = b
return all_counts
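# ---- Usage sketch (added example, not part of the original file) ----
# Meant for a Jupyter notebook sitting next to a PhysiCell/PhysiBoSS run; the
# 'output' directory is a placeholder for wherever snapshot*.svg, states_*.csv
# and output*_cells_physicell.mat files were written.
# physiboss_tab = PhysiBoSSTab()
# physiboss_tab.update(rdir='output')   # point the tab at the run's output folder
# display(physiboss_tab.tab)            # display() is provided by IPython/Jupyter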
|
[
"ipywidgets.interactive",
"copy.deepcopy",
"xml.etree.ElementTree.parse",
"numpy.sum",
"csv.reader",
"scipy.io.loadmat",
"collections.Counter",
"numpy.zeros",
"numpy.transpose",
"ipywidgets.Box",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.array",
"ipywidgets.Label",
"ipywidgets.Layout",
"ipywidgets.VBox",
"os.path.join"
] |
[((1211, 1238), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1225, 1238), False, 'import os\n'), ((2007, 2169), 'ipywidgets.interactive', 'interactive', (['self.create_area_chart'], {'frame': '(0, max_frames)', 'percentage': '(0.0, 10.0)', 'total': '(False)', 'cell_line': 'self.cell_lines_array', 'continuous_update': '(False)'}), '(self.create_area_chart, frame=(0, max_frames), percentage=(0.0,\n 10.0), total=False, cell_line=self.cell_lines_array, continuous_update=\n False)\n', (2018, 2169), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3023, 3100), 'ipywidgets.Layout', 'Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""row"""', 'align_items': '"""stretch"""', 'width': '"""900px"""'}), "(display='flex', flex_flow='row', align_items='stretch', width='900px')\n", (3029, 3100), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3176, 3219), 'ipywidgets.Box', 'Box', ([], {'children': 'items_auto', 'layout': 'box_layout'}), '(children=items_auto, layout=box_layout)\n', (3179, 3219), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3240, 3267), 'ipywidgets.VBox', 'VBox', (['[row1, self.svg_plot]'], {}), '([row1, self.svg_plot])\n', (3244, 3267), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3354, 3365), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3362, 3365), True, 'import numpy as np\n'), ((8050, 8061), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (8058, 8061), True, 'import numpy as np\n'), ((8092, 8107), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (8104, 8107), True, 'import numpy as np\n'), ((8292, 8353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.figsize_width, self.figsize_height)'}), '(figsize=(self.figsize_width, self.figsize_height))\n', (8302, 8353), True, 'import matplotlib.pyplot as plt\n'), ((9348, 9373), 'copy.deepcopy', 'copy.deepcopy', (['all_counts'], {}), '(all_counts)\n', (9361, 9373), False, 'import copy\n'), ((2909, 2958), 'ipywidgets.Label', 'Label', (['"""select slider: drag or left/right arrows"""'], {}), "('select slider: drag or left/right arrows')\n", (2914, 2958), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((6144, 6180), 'os.path.join', 'os.path.join', (['self.output_dir', 'fname'], {}), '(self.output_dir, fname)\n', (6156, 6180), False, 'import os\n'), ((6425, 6464), 'scipy.io.loadmat', 'scipy.io.loadmat', (['full_fname', 'info_dict'], {}), '(full_fname, info_dict)\n', (6441, 6464), False, 'import scipy\n'), ((1293, 1314), 'xml.etree.ElementTree.parse', 'ET.parse', (['config_file'], {}), '(config_file)\n', (1301, 1314), True, 'import xml.etree.ElementTree as ET\n'), ((2713, 2734), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""160px"""'}), "(width='160px')\n", (2719, 2734), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, 
interactive, Output\n'), ((3602, 3648), 'os.path.join', 'os.path.join', (['self.output_dir', '"""snapshot*.svg"""'], {}), "(self.output_dir, 'snapshot*.svg')\n", (3614, 3648), False, 'import os\n'), ((5686, 5705), 'collections.Counter', 'Counter', (['state_list'], {}), '(state_list)\n', (5693, 5705), False, 'from collections import deque, Counter\n'), ((6213, 6239), 'os.path.isfile', 'os.path.isfile', (['full_fname'], {}), '(full_fname)\n', (6227, 6239), False, 'import os\n'), ((7114, 7173), 'numpy.sum', 'np.sum', (['(cells_indexes == self.cell_lines_by_name[cell_line])'], {}), '(cells_indexes == self.cell_lines_by_name[cell_line])\n', (7120, 7173), True, 'import numpy as np\n'), ((4710, 4744), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4720, 4744), False, 'import csv\n'), ((4592, 4644), 'os.path.join', 'os.path.join', (['self.output_dir', "('states_%08u.csv' % i)"], {}), "(self.output_dir, 'states_%08u.csv' % i)\n", (4604, 4644), False, 'import os\n')]
|
import enum
import mesh
from tri_mesh_viewer import TriMeshViewer
import parametrization
from matplotlib import pyplot as plt
def analysisPlots(m, uvs, figsize=(8,4), bins=200):
plt.figure(figsize=figsize)
plt.subplot(1, 2, 1)
for label, uv in uvs.items():
distortion = parametrization.conformalDistortion(m, uv)
plt.hist(distortion, bins=bins, alpha=0.5, label=label)
plt.title('Quasi-conformal Distortion Error Q - 1')
plt.legend()
plt.subplot(1, 2, 2)
for label, uv in uvs.items():
scaleFactor = parametrization.scaleFactor(m, uv)
plt.hist(scaleFactor, bins=bins, alpha=0.5, label=label)
plt.title('Scale Factors')
plt.legend()
plt.tight_layout()
def analysisPlotsGrid(m, uvs, figsize=(8,6), bins=200):
plt.figure(figsize=figsize)
nrows = len(uvs)
for i, (label, uv) in enumerate(uvs.items()):
plt.subplot(nrows, 2, 1 + 2 * i)
distortion = parametrization.conformalDistortion(m, uv)
plt.hist(distortion, bins=bins, alpha=1.0)
plt.title(f'{label} Quasi-conformal Distortion Q - 1')
plt.subplot(nrows, 2, 2 + 2 * i)
scaleFactor = parametrization.scaleFactor(m, uv)
plt.hist(scaleFactor, bins=bins, alpha=1.0)
plt.title(f'{label} Scale Factors')
plt.tight_layout()
class AnalysisField(enum.Enum):
NONE = 1
SCALE = 2
DISTORTION = 3
class ParametrizationViewer:
def __init__(self, m, uv):
self.m = m
self.view_3d = TriMeshViewer(m, wireframe=True)
self.view_2d = None
self.field = AnalysisField.DISTORTION
self.update_parametrization(uv)
def displayField(self, field, updateModelMatrix=False):
self.field = field
sf = None
if (self.field == AnalysisField.DISTORTION): sf = self.distortion
if (self.field == AnalysisField.SCALE ): sf = self.scaleFactor
self.view_2d.update(preserveExisting=False, updateModelMatrix=updateModelMatrix, mesh=self.mflat, scalarField=sf)
def update_parametrization(self, uv, updateModelMatrix=False):
self.mflat = mesh.Mesh(uv, self.m.elements())
if (self.view_2d is None): self.view_2d = TriMeshViewer(self.mflat, wireframe=True)
self.distortion = parametrization.conformalDistortion(self.m, uv)
self.scaleFactor = parametrization.scaleFactor(self.m, uv)
self.displayField(self.field, updateModelMatrix=updateModelMatrix)
def show(self):
from ipywidgets import HBox
return HBox([self.view_3d.show(), self.view_2d.show()])
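# Hedged usage sketch: assuming `mesh.Mesh` can be built from a mesh file and the
# local `parametrization` module exposes a flattening routine returning per-vertex
# UVs (the file name and the `parametrization.lscm` call are illustrative
# assumptions only), the helpers above could be driven roughly like this:
#
#   m = mesh.Mesh('disk.msh')
#   uv = parametrization.lscm(m)
#   analysisPlots(m, {'LSCM': uv})
#   viewer = ParametrizationViewer(m, uv)
#   viewer.show()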
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.legend",
"tri_mesh_viewer.TriMeshViewer",
"parametrization.scaleFactor",
"parametrization.conformalDistortion",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] |
[((183, 210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (193, 210), True, 'from matplotlib import pyplot as plt\n'), ((215, 235), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (226, 235), True, 'from matplotlib import pyplot as plt\n'), ((402, 453), 'matplotlib.pyplot.title', 'plt.title', (['"""Quasi-conformal Distortion Error Q - 1"""'], {}), "('Quasi-conformal Distortion Error Q - 1')\n", (411, 453), True, 'from matplotlib import pyplot as plt\n'), ((458, 470), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (468, 470), True, 'from matplotlib import pyplot as plt\n'), ((475, 495), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (486, 495), True, 'from matplotlib import pyplot as plt\n'), ((656, 682), 'matplotlib.pyplot.title', 'plt.title', (['"""Scale Factors"""'], {}), "('Scale Factors')\n", (665, 682), True, 'from matplotlib import pyplot as plt\n'), ((687, 699), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (697, 699), True, 'from matplotlib import pyplot as plt\n'), ((704, 722), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (720, 722), True, 'from matplotlib import pyplot as plt\n'), ((784, 811), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (794, 811), True, 'from matplotlib import pyplot as plt\n'), ((1300, 1318), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1316, 1318), True, 'from matplotlib import pyplot as plt\n'), ((291, 333), 'parametrization.conformalDistortion', 'parametrization.conformalDistortion', (['m', 'uv'], {}), '(m, uv)\n', (326, 333), False, 'import parametrization\n'), ((342, 397), 'matplotlib.pyplot.hist', 'plt.hist', (['distortion'], {'bins': 'bins', 'alpha': '(0.5)', 'label': 'label'}), '(distortion, bins=bins, alpha=0.5, label=label)\n', (350, 397), True, 'from matplotlib import pyplot as plt\n'), ((552, 586), 'parametrization.scaleFactor', 'parametrization.scaleFactor', (['m', 'uv'], {}), '(m, uv)\n', (579, 586), False, 'import parametrization\n'), ((595, 651), 'matplotlib.pyplot.hist', 'plt.hist', (['scaleFactor'], {'bins': 'bins', 'alpha': '(0.5)', 'label': 'label'}), '(scaleFactor, bins=bins, alpha=0.5, label=label)\n', (603, 651), True, 'from matplotlib import pyplot as plt\n'), ((891, 923), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', '(2)', '(1 + 2 * i)'], {}), '(nrows, 2, 1 + 2 * i)\n', (902, 923), True, 'from matplotlib import pyplot as plt\n'), ((945, 987), 'parametrization.conformalDistortion', 'parametrization.conformalDistortion', (['m', 'uv'], {}), '(m, uv)\n', (980, 987), False, 'import parametrization\n'), ((996, 1038), 'matplotlib.pyplot.hist', 'plt.hist', (['distortion'], {'bins': 'bins', 'alpha': '(1.0)'}), '(distortion, bins=bins, alpha=1.0)\n', (1004, 1038), True, 'from matplotlib import pyplot as plt\n'), ((1047, 1101), 'matplotlib.pyplot.title', 'plt.title', (['f"""{label} Quasi-conformal Distortion Q - 1"""'], {}), "(f'{label} Quasi-conformal Distortion Q - 1')\n", (1056, 1101), True, 'from matplotlib import pyplot as plt\n'), ((1110, 1142), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', '(2)', '(2 + 2 * i)'], {}), '(nrows, 2, 2 + 2 * i)\n', (1121, 1142), True, 'from matplotlib import pyplot as plt\n'), ((1165, 1199), 'parametrization.scaleFactor', 'parametrization.scaleFactor', (['m', 'uv'], {}), '(m, uv)\n', (1192, 1199), False, 'import parametrization\n'), 
((1208, 1251), 'matplotlib.pyplot.hist', 'plt.hist', (['scaleFactor'], {'bins': 'bins', 'alpha': '(1.0)'}), '(scaleFactor, bins=bins, alpha=1.0)\n', (1216, 1251), True, 'from matplotlib import pyplot as plt\n'), ((1260, 1295), 'matplotlib.pyplot.title', 'plt.title', (['f"""{label} Scale Factors"""'], {}), "(f'{label} Scale Factors')\n", (1269, 1295), True, 'from matplotlib import pyplot as plt\n'), ((1501, 1533), 'tri_mesh_viewer.TriMeshViewer', 'TriMeshViewer', (['m'], {'wireframe': '(True)'}), '(m, wireframe=True)\n', (1514, 1533), False, 'from tri_mesh_viewer import TriMeshViewer\n'), ((2268, 2315), 'parametrization.conformalDistortion', 'parametrization.conformalDistortion', (['self.m', 'uv'], {}), '(self.m, uv)\n', (2303, 2315), False, 'import parametrization\n'), ((2343, 2382), 'parametrization.scaleFactor', 'parametrization.scaleFactor', (['self.m', 'uv'], {}), '(self.m, uv)\n', (2370, 2382), False, 'import parametrization\n'), ((2197, 2238), 'tri_mesh_viewer.TriMeshViewer', 'TriMeshViewer', (['self.mflat'], {'wireframe': '(True)'}), '(self.mflat, wireframe=True)\n', (2210, 2238), False, 'from tri_mesh_viewer import TriMeshViewer\n')]
|
import os
import os.path
import shutil
# 1
def countFilesOfType(top, extension):
"""inputs: top: a String directory
extension: a String file extension
returns a count of files with a given extension in the directory
top and its subdirectories"""
count = 0
filenames = [x[2] for x in os.walk(top)]
for fileList in filenames:
for file in fileList:
if file.endswith(extension):
count += 1
return count
# 2 & 3
def findMaxDepth(top):
"""inputs: top: a String directory
    returns a tuple: (maximum directory depth within top, path to the deepest directory)
"""
return findMaxDepthHelper(top, [])
def findMaxDepthHelper(top, pathList):
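    # Helper for findMaxDepth: recursively measures directory depth below `top`,
    # accumulating every visited directory path in `pathList`; the second return
    # value comes from getMaxSlashes() over the accumulated paths and is meant to
    # be the path to the deepest directory encountered.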
maxDepth = 0
maxPath = getMaxSlashes(pathList)
depth = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_dir():
nextDepth, maxPath = findMaxDepthHelper(entry.path, pathList)
pathList += [entry.path]
depth = 1 + nextDepth
if depth > maxDepth:
maxDepth = depth
return maxDepth, maxPath
# 4
def countHaveTenDigits(top):
"""inputs: top: a String directory
returns the number of files within top and its subdirectories that
have a 10 digit phone number
"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
if countDigits(data) == 10:
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countHaveTenDigits(entry.path)
else:
pass
return count
# 5
def count909AreaCode(top):
"""inputs: top: a String directory
returns number of files within top directory and its subdirectories
that have a 909 area code"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = makeDigitString(data)
if newData[0:3] == '909':
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += count909AreaCode(entry.path)
else:
pass
return count
# 6
def countLastName(top, name):
"""inputs: top: a String directory
name: a last name
returns a count of files within top and subdirectories
that have a given last name"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if newData.startswith(name):
count += 1
else:
if newData.endswith(name):
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countLastName(entry.path, name)
else:
pass
return count
# 7
def countFirstName(top, name):
"""inputs: top: a String directory
name: a first name
returns a count of files within top and its subdirectories
that have a given first name"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if newData.endswith(name):
count += 1
else:
if newData.startswith(name):
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countFirstName(entry.path, name)
else:
pass
return count
# 8
def countInitials(top, firstInit, lastInit):
"""inputs: top: a String directory
firstInit: the name's first initial
lastInit: the name's last initial
returns a count of files within top and its subdirectories
that have a name with the given initials"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if getOneAfterSpace(newData) == firstInit and newData[0] == lastInit:
count += 1
else:
if getOneAfterSpace(newData) == lastInit and newData[0] == firstInit:
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countInitials(entry.path, firstInit, lastInit)
else:
pass
return count
# 9
def diffFirstName(top):
"""inputs: top: a String directory
returns a number of unique first names in
files of top and its subdirectories"""
return diffFirstNameHelper(top, [])
def diffFirstNameHelper(top, nameList):
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
firstName = getFirstName(data)
if firstName not in nameList and firstName != None:
nameList += [firstName]
elif entry.is_dir(): # repeat if entry is a directory
diffFirstNameHelper(entry.path, nameList)
else:
pass
return len(nameList)
# HELPER FUNCTIONS
def getMaxSlashes(L):
maxCount = 0
index = 0
if L == []:
return ''
else:
for i in range(len(L)):
count = 0
for char in L[i]:
if char == '/':
count += 1
if count > maxCount:
maxCount = count
index = i
return L[index]
def countDigits(string):
"""return number of digits in a string (Helper for countHaveTenDigits)"""
count = 0
for char in string:
        if char in '0123456789':
count += 1
return count
def makeDigitString(string):
"""Gathers all digits and returns them in a continuous string
(Helper for count909AreaCode)"""
newString = ''
for char in string:
if char in '0123456789':
newString += char
return newString
def getOneAfterSpace(string):
"""returns next character after a space in given string
(Helper for countInitials)"""
    for i in range(len(string) - 1):
        if string[i] == ' ':
            return string[i+1]
return ''
def getAllAfterSpace(string):
"""returns all characters after a space in given string
(Helper for getFirstName)"""
for i in range(len(string)):
if string[i] == ' ':
return string[i+1:]
return ''
def getAllBeforeSpace(string):
"""returns all characters before a space in given string
(Helper for getFirstName)"""
    for i in range(len(string)):
        if string[i] == ' ':
            return string[:i]
    return None  # no space found
def getName(string):
"""Grab the name as written in files (Helper)"""
newString = ''
reachedLetter = False
for char in string:
if char in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
reachedLetter = True
if reachedLetter == True and char == '\n':
break
if reachedLetter == True:
newString += char
return newString
def getFirstName(string):
"""return the first name (Helper for diffFirstName)"""
name = getName(string)
if ',' in name:
return getAllAfterSpace(name)
return getAllBeforeSpace(name)
# MAIN
def main():
print(countFilesOfType('phone_files', '.txt')) # 9893
print(findMaxDepth('phone_files')) # 4, 'phone_files/86/Hidden/Deeper/Deepest'
print(countHaveTenDigits('phone_files')) # 3988
print(count909AreaCode('phone_files')) # 17
print(countLastName('phone_files', 'DAVIS')) # 224
print(countFirstName('phone_files', 'DAVIS'))# 3
print(countInitials('phone_files', 'J', 'S')) # 105
print(diffFirstName('phone_files'))# 224
    # NOTE: countFirstName/countLastName gave some inconsistent answers; name parsing may need more work
if __name__ == "__main__":
main()
|
[
"os.walk",
"os.scandir"
] |
[((836, 851), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (846, 851), False, 'import os\n'), ((1506, 1521), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (1516, 1521), False, 'import os\n'), ((2540, 2555), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (2550, 2555), False, 'import os\n'), ((3644, 3659), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (3654, 3659), False, 'import os\n'), ((4904, 4919), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (4914, 4919), False, 'import os\n'), ((6253, 6268), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (6263, 6268), False, 'import os\n'), ((7616, 7631), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (7626, 7631), False, 'import os\n'), ((339, 351), 'os.walk', 'os.walk', (['top'], {}), '(top)\n', (346, 351), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import sys
import importlib
import time
sys.path.append('plugins/')
PCRC = None
PREFIX = '!!PCRC'
# 0=guest 1=user 2=helper 3=admin
Permission = 1
def permission(server, info, perm):
if info.is_user:
if info.source == 1:
return True
elif server.get_permission_level(info) >= perm:
return True
return False
def load_PCRC():
global PCRC
PCRC = importlib.import_module('PCRC-MCDR.PCRC')
def on_info(server, info):
if permission(server, info, Permission) and info.content == '!!PCRC start':
server.reply(info, 'Starting PCRC')
if PCRC.is_working():
server.reply(info, 'PCRC is already running!')
else:
PCRC.start()
if info.source == 1 and info.content == '!!PCRC stop':
if PCRC.is_working():
PCRC.stop()
else:
server.reply(info, 'PCRC is not running!')
def on_load(server, old):
global PCRC
try:
if old is not None and old.PCRC is not None and old.PCRC.is_working():
PCRC = old.PCRC
else:
load_PCRC()
except:
load_PCRC()
def on_mcdr_stop(server):
global PCRC
if PCRC is None:
return
if PCRC.is_working():
PCRC.stop()
else:
for i in range(600):
if not PCRC.is_stopped():
server.logger.info('Waiting for PCRC to stop')
for i in range(10):
if not PCRC.is_stopped():
time.sleep(0.1)
if not PCRC.is_stopped():
server.logger.info('PCRC took too long to stop (more than 10min)! Exit anyway')
|
[
"sys.path.append",
"importlib.import_module",
"time.sleep"
] |
[((65, 92), 'sys.path.append', 'sys.path.append', (['"""plugins/"""'], {}), "('plugins/')\n", (80, 92), False, 'import sys\n'), ((426, 467), 'importlib.import_module', 'importlib.import_module', (['"""PCRC-MCDR.PCRC"""'], {}), "('PCRC-MCDR.PCRC')\n", (449, 467), False, 'import importlib\n'), ((1533, 1548), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1543, 1548), False, 'import time\n')]
|
import timeit
from typing import Union
import numpy as np
import pandas as pd
import copy
from carla.evaluation.distances import get_distances
from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist
from carla.evaluation.manifold import yNN_manifold, sphere_manifold
from carla.evaluation.process_nans import remove_nans
from carla.evaluation.redundancy import redundancy
from carla.evaluation.success_rate import success_rate, individual_success_rate
from carla.evaluation.diversity import individual_diversity, avg_diversity
from carla.evaluation.violations import constraint_violation
from carla.evaluation.recourse_time import recourse_time_taken
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.recourse_methods.api import RecourseMethod
from carla.recourse_methods.processing import get_drop_columns_binary
class Benchmark:
"""
The benchmarking class contains all measurements.
    Individual evaluation metrics can be run on their own, or all of them via a single call.
For every given factual, the benchmark object will generate one counterfactual example with
the given recourse method.
Parameters
----------
mlmodel: carla.models.MLModel
Black Box model we want to explain
recmodel: carla.recourse_methods.RecourseMethod
Recourse method we want to benchmark
factuals: pd.DataFrame
Instances we want to find counterfactuals
Methods
-------
compute_ynn:
Computes y-Nearest-Neighbours for generated counterfactuals
compute_average_time:
Computes average time for generated counterfactual
compute_distances:
Calculates the distance measure and returns it as dataframe
compute_constraint_violation:
Computes the constraint violation per factual as dataframe
compute_redundancy:
Computes redundancy for each counterfactual
compute_success_rate:
Computes success rate for the whole recourse method.
run_benchmark:
Runs every measurement and returns every value as dict.
"""
def __init__(
self,
mlmodel: Union[MLModel, MLModelCatalog],
recourse_method: RecourseMethod,
factuals: pd.DataFrame,
dataset: pd.DataFrame = None
) -> None:
self._mlmodel = mlmodel
self._recourse_method = recourse_method
self._full_dataset = dataset
start = timeit.default_timer()
self._counterfactuals = recourse_method.get_counterfactuals(factuals)
stop = timeit.default_timer()
self._timer = stop - start
# Avoid using scaling and normalizing more than once
if isinstance(mlmodel, MLModelCatalog):
self._mlmodel.use_pipeline = False # type: ignore
self._factuals = copy.deepcopy(factuals)
# Normalizing and encoding factual for later use
self._enc_norm_factuals = recourse_method.encode_normalize_order_factuals(
factuals, with_target=True
)
def compute_ynn(self) -> pd.DataFrame:
"""
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours"]
return pd.DataFrame([[ynn]], columns=columns)
def compute_average_time(self) -> pd.DataFrame:
"""
Computes average time for generated counterfactual
Returns
-------
pd.DataFrame
"""
avg_time = self._timer / self._counterfactuals.shape[0]
columns = ["Average_Time"]
return pd.DataFrame([[avg_time]], columns=columns)
def compute_distances(self) -> pd.DataFrame:
"""
Calculates the distance measure and returns it as dataframe
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
columns = ["Distance_1", "Distance_2", "Distance_3", "Distance_4"]
if counterfactuals_without_nans.empty:
return pd.DataFrame(columns=columns)
if self._mlmodel.encoder.drop is None:
# To prevent double count of encoded features without drop if_binary
binary_columns_to_drop = get_drop_columns_binary(
self._mlmodel.data.categoricals,
counterfactuals_without_nans.columns.tolist(),
)
counterfactuals_without_nans = counterfactuals_without_nans.drop(
binary_columns_to_drop, axis=1
)
factual_without_nans = factual_without_nans.drop(
binary_columns_to_drop, axis=1
)
arr_f = factual_without_nans.to_numpy()
arr_cf = counterfactuals_without_nans.to_numpy()
distances = get_distances(arr_f, arr_cf)
output = pd.DataFrame(distances, columns=columns)
return output
def compute_constraint_violation(self) -> pd.DataFrame:
"""
Computes the constraint violation per factual as dataframe
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
violations = []
else:
violations = constraint_violation(
self._mlmodel, counterfactuals_without_nans, factual_without_nans
)
columns = ["Constraint_Violation"]
return pd.DataFrame(violations, columns=columns)
def compute_time_taken(self) -> pd.DataFrame:
"""
TODO
Computes time taken for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
time_taken = []
else:
time_taken = recourse_time_taken(
self._recourse_method, self._factuals
)
columns = ["Time_taken"]
return pd.DataFrame(time_taken, columns=columns)
def compute_individual_diversity(self) -> pd.DataFrame:
"""
TODO
        Computes instance-wise diversity for generated counterfactuals
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
            diversity = []
        else:
            diversity = individual_diversity(
                counterfactuals_without_nans, factual_without_nans
            )
        columns = ["Individual_Diversity"]
        return pd.DataFrame(diversity, columns=columns)
def compute_avg_diversity(self) -> pd.DataFrame:
"""
TODO
Computes average diversity for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
diversity = []
else:
diversity = avg_diversity(
counterfactuals_without_nans, factual_without_nans
)
columns = ["Average_Diversity"]
return pd.DataFrame(diversity, columns=columns)
def compute_ynn_dist(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_dist(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_ynn(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals with respect to positive class
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_sphere(self) -> pd.DataFrame:
"""
TODO
Computes neighbor distance for generated counterfactuals with respect to positive class within sphere
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = sphere_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel
)
columns = ["Sphere-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_ynn_prob(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_prob(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
print(ynn)
columns = ["y-Nearest-Neighbours-Probability"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_redundancy(self) -> pd.DataFrame:
"""
Computes redundancy for each counterfactual
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
redundancies = []
else:
redundancies = redundancy(
factual_without_nans, counterfactuals_without_nans, self._mlmodel
)
columns = ["Redundancy"]
return pd.DataFrame(redundancies, columns=columns)
def compute_success_rate(self) -> pd.DataFrame:
"""
Computes success rate for the whole recourse method.
Returns
-------
pd.Dataframe
"""
rate = success_rate(self._counterfactuals)
columns = ["Success_Rate"]
return pd.DataFrame([[rate]], columns=columns)
def compute_individual_success_rate(self) -> pd.DataFrame:
"""
Computes success rate for the whole recourse method.
Returns
-------
pd.Dataframe
"""
rate = individual_success_rate(self._counterfactuals)
columns = ["Individual_Success_Rate"]
return pd.DataFrame([[rate]], columns=columns)
def run_benchmark(self) -> pd.DataFrame:
"""
Runs every measurement and returns every value as dict.
Returns
-------
pd.DataFrame
"""
pipeline = [
self.compute_distances(),
self.compute_constraint_violation(),
self.compute_redundancy(),
self.compute_ynn_prob(),
self.compute_ynn_dist(),
#self.compute_individual_success_rate(),
#self.compute_individual_diversity(),
self.compute_time_taken(),
self.compute_manifold_ynn(),
self.compute_manifold_sphere(),
self.compute_success_rate(),
self.compute_average_time(),
self.compute_ynn()
#self.compute_avg_diversity()
]
output = pd.concat(pipeline, axis=1)
return output
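# Hedged usage sketch: assuming a fitted model wrapper, a recourse method and a
# factuals DataFrame are already available (the variable names below are
# illustrative placeholders, not prescribed CARLA calls), the evaluation runs as:
#
#   benchmark = Benchmark(mlmodel, recourse_method, factuals)
#   results = benchmark.run_benchmark()        # every metric in one DataFrame
#   distances = benchmark.compute_distances()  # or call individual metrics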
|
[
"pandas.DataFrame",
"carla.evaluation.recourse_time.recourse_time_taken",
"copy.deepcopy",
"timeit.default_timer",
"carla.evaluation.success_rate.success_rate",
"carla.evaluation.diversity.individual_diversity",
"carla.evaluation.diversity.avg_diversity",
"carla.evaluation.manifold.sphere_manifold",
"carla.evaluation.redundancy.redundancy",
"carla.evaluation.violations.constraint_violation",
"carla.evaluation.nearest_neighbours.yNN",
"carla.evaluation.nearest_neighbours.yNN_dist",
"carla.evaluation.manifold.yNN_manifold",
"carla.evaluation.nearest_neighbours.yNN_prob",
"carla.evaluation.distances.get_distances",
"carla.evaluation.success_rate.individual_success_rate",
"carla.evaluation.process_nans.remove_nans",
"pandas.concat"
] |
[((2440, 2462), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2460, 2462), False, 'import timeit\n'), ((2556, 2578), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2576, 2578), False, 'import timeit\n'), ((2813, 2836), 'copy.deepcopy', 'copy.deepcopy', (['factuals'], {}), '(factuals)\n', (2826, 2836), False, 'import copy\n'), ((3259, 3309), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (3270, 3309), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((3602, 3640), 'pandas.DataFrame', 'pd.DataFrame', (['[[ynn]]'], {'columns': 'columns'}), '([[ynn]], columns=columns)\n', (3614, 3640), True, 'import pandas as pd\n'), ((3948, 3991), 'pandas.DataFrame', 'pd.DataFrame', (['[[avg_time]]'], {'columns': 'columns'}), '([[avg_time]], columns=columns)\n', (3960, 3991), True, 'import pandas as pd\n'), ((4249, 4308), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._enc_norm_factuals', 'self._counterfactuals'], {}), '(self._enc_norm_factuals, self._counterfactuals)\n', (4260, 4308), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((5210, 5238), 'carla.evaluation.distances.get_distances', 'get_distances', (['arr_f', 'arr_cf'], {}), '(arr_f, arr_cf)\n', (5223, 5238), False, 'from carla.evaluation.distances import get_distances\n'), ((5257, 5297), 'pandas.DataFrame', 'pd.DataFrame', (['distances'], {'columns': 'columns'}), '(distances, columns=columns)\n', (5269, 5297), True, 'import pandas as pd\n'), ((5588, 5638), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (5599, 5638), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((5953, 5994), 'pandas.DataFrame', 'pd.DataFrame', (['violations'], {'columns': 'columns'}), '(violations, columns=columns)\n', (5965, 5994), True, 'import pandas as pd\n'), ((6277, 6327), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (6288, 6327), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((6603, 6644), 'pandas.DataFrame', 'pd.DataFrame', (['time_taken'], {'columns': 'columns'}), '(time_taken, columns=columns)\n', (6615, 6644), True, 'import pandas as pd\n'), ((6946, 6996), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (6957, 6996), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((7294, 7334), 'pandas.DataFrame', 'pd.DataFrame', (['diveristy'], {'columns': 'columns'}), '(diveristy, columns=columns)\n', (7306, 7334), True, 'import pandas as pd\n'), ((7623, 7673), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (7634, 7673), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((7961, 8001), 'pandas.DataFrame', 'pd.DataFrame', (['diversity'], {'columns': 'columns'}), '(diversity, columns=columns)\n', (7973, 8001), True, 'import pandas as pd\n'), ((8256, 8306), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (8267, 8306), False, 'from carla.evaluation.process_nans import 
remove_nans\n'), ((8615, 8649), 'pandas.DataFrame', 'pd.DataFrame', (['ynn'], {'columns': 'columns'}), '(ynn, columns=columns)\n', (8627, 8649), True, 'import pandas as pd\n'), ((8958, 9008), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (8969, 9008), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((9330, 9364), 'pandas.DataFrame', 'pd.DataFrame', (['ynn'], {'columns': 'columns'}), '(ynn, columns=columns)\n', (9342, 9364), True, 'import pandas as pd\n'), ((9687, 9737), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (9698, 9737), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((10045, 10079), 'pandas.DataFrame', 'pd.DataFrame', (['ynn'], {'columns': 'columns'}), '(ynn, columns=columns)\n', (10057, 10079), True, 'import pandas as pd\n'), ((10361, 10411), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._factuals', 'self._counterfactuals'], {}), '(self._factuals, self._counterfactuals)\n', (10372, 10411), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((10749, 10783), 'pandas.DataFrame', 'pd.DataFrame', (['ynn'], {'columns': 'columns'}), '(ynn, columns=columns)\n', (10761, 10783), True, 'import pandas as pd\n'), ((11053, 11112), 'carla.evaluation.process_nans.remove_nans', 'remove_nans', (['self._enc_norm_factuals', 'self._counterfactuals'], {}), '(self._enc_norm_factuals, self._counterfactuals)\n', (11064, 11112), False, 'from carla.evaluation.process_nans import remove_nans\n'), ((11412, 11455), 'pandas.DataFrame', 'pd.DataFrame', (['redundancies'], {'columns': 'columns'}), '(redundancies, columns=columns)\n', (11424, 11455), True, 'import pandas as pd\n'), ((11664, 11699), 'carla.evaluation.success_rate.success_rate', 'success_rate', (['self._counterfactuals'], {}), '(self._counterfactuals)\n', (11676, 11699), False, 'from carla.evaluation.success_rate import success_rate, individual_success_rate\n'), ((11751, 11790), 'pandas.DataFrame', 'pd.DataFrame', (['[[rate]]'], {'columns': 'columns'}), '([[rate]], columns=columns)\n', (11763, 11790), True, 'import pandas as pd\n'), ((12010, 12056), 'carla.evaluation.success_rate.individual_success_rate', 'individual_success_rate', (['self._counterfactuals'], {}), '(self._counterfactuals)\n', (12033, 12056), False, 'from carla.evaluation.success_rate import success_rate, individual_success_rate\n'), ((12119, 12158), 'pandas.DataFrame', 'pd.DataFrame', (['[[rate]]'], {'columns': 'columns'}), '([[rate]], columns=columns)\n', (12131, 12158), True, 'import pandas as pd\n'), ((12978, 13005), 'pandas.concat', 'pd.concat', (['pipeline'], {'axis': '(1)'}), '(pipeline, axis=1)\n', (12987, 13005), True, 'import pandas as pd\n'), ((3437, 3511), 'carla.evaluation.nearest_neighbours.yNN', 'yNN', (['counterfactuals_without_nans', 'self._recourse_method', 'self._mlmodel', '(5)'], {}), '(counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5)\n', (3440, 3511), False, 'from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist\n'), ((4474, 4503), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (4486, 4503), True, 'import pandas as pd\n'), ((5776, 5867), 'carla.evaluation.violations.constraint_violation', 'constraint_violation', (['self._mlmodel', 'counterfactuals_without_nans', 'factual_without_nans'], {}), 
'(self._mlmodel, counterfactuals_without_nans,\n factual_without_nans)\n', (5796, 5867), False, 'from carla.evaluation.violations import constraint_violation\n'), ((6465, 6523), 'carla.evaluation.recourse_time.recourse_time_taken', 'recourse_time_taken', (['self._recourse_method', 'self._factuals'], {}), '(self._recourse_method, self._factuals)\n', (6484, 6523), False, 'from carla.evaluation.recourse_time import recourse_time_taken\n'), ((7132, 7204), 'carla.evaluation.diversity.individual_diversity', 'individual_diversity', (['counterfactuals_without_nans', 'factual_without_nans'], {}), '(counterfactuals_without_nans, factual_without_nans)\n', (7152, 7204), False, 'from carla.evaluation.diversity import individual_diversity, avg_diversity\n'), ((7809, 7874), 'carla.evaluation.diversity.avg_diversity', 'avg_diversity', (['counterfactuals_without_nans', 'factual_without_nans'], {}), '(counterfactuals_without_nans, factual_without_nans)\n', (7822, 7874), False, 'from carla.evaluation.diversity import individual_diversity, avg_diversity\n'), ((8434, 8513), 'carla.evaluation.nearest_neighbours.yNN_dist', 'yNN_dist', (['counterfactuals_without_nans', 'self._recourse_method', 'self._mlmodel', '(5)'], {}), '(counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5)\n', (8442, 8513), False, 'from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist\n'), ((9136, 9224), 'carla.evaluation.manifold.yNN_manifold', 'yNN_manifold', (['counterfactuals_without_nans', 'self._recourse_method', 'self._mlmodel', '(5)'], {}), '(counterfactuals_without_nans, self._recourse_method, self.\n _mlmodel, 5)\n', (9148, 9224), False, 'from carla.evaluation.manifold import yNN_manifold, sphere_manifold\n'), ((9865, 9953), 'carla.evaluation.manifold.sphere_manifold', 'sphere_manifold', (['counterfactuals_without_nans', 'self._recourse_method', 'self._mlmodel'], {}), '(counterfactuals_without_nans, self._recourse_method, self.\n _mlmodel)\n', (9880, 9953), False, 'from carla.evaluation.manifold import yNN_manifold, sphere_manifold\n'), ((10547, 10626), 'carla.evaluation.nearest_neighbours.yNN_prob', 'yNN_prob', (['counterfactuals_without_nans', 'self._recourse_method', 'self._mlmodel', '(5)'], {}), '(counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5)\n', (10555, 10626), False, 'from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist\n'), ((11254, 11331), 'carla.evaluation.redundancy.redundancy', 'redundancy', (['factual_without_nans', 'counterfactuals_without_nans', 'self._mlmodel'], {}), '(factual_without_nans, counterfactuals_without_nans, self._mlmodel)\n', (11264, 11331), False, 'from carla.evaluation.redundancy import redundancy\n')]
|
# -*- coding: utf-8 -*-
"""
Automation task as a AppDaemon App for Home Assistant
This little app controls the ambient light when Kodi plays video,
dimming some lights and turning off others, and returning to the
initial state when the playback is finished.
In addition, it sends a notification when video playback starts,
reporting the video info in the message.
For that, it talks to Kodi through its JSON-RPC API via Home Assistant service calls.
"""
import datetime as dt
from urllib import parse
import appdaemon.plugins.hass.hassapi as hass
LOG_LEVEL = "DEBUG"
LOG_LEVEL_HIGH = "WARNING"
LOGGER = "event_log"
EVENT_KODI_CALL_METHOD_RESULT = "kodi_call_method_result"
METHOD_GET_PLAYERS = "Player.GetPlayers"
METHOD_GET_ITEM = "Player.GetItem"
PARAMS_GET_ITEM = {
"playerid": 1,
"properties": [
"title",
"artist",
"albumartist",
"genre",
"year",
"rating",
"album",
"track",
"duration",
"playcount",
"fanart",
"plot",
"originaltitle",
"lastplayed",
"firstaired",
"season",
"episode",
"showtitle",
"thumbnail",
"file",
"tvshowid",
"watchedepisodes",
"art",
"description",
"theme",
"dateadded",
"runtime",
"starttime",
"endtime",
],
}
TYPE_ITEMS_NOTIFY = ["movie", "episode"]
TYPE_HA_ITEMS_NOTIFY = ["tvshow", "movie"]
TELEGRAM_KEYBOARD_KODI = ["/luceson, /ambilighttoggle"]
TELEGRAM_INLINEKEYBOARD_KODI = [
[("Luces ON", "/luceson"), ("Switch Ambilight", "/ambilighttoggle")],
]
# noinspection PyClassHasNoInit
class KodiAssistant(hass.Hass):
"""App for Ambient light control when playing video with KODI."""
_lights = None
_light_states = {}
_media_player = None
_is_playing_video = False
_item_playing = None
_last_play = None
_notifier_bot = "telegram_bot"
_target_sensor = None
_ios_notifier = None
def initialize(self):
"""AppDaemon required method for app init."""
_lights_dim_on = self.args.get("lights_dim_on", "").split(",")
_lights_dim_off = self.args.get("lights_dim_off", "").split(",")
_lights_off = self.args.get("lights_off", "").split(",")
self._lights = {
"dim": {"on": _lights_dim_on, "off": _lights_dim_off},
"off": _lights_off,
"state": "off",
}
self._media_player = self.config["media_player"]
self._ios_notifier = self.config["notifier"].replace(".", "/")
self._target_sensor = self.config["chatid_sensor"]
# Listen for Kodi changes:
self._last_play = self.datetime()
self.listen_state(self.kodi_state, self._media_player)
self.listen_event(
self._receive_kodi_result, EVENT_KODI_CALL_METHOD_RESULT
)
def _get_max_brightness_ambient_lights(self):
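        # Brightness cap for the ambient lights depends on the time of day:
        # brighter during daytime, progressively dimmer in the evening and at night.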
if self.now_is_between("09:00:00", "19:00:00"):
return 200
elif self.now_is_between("19:00:00", "22:00:00"):
return 150
elif self.now_is_between("22:00:00", "04:00:00"):
return 75
return 25
def _ask_for_playing_item(self):
self.call_service(
"kodi/call_method",
entity_id=self._media_player,
method=METHOD_GET_ITEM,
**PARAMS_GET_ITEM,
)
# noinspection PyUnusedLocal
def _receive_kodi_result(self, event_id, payload_event, *args):
result = payload_event["result"]
method = payload_event["input"]["method"]
if (
event_id == EVENT_KODI_CALL_METHOD_RESULT
and method == METHOD_GET_ITEM
):
if "item" in result:
item = result["item"]
self._is_playing_video = item["type"] in TYPE_ITEMS_NOTIFY
title = message = img_url = ""
if self._is_playing_video:
title, message, img_url = self._get_kodi_info_params(item)
new_video = (
self._item_playing is None or self._item_playing != title
)
now = self.datetime()
delta = now - self._last_play
self._last_play = now
if self._is_playing_video and (
new_video or delta > dt.timedelta(minutes=30)
):
self._adjust_kodi_lights(play=True)
self._item_playing = title
# Notifications
self._notify_ios_message(title, message, img_url)
self._notify_telegram_message(title, message, img_url)
else:
self.log(
"RECEIVED BAD KODI RESULT: {}".format(result),
level=LOG_LEVEL_HIGH,
log=LOGGER,
)
elif (
event_id == EVENT_KODI_CALL_METHOD_RESULT
and method == METHOD_GET_PLAYERS
):
self.log(
"KODI GET_PLAYERS RECEIVED: {}".format(result), log=LOGGER
)
def _get_kodi_info_params(self, item):
"""
media_content_id: {
"unknown": "304004"
}
entity_picture: /api/media_player_proxy/media_player.kodi?token=...
media_duration: 1297
media_title: The One Where Chandler Takes A Bath
media_album_name:
media_season: 8
media_episode: 13
is_volume_muted: false
media_series_title: Friends
media_content_type: tvshow
"""
if item["type"] == "episode":
title = "{} S{:02d}E{:02d} {}".format(
item["showtitle"],
item["season"],
item["episode"],
item["title"],
)
else:
title = "Playing: {}".format(item["title"])
if item["year"]:
title += " [{}]".format(item["year"])
message = "{}\n∆T: {}.".format(
item["plot"], dt.timedelta(hours=item["runtime"] / 3600)
)
img_url = None
try:
if "thumbnail" in item:
raw_img_url = item["thumbnail"]
elif "thumb" in item:
raw_img_url = item["thumb"]
elif "poster" in item["art"]:
raw_img_url = item["art"]["poster"]
elif "season.poster" in item["art"]:
raw_img_url = item["art"]["season.poster"]
else:
self.log(f"No poster in item[art]={item['art']}", log=LOGGER)
k = list(item["art"].keys())[0]
raw_img_url = item["art"][k]
img_url = (
parse.unquote_plus(raw_img_url).rstrip("/").lstrip("image://")
)
if ("192.168." not in img_url) and img_url.startswith("http://"):
img_url = img_url.replace("http:", "https:")
url_valid = self._valid_image_url(img_url)
            self.log(
                "MESSAGE: T={}, M={}, URL={}, ok={}".format(title, message, img_url, url_valid),
log=LOGGER,
level=LOG_LEVEL,
)
if not url_valid:
img_url = None
except KeyError as e:
self.log(
"MESSAGE KeyError: {}; item={}".format(e, item), log=LOGGER
)
return title, message, img_url
def _valid_image_url(self, img_url):
if (img_url is not None) and img_url.startswith("http"):
return True
if img_url is not None:
self.log(
"BAD IMAGE URL: {}".format(img_url),
level=LOG_LEVEL_HIGH,
log=LOGGER,
)
return False
def _notify_ios_message(self, title, message, img_url=None):
data_msg = {
"title": title,
"message": message,
"data": {"push": {"category": "kodiplay"}},
}
if img_url is not None:
data_msg["data"]["attachment"] = {"url": img_url}
self.call_service(self._ios_notifier, **data_msg)
def _notify_telegram_message(self, title, message, img_url=None):
target = int(self.get_state(self._target_sensor))
if img_url is not None:
data_photo = {
"url": img_url,
"keyboard": TELEGRAM_KEYBOARD_KODI,
"disable_notification": True,
}
self.call_service(
"{}/send_photo".format(self._notifier_bot),
target=target,
**data_photo,
)
            message += "\n{}\nEND".format(img_url)
data_msg = {
"message": message,
"title": "*{}*".format(title),
"inline_keyboard": TELEGRAM_INLINEKEYBOARD_KODI,
"disable_notification": True,
}
self.call_service(
"{}/send_message".format(self._notifier_bot),
target=target,
**data_msg,
)
def _adjust_kodi_lights(self, play=True):
k_l = self._lights["dim"][self._lights["state"]] + self._lights["off"]
for light_id in k_l:
if play:
light_state = self.get_state(light_id)
attrs_light = self.get_state(light_id, attribute="attributes")
if attrs_light:
attrs_light.update({"state": light_state})
self._light_states[light_id] = attrs_light
max_brightness = self._get_max_brightness_ambient_lights()
if light_id in self._lights["off"]:
self.log(
"Apagando light {} para KODI PLAY".format(
light_id
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_off", entity_id=light_id, transition=2
)
elif ("brightness" in attrs_light.keys()) and (
attrs_light["brightness"] > max_brightness
):
self.log(
"Atenuando light {} para KODI PLAY".format(
light_id
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_on",
entity_id=light_id,
transition=2,
brightness=max_brightness,
)
else:
try:
state_before = self._light_states[light_id]
except KeyError:
state_before = {}
if ("state" in state_before) and (
state_before["state"] == "on"
):
try:
new_state_attrs = {
"xy_color": state_before["xy_color"],
"brightness": state_before["brightness"],
}
except KeyError:
new_state_attrs = {
"color_temp": state_before["color_temp"],
"brightness": state_before["brightness"],
}
self.log(
"Reponiendo light {}, con state_before={}".format(
light_id, new_state_attrs
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_on",
entity_id=light_id,
transition=2,
**new_state_attrs,
)
else:
self.log(
"Doing nothing with light {}, state_before={}".format(
light_id, state_before
),
level=LOG_LEVEL,
log=LOGGER,
)
# noinspection PyUnusedLocal
def kodi_state(self, entity, attribute, old, new, kwargs):
"""Kodi state change main control."""
if new == "playing":
kodi_attrs = self.get_state(
self._media_player, attribute="attributes"
)
self._is_playing_video = (
"media_content_type" in kodi_attrs
and kodi_attrs["media_content_type"] in TYPE_HA_ITEMS_NOTIFY
)
if self._is_playing_video:
self._ask_for_playing_item()
elif (new == "idle") and self._is_playing_video:
self._is_playing_video = False
self._last_play = self.datetime()
self.log(
"KODI STOP. old:{}, new:{}, type_lp={}".format(
old, new, type(self._last_play)
),
level=LOG_LEVEL,
log=LOGGER,
)
self._adjust_kodi_lights(play=False)
elif new == "off":
self._is_playing_video = False
self.log(
"KODI turned off. old:{}, new:{}, type_lp={}".format(
old, new, type(self._last_play)
),
level=LOG_LEVEL,
log=LOGGER,
)
self._light_states = {}
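# Hedged configuration sketch: as an AppDaemon app, KodiAssistant would be wired
# up through apps.yaml. Only `lights_dim_on`, `lights_dim_off` and `lights_off`
# are read from self.args above; `media_player`, `notifier` and `chatid_sensor`
# are read from the global AppDaemon config (self.config). The app name, module
# name and entity ids below are illustrative assumptions only.
#
#   kodi_ambient:
#     module: kodi_ambient_lights
#     class: KodiAssistant
#     lights_dim_on: light.sofa,light.bookshelf
#     lights_dim_off: light.hall
#     lights_off: light.bedroom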
|
[
"datetime.timedelta",
"urllib.parse.unquote_plus"
] |
[((6077, 6119), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': "(item['runtime'] / 3600)"}), "(hours=item['runtime'] / 3600)\n", (6089, 6119), True, 'import datetime as dt\n'), ((4381, 4405), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (4393, 4405), True, 'import datetime as dt\n'), ((6759, 6790), 'urllib.parse.unquote_plus', 'parse.unquote_plus', (['raw_img_url'], {}), '(raw_img_url)\n', (6777, 6790), False, 'from urllib import parse\n')]
|
import sys
from fp_lib.common import cliparser
from fp_lib.common import log
from fpstackutils.commands import nova
LOG = log.getLogger(__name__)
def main():
cli_parser = cliparser.SubCliParser('Python Nova Utils')
cli_parser.register_clis(nova.ResourcesInit,
nova.VMCleanup, nova.VMTest, nova.VMEvacuateTest)
try:
cli_parser.call()
return 0
except KeyboardInterrupt:
LOG.error("user interrupt")
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"fp_lib.common.cliparser.SubCliParser",
"fp_lib.common.log.getLogger"
] |
[((124, 147), 'fp_lib.common.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (137, 147), False, 'from fp_lib.common import log\n'), ((179, 222), 'fp_lib.common.cliparser.SubCliParser', 'cliparser.SubCliParser', (['"""Python Nova Utils"""'], {}), "('Python Nova Utils')\n", (201, 222), False, 'from fp_lib.common import cliparser\n')]
|
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# Note: the required NLTK corpora must be downloaded first,
# e.g. nltk.download('stopwords') and nltk.download('wordnet')
# (index: https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml)
import re
import string
from typing import Dict
from data_classes import *
def preprocess(docs: Dict[str,str], **kwargs):
"""
    Takes a dictionary of documents and transforms them into the compact corpus and Vocabulary data structures in preparation for clustering.
This process turns each document from a string of words into a dictionary with (k, v) of (word id, TF-IDF score).
Args:
docs (Dict[str,str]): Documents to preprocess. Must be in format {doc_id (str): doc_text (str)}
**kwargs:
            corpus_min_frequency (optional): Minimum corpus-wide frequency each word needs to meet in order to be retained in clustering. Defaults to 2.
            doc_min_frequency (optional): Minimum document frequency each word needs to meet in order to be retained in each document. Defaults to 2.
            tfidf_digits (optional): Number of decimal places to round TF-IDF scores. Defaults to 4.
            stop_words (optional): Words to remove from the corpus. If none is provided, nltk's English stopword list is used.
            lemmatizer (optional): Lemmatizer to be used. Must have a .lemmatize(word) function. If none is provided, nltk's WordNetLemmatizer is used.
Returns:
corpus (Dict[int, ClusterContents]): The document corpus where each document is represented as a dictionary of word_id's and the corresponding TF-IDF scores.
doc_name_map (Dict[str, int]): Mapping of passed document name to cluster (document) id.
vocabulary (Vocabulary): The vocabulary for the given corpus.
"""
# Establish parameter values
params = {'corpus_min_frequency':2, 'doc_min_frequency':2, 'tfidf_digits':4, 'stop_words': set(stopwords.words('english')), 'lemmatizer': WordNetLemmatizer()}
if kwargs is not None:
for k,v in kwargs.items():
params[k] = v
# print(params)
for doc in docs:
# Lowercase
current_doc = docs[doc].lower()
# Remove punctuation and symbols
regex = re.compile(f"[{re.escape(string.punctuation)}]")
current_doc = regex.sub('', current_doc)
# Remove numbers
        current_doc = re.sub(r'\d', '', current_doc)
# Tokenize
current_doc = current_doc.split(' ')
# Remove stopwords and empty strings
current_doc = [word for word in current_doc if word not in params['stop_words'] and word]
# Lemmatize
current_doc = [params['lemmatizer'].lemmatize(word) for word in current_doc]
# Transform to vector format {word: frequency}
transformed_doc = {}
for word in current_doc:
if word not in transformed_doc:
transformed_doc[word] = 1
else:
transformed_doc[word] += 1
# Remove low frequency words from doc
transformed_doc = {k:v for (k,v) in transformed_doc.items() if v >= params['doc_min_frequency']}
# Replace the original doc with transformed_doc
docs[doc] = transformed_doc
# Create vocabulary
vocabulary = Vocabulary({}, {}, {})
current_word_id = 0
for doc in docs:
for word in docs[doc]:
if word in vocabulary.word_id:
existing_id = vocabulary.word_id[word]
vocabulary.id_freq[existing_id] += docs[doc][word]
else:
vocabulary.word_id[word] = current_word_id
vocabulary.id_word[current_word_id] = word
vocabulary.id_freq[current_word_id] = docs[doc][word]
current_word_id += 1
# Find corpus-wide low-frequency words
infrequent_corpus_word_ids = []
for word_id in vocabulary.id_freq:
if vocabulary.id_freq[word_id] < params['corpus_min_frequency']:
infrequent_corpus_word_ids.append(word_id)
# Remove corpus-wide low-frequency words from vocabulary
for word_id_to_drop in infrequent_corpus_word_ids:
vocabulary.id_freq.pop(word_id_to_drop)
word_to_drop = vocabulary.id_word[word_id_to_drop]
vocabulary.id_word.pop(word_id_to_drop)
vocabulary.word_id.pop(word_to_drop)
# Remove corpus-wide low-frequency words from corpus
# Change words to word_ids
# Transform word frequencies to TF-IDF scores
# Create clusters, cluster_ids
doc_name_map = DocumentNameMap({}, {})
cluster_id = 0
new_docs = {}
for doc in docs:
cluster_contents = {}
for word in docs[doc]:
if word in vocabulary.word_id:
word_id = vocabulary.word_id[word]
word_tfidf = float(docs[doc][word]) / float(vocabulary.id_freq[word_id])
cluster_contents[word_id] = round(word_tfidf, ndigits=params['tfidf_digits'])
new_docs[cluster_id] = ClusterContents(cluster_id=cluster_id, contents=cluster_contents)
doc_name_map.name_id[doc] = cluster_id
doc_name_map.id_name[cluster_id] = doc
cluster_id += 1
return new_docs, doc_name_map, vocabulary
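if __name__ == "__main__":
    # Hedged usage sketch with toy documents (illustrative only): preprocess()
    # expects {doc_id: raw_text} and returns the compact corpus, the
    # document-name map and the vocabulary used downstream for clustering.
    sample_docs = {
        "d1": "cats chase mice and cats sleep while mice hide from cats",
        "d2": "dogs chase cats and dogs bark while dogs sleep near dogs",
    }
    corpus, doc_name_map, vocabulary = preprocess(sample_docs, doc_min_frequency=1)
    print(corpus[doc_name_map.name_id["d1"]].contents)  # {word_id: score}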
|
[
"re.escape",
"re.sub",
"nltk.corpus.stopwords.words",
"nltk.stem.WordNetLemmatizer"
] |
[((2002, 2021), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2019, 2021), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2420, 2450), 're.sub', 're.sub', (['"""\\\\d"""', '""""""', 'current_doc'], {}), "('\\\\d', '', current_doc)\n", (2426, 2450), False, 'import re\n'), ((1959, 1985), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1974, 1985), False, 'from nltk.corpus import stopwords\n'), ((2290, 2319), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (2299, 2319), False, 'import re\n')]
|
from sklearn.cluster import KMeans
from random import randint
import numpy as np
import csv
import matplotlib.pyplot as plt
matriz = []
arrayCriacaoCentroides = []
with open('dataset_iris.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
largPetala = (row['larguraPetala'])
compSepala = (row['comprimentoSepala'])
matriz.append([float(largPetala), float(compSepala)])
matriz = np.array(matriz)
def criacaoCentroideRandom():
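    # Build three random 2-D centroids with integer coordinates in [0, 9]
    # (inclusive) and keep a copy in the global arrayCriacaoCentroides for plotting.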
array = [[randint(0, 9), randint(0, 9)], [randint(0, 9), randint(0, 9)], [randint(0, 9), randint(0, 9)]]
array = np.array(array)
global arrayCriacaoCentroides
arrayCriacaoCentroides = array
return array
def avaliacaoAcertos(arrayAnalise):
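    # Assuming the CSV keeps the usual iris ordering (three blocks of 50 samples,
    # one per species), count the cluster labels inside each 50-sample block, take
    # the majority label as correct and return the overall accuracy percentage.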
g1 = 0
g2 = 0
g3 = 0
acertos = 0
for i in range(0, len(arrayAnalise)):
if (arrayAnalise[i] == 0):
g1+=1
if (arrayAnalise[i] == 1):
g2+=1
if (arrayAnalise[i] == 2):
g3+=1
if (i == 49) or (i == 99) or (i == 149):
print("Agrupamento:", g1, g2, g3)
acertos += max(g1, g2, g3)
g1 = 0
g2 = 0
g3 = 0
return round(acertos/150*100, 2)
for i in range(1, 4):
if (i != 3):
        # My own random centroid generation;
trabmeans = KMeans(n_clusters=3, init=criacaoCentroideRandom(), n_init=1).fit(matriz)
else:
        # Centroid generation optimized by the library itself;
trabmeans = KMeans(n_clusters=3).fit(matriz)
plt.figure(i)
plt.scatter(matriz[:, 0], matriz[:, 1], s = 100, c = trabmeans.labels_)
if (i != 3):
plt.scatter(arrayCriacaoCentroides[:, 0], arrayCriacaoCentroides[:, 1], s = 100, c = 'green', label = 'Centroides Iniciais')
plt.scatter(trabmeans.cluster_centers_[:, 0], trabmeans.cluster_centers_[:, 1], s = 100, c = 'red', label = 'Centroides Finais')
plt.xlabel('Largura da Petala')
plt.ylabel('Comprimento da Sepala')
plt.legend()
if (i != 3):
print("Centroide inicial - Grupo " + str(i) + ":", arrayCriacaoCentroides[0], arrayCriacaoCentroides[1], arrayCriacaoCentroides[2])
else:
print("Coordenadas do Centroide geradas de maneira otimizada pelo algoritmo.")
print("Porcentagem acerto - Grupo " + str(i) + ":", avaliacaoAcertos(trabmeans.labels_))
print("\n")
plt.show()
|
[
"matplotlib.pyplot.show",
"random.randint",
"csv.DictReader",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((432, 448), 'numpy.array', 'np.array', (['matriz'], {}), '(matriz)\n', (440, 448), True, 'import numpy as np\n'), ((2352, 2362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((221, 244), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (235, 244), False, 'import csv\n'), ((601, 616), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (609, 616), True, 'import numpy as np\n'), ((1520, 1533), 'matplotlib.pyplot.figure', 'plt.figure', (['i'], {}), '(i)\n', (1530, 1533), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1605), 'matplotlib.pyplot.scatter', 'plt.scatter', (['matriz[:, 0]', 'matriz[:, 1]'], {'s': '(100)', 'c': 'trabmeans.labels_'}), '(matriz[:, 0], matriz[:, 1], s=100, c=trabmeans.labels_)\n', (1549, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1890), 'matplotlib.pyplot.scatter', 'plt.scatter', (['trabmeans.cluster_centers_[:, 0]', 'trabmeans.cluster_centers_[:, 1]'], {'s': '(100)', 'c': '"""red"""', 'label': '"""Centroides Finais"""'}), "(trabmeans.cluster_centers_[:, 0], trabmeans.cluster_centers_[:,\n 1], s=100, c='red', label='Centroides Finais')\n", (1775, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Largura da Petala"""'], {}), "('Largura da Petala')\n", (1907, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1968), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Comprimento da Sepala"""'], {}), "('Comprimento da Sepala')\n", (1943, 1968), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1985), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((1635, 1758), 'matplotlib.pyplot.scatter', 'plt.scatter', (['arrayCriacaoCentroides[:, 0]', 'arrayCriacaoCentroides[:, 1]'], {'s': '(100)', 'c': '"""green"""', 'label': '"""Centroides Iniciais"""'}), "(arrayCriacaoCentroides[:, 0], arrayCriacaoCentroides[:, 1], s=\n 100, c='green', label='Centroides Iniciais')\n", (1646, 1758), True, 'import matplotlib.pyplot as plt\n'), ((494, 507), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (501, 507), False, 'from random import randint\n'), ((509, 522), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (516, 522), False, 'from random import randint\n'), ((526, 539), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (533, 539), False, 'from random import randint\n'), ((541, 554), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (548, 554), False, 'from random import randint\n'), ((558, 571), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (565, 571), False, 'from random import randint\n'), ((573, 586), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (580, 586), False, 'from random import randint\n'), ((1482, 1502), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (1488, 1502), False, 'from sklearn.cluster import KMeans\n')]
|
""" 個股月營收資訊 """
import re
import sys
import pdb
import pandas as pd
from stock_web_crawler import stock_crawler, delete_header, excel_formatting
import global_vars
def main():
global_vars.initialize_proxy()
""" valid input formats """
# inputs = "台積電 聯電"
# inputs = "2330 2314"
# inputs = "台積電, 2314"
inputs = input("輸入要搜尋的公司名稱或股號:\n(press q to exit)\n")
if inputs == "q":
sys.exit(0)
stocks_ID = list()
stock_dict = stock_ID_name_mapping()
delims = r"[\s\t,\.;]+"
inputs = re.split(delims, inputs)
for stock in inputs:
if stock not in stock_dict:
print("Invalid input!", stock, "is not in the stock ticker symbol table")
sys.exit(-1)
if stock.isdigit():
stocks_ID.append(stock)
else: # map company name to stock ID
stocks_ID.append(stock_dict[stock])
print("stocks ID:", stocks_ID)
stock_salemon_file = global_vars.DIR_PATH + "個股月營收.xlsx"
with pd.ExcelWriter(stock_salemon_file, mode='w', engine="xlsxwriter") as writer:
# headers = ["月別", "開盤", "收盤", "最高", "最低", "漲跌(元)", "漲跌(%)", "單月營收(億)", "單月月增(%)", "單月年增(%)", "累計營收(億)", "累計年增(%)", "合併單月營收(億)", "合併單月月增(%)", "合併單月年增(%)", "合併累計營收(億)", "合併累計年增(%)"]
table_ID = "#divDetail"
for stock_ID in stocks_ID:
url = f"https://goodinfo.tw/StockInfo/ShowSaleMonChart.asp?STOCK_ID={stock_ID}"
df = stock_crawler(url, None, table_ID)
# reassign headers
headers = list()
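            # the scraped table typically comes back with multi-row headers (a pandas MultiIndex);
            # collapse each column tuple into a single underscore-joined name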
for i in range(len(df.columns)):
headers.append('_'.join(pd.Series(df.columns[i]).drop_duplicates().tolist()))
df.columns = headers
delete_header(df, headers)
sheet_name = f"{stock_dict[stock_ID]}"
df.to_excel(writer, index=False, encoding="UTF-8", sheet_name=sheet_name, freeze_panes=(1,2)) # write to different sheets
excel_formatting(writer, df, sheet_name)
# example row of the mapping CSV: 1101,台泥,台灣水泥股份有限公司 (stock ID, short name, full company name)
def stock_ID_name_mapping():
stock_dict = dict()
with open(global_vars.DIR_PATH + "公司股市代號對照表.csv", "r", encoding="UTF-8") as file_r:
file_r.readline()
for line in file_r:
line = line.split(",")
stock_ID = line[0]
stock_name = line[1]
if stock_ID not in stock_dict:
stock_dict[stock_ID] = stock_name
if stock_name not in stock_dict:
stock_dict[stock_name] = stock_ID
return stock_dict
if __name__ == "__main__":
main()
|
[
"stock_web_crawler.excel_formatting",
"re.split",
"global_vars.initialize_proxy",
"pandas.Series",
"stock_web_crawler.stock_crawler",
"stock_web_crawler.delete_header",
"pandas.ExcelWriter",
"sys.exit"
] |
[((182, 212), 'global_vars.initialize_proxy', 'global_vars.initialize_proxy', ([], {}), '()\n', (210, 212), False, 'import global_vars\n'), ((543, 567), 're.split', 're.split', (['delims', 'inputs'], {}), '(delims, inputs)\n', (551, 567), False, 'import re\n'), ((421, 432), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (429, 432), False, 'import sys\n'), ((1008, 1073), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['stock_salemon_file'], {'mode': '"""w"""', 'engine': '"""xlsxwriter"""'}), "(stock_salemon_file, mode='w', engine='xlsxwriter')\n", (1022, 1073), True, 'import pandas as pd\n'), ((727, 739), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (735, 739), False, 'import sys\n'), ((1450, 1484), 'stock_web_crawler.stock_crawler', 'stock_crawler', (['url', 'None', 'table_ID'], {}), '(url, None, table_ID)\n', (1463, 1484), False, 'from stock_web_crawler import stock_crawler, delete_header, excel_formatting\n'), ((1755, 1781), 'stock_web_crawler.delete_header', 'delete_header', (['df', 'headers'], {}), '(df, headers)\n', (1768, 1781), False, 'from stock_web_crawler import stock_crawler, delete_header, excel_formatting\n'), ((1979, 2019), 'stock_web_crawler.excel_formatting', 'excel_formatting', (['writer', 'df', 'sheet_name'], {}), '(writer, df, sheet_name)\n', (1995, 2019), False, 'from stock_web_crawler import stock_crawler, delete_header, excel_formatting\n'), ((1643, 1667), 'pandas.Series', 'pd.Series', (['df.columns[i]'], {}), '(df.columns[i])\n', (1652, 1667), True, 'import pandas as pd\n')]
|
"""
********************************************************************************
* Name: tethys_app_quota.py
* Author: tbayer, mlebarron
* Created On: April 2, 2019
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import logging
from django.db import models
from tethys_quotas.models.entity_quota import EntityQuota
from tethys_apps.models import TethysApp
log = logging.getLogger('tethys.' + __name__)
class TethysAppQuota(EntityQuota):
"""
entity_id (IntegerField): id of the entity.
"""
class Meta:
verbose_name = 'Tethys App Quota'
entity = models.ForeignKey(TethysApp, on_delete=models.CASCADE)
|
[
"django.db.models.ForeignKey",
"logging.getLogger"
] |
[((436, 475), 'logging.getLogger', 'logging.getLogger', (["('tethys.' + __name__)"], {}), "('tethys.' + __name__)\n", (453, 475), False, 'import logging\n'), ((649, 703), 'django.db.models.ForeignKey', 'models.ForeignKey', (['TethysApp'], {'on_delete': 'models.CASCADE'}), '(TethysApp, on_delete=models.CASCADE)\n', (666, 703), False, 'from django.db import models\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import hoomd
import hoomd.simple_force
import hoomd.md
import numpy as np
context = hoomd.context.initialize("--notice-level=10 --mode=cpu")
uc = hoomd.lattice.unitcell(
N=1,
a1=[3, 0, 0],
a2=[0, 3, 0],
a3=[0, 0, 3],
dimensions=2,
position=[[0, 0, 0]],
type_name=["R"],
mass=[1.0],
moment_inertia=[[1, 1, 1]],
orientation=[[0.9, 0, 0, 0.2]],
)
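# replicate the unit cell into a 10 x 10 two-dimensional lattice of rigid-body centers "R"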
system = hoomd.init.create_lattice(unitcell=uc, n=10)
system.particles.types.add("A")
pairs = hoomd.md.pair.lj(2.5, nlist=hoomd.md.nlist.cell())
pairs.pair_coeff.set("A", "A", epsilon=1.0, sigma=1.0)
pairs.pair_coeff.set("R", "A", epsilon=1.0, sigma=1.0)
pairs.pair_coeff.set("R", "R", epsilon=1.0, sigma=1.0)
rigid = hoomd.md.constrain.rigid()
rigid.set_param("R", types=["A"], positions=[(-1, 0, 0)])
rigid.create_bodies()
snap_init = system.take_snapshot()
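# keep the dynamics two-dimensional and integrate the rigid-body centers in an NVT ensemble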
hoomd.md.update.enforce2d()
hoomd.md.integrate.mode_standard(dt=0.005)
hoomd.md.integrate.nvt(hoomd.group.rigid_center(), kT=1, tau=1)
nmols = min(max(snap_init.particles.body) + 1, snap_init.particles.N)
print(nmols)
rc = hoomd.group.rigid_center()
force = hoomd.simple_force.SimpleForce(rc)
print(rc.cpp_group.getNumMembersGlobal())
hoomd.run(1000)
|
[
"hoomd.simple_force.SimpleForce",
"hoomd.md.update.enforce2d",
"hoomd.md.nlist.cell",
"hoomd.md.constrain.rigid",
"hoomd.lattice.unitcell",
"hoomd.context.initialize",
"hoomd.run",
"hoomd.group.rigid_center",
"hoomd.init.create_lattice",
"hoomd.md.integrate.mode_standard"
] |
[((236, 292), 'hoomd.context.initialize', 'hoomd.context.initialize', (['"""--notice-level=10 --mode=cpu"""'], {}), "('--notice-level=10 --mode=cpu')\n", (260, 292), False, 'import hoomd\n'), ((298, 500), 'hoomd.lattice.unitcell', 'hoomd.lattice.unitcell', ([], {'N': '(1)', 'a1': '[3, 0, 0]', 'a2': '[0, 3, 0]', 'a3': '[0, 0, 3]', 'dimensions': '(2)', 'position': '[[0, 0, 0]]', 'type_name': "['R']", 'mass': '[1.0]', 'moment_inertia': '[[1, 1, 1]]', 'orientation': '[[0.9, 0, 0, 0.2]]'}), "(N=1, a1=[3, 0, 0], a2=[0, 3, 0], a3=[0, 0, 3],\n dimensions=2, position=[[0, 0, 0]], type_name=['R'], mass=[1.0],\n moment_inertia=[[1, 1, 1]], orientation=[[0.9, 0, 0, 0.2]])\n", (320, 500), False, 'import hoomd\n'), ((546, 590), 'hoomd.init.create_lattice', 'hoomd.init.create_lattice', ([], {'unitcell': 'uc', 'n': '(10)'}), '(unitcell=uc, n=10)\n', (571, 590), False, 'import hoomd\n'), ((856, 882), 'hoomd.md.constrain.rigid', 'hoomd.md.constrain.rigid', ([], {}), '()\n', (880, 882), False, 'import hoomd\n'), ((1000, 1027), 'hoomd.md.update.enforce2d', 'hoomd.md.update.enforce2d', ([], {}), '()\n', (1025, 1027), False, 'import hoomd\n'), ((1029, 1071), 'hoomd.md.integrate.mode_standard', 'hoomd.md.integrate.mode_standard', ([], {'dt': '(0.005)'}), '(dt=0.005)\n', (1061, 1071), False, 'import hoomd\n'), ((1225, 1251), 'hoomd.group.rigid_center', 'hoomd.group.rigid_center', ([], {}), '()\n', (1249, 1251), False, 'import hoomd\n'), ((1260, 1294), 'hoomd.simple_force.SimpleForce', 'hoomd.simple_force.SimpleForce', (['rc'], {}), '(rc)\n', (1290, 1294), False, 'import hoomd\n'), ((1337, 1352), 'hoomd.run', 'hoomd.run', (['(1000)'], {}), '(1000)\n', (1346, 1352), False, 'import hoomd\n'), ((1095, 1121), 'hoomd.group.rigid_center', 'hoomd.group.rigid_center', ([], {}), '()\n', (1119, 1121), False, 'import hoomd\n'), ((659, 680), 'hoomd.md.nlist.cell', 'hoomd.md.nlist.cell', ([], {}), '()\n', (678, 680), False, 'import hoomd\n')]
|
import logging
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from matplotlib.animation import FuncAnimation
from scipy import integrate
from utils.objects import Body
logger = logging.getLogger(__name__)
# Arbitrary value for G (gravitational constant)
G = 1
def create_initial_conditions(bodies: List[Body]) -> List[int]:
"""
:param bodies: List of Body classes
:return: list of starting x, y, vx, and vy values for each Body in bodies
"""
initial = []
# Loop through bodies and create initial conditions to be passed into the integrator
logger.info(f"Creating initial conditions for the {len(bodies)} bodies")
for body in bodies:
values = [body.x, body.vx, body.y, body.vy]
initial += values
return initial
def calc_2d_distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Returns:
Distance between the 2-dimensional co-ordinates supplied.
"""
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def calc_dvel(c1: float, c2: float, r: float, m2: float) -> float:
"""
Calculates the change in velocity on a target body due to the gravitational force
of another body (source body) in a single dimension.
Args:
c1: value for target body position in x or y dimension
c2: value for source body position in x or y dimension
r: distance between 2 bodies
m2: mass of the source body
Returns:
change in target body velocity (float)
"""
return (-G * m2 * (c1 - c2)) * r ** (-3)
def n_body_func(t: int, pos_vel: np.ndarray, bodies: List[Body]) -> np.ndarray:
"""
Function to be passed into the ode integrator. Calculates and stores the changes
in spatial and velocity values.
Args:
t: time step
pos_vel: array containing x, y, vx and vy values for each body
[x1, vx1, y1, vy1, x2, ...]
bodies: list of Body objects
Returns:
array containing change in spatial and velocity values for each body
"""
# Set up array to store updated spatial and velocity values
dpos_dvel = np.zeros(4 * len(bodies))
# Change in x, y is velocity in x, y
dpos_dvel[0 : len(dpos_dvel) : 4] = pos_vel[1 : len(pos_vel) : 4]
dpos_dvel[2 : len(dpos_dvel) : 4] = pos_vel[3 : len(pos_vel) : 4]
# Loop through bodies, calculating change in vx, vy due to all other bodies
for i, body in enumerate(bodies):
# Extract x, y values of body
x1 = pos_vel[i * 4]
y1 = pos_vel[i * 4 + 2]
vx1 = 0
vy1 = 0
for j, other_body in enumerate(bodies):
# Check bodies aren't the same
if i != j:
# Extract x, y & mass of other body
x2 = pos_vel[j * 4]
y2 = pos_vel[j * 4 + 2]
# Distance to other body
r = calc_2d_distance(x1=x1, y1=y1, x2=x2, y2=y2,)
# Change in x, y
vx1 += calc_dvel(c1=x1, c2=x2, r=r, m2=other_body.mass)
vy1 += calc_dvel(c1=y1, c2=y2, r=r, m2=other_body.mass)
# Add resultant change in vel to array
dpos_dvel[i * 4 + 1] = vx1
dpos_dvel[i * 4 + 3] = vy1
return dpos_dvel
def calc_orbits(bodies: List[Body], t0: int, t1: int, dt: int) -> np.ndarray:
"""
Calculate the change in x, y, vx and vy at each time step between t0 and t1 due to
    the gravitational forces of all other bodies in the system. The integrator used is
    dop853 (the Dormand-Prince explicit Runge-Kutta method of order 8(5, 3)).
Args:
bodies: List of Body classes that describe the starting conditions and
masses of the bodies
t0: start time
t1: end time
dt: time step (seconds)
Returns:
Array containing spatial coordinates and velocities of bodies at each
time step
"""
logger.info(
f"""Orbit settings: n_bodies: {len(bodies)}, t0: {t0}, t1: {t1}, dt: {dt}"""
)
# Initial conditions (x, vx, y, vy)
initial = create_initial_conditions(bodies=bodies)
# Time period over which to calculate orbit paths
t = np.linspace(t0, t1, dt)
# Array for solution
y = np.zeros((len(t), len(bodies) * 4))
y[0, :] = initial
# Setup integrator
integrator = (
integrate.ode(n_body_func)
.set_integrator("dop853", rtol=1e-6, atol=1e-10)
.set_initial_value(initial, t0)
.set_f_params(bodies)
)
# Iterate over time intervals and integrate, storing updated spatial coordinates
# and velocities of bodies
progress_text = st.sidebar.text(f"Iteration: 0/{len(t)}")
progress_bar = st.sidebar.progress(0)
logger.info("Calculating orbits")
for i in range(1, len(t)):
progress_text.text(f"Iteration: {i}/{len(t)-1}")
progress_bar.progress((i + 1) / len(t))
y[i, :] = integrator.integrate(t[i])
progress_text.text("Complete!")
return y
def animate_orbits(orbit_paths: np.ndarray) -> None:
"""
Animates the orbits
Args:
orbit_paths: array containing spatial and velocity values over time
"""
logger.info("Animating orbits")
fig = plt.figure(figsize=(6, 6))
# set size of axis based on max/min spatial values
x_min = orbit_paths[:, 0::4].min() * 1.1
x_max = orbit_paths[:, 0::4].max() * 1.1
y_min = orbit_paths[:, 2::4].min() * 1.1
y_max = orbit_paths[:, 2::4].max() * 1.1
ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max))
n_bodies = int(orbit_paths.shape[1] / 4)
colours = ["red", "blue", "orange", "green", "black"]
lines = []
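    # the first n_bodies artists draw dashed orbit trails,
    # the next n_bodies draw the current positions as markers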
for index in range(n_bodies * 2):
if index < n_bodies:
lobj = ax.plot([], [], "--", lw=1, color=colours[index % len(colours)])[0]
else:
lobj = ax.plot(
[], [], "o", color=colours[(index - n_bodies) % len(colours)]
)[0]
lines.append(lobj)
def init():
for line in lines:
line.set_data([], [])
return lines
def animate(i):
for j, line in enumerate(lines):
if j < n_bodies:
orbit_tail_length = 30
if i > orbit_tail_length:
x = orbit_paths[i - orbit_tail_length : i, j * 4]
y = orbit_paths[i - orbit_tail_length : i, j * 4 + 2]
else:
x = orbit_paths[:i, j * 4]
y = orbit_paths[:i, j * 4 + 2]
else:
x = orbit_paths[i, (j - n_bodies) * 4]
y = orbit_paths[i, (j - n_bodies) * 4 + 2]
line.set_data(x, y)
return lines
# TODO: ensure a consistent maximum number of frames so animations aren't too slow
# or too fast
anim = FuncAnimation(
fig, animate, init_func=init, frames=orbit_paths.shape[0], interval=1, blit=True
)
plt.show()
def plot_orbits(orbit_paths: np.ndarray, title: str) -> None:
"""
Plots the orbits
Args:
orbit_paths: array containing spatial and velocity values over time
title: title to use for figure
"""
logger.info("Plotting orbits")
fig = plt.figure(figsize=(10, 10))
plt.title(title)
for i in range(int(orbit_paths.shape[1] / 4)):
plt.plot(orbit_paths[:, i * 4], orbit_paths[:, i * 4 + 2])
st.pyplot(fig)
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"scipy.integrate.ode",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"streamlit.sidebar.progress",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.linspace",
"streamlit.pyplot",
"logging.getLogger"
] |
[((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((4142, 4165), 'numpy.linspace', 'np.linspace', (['t0', 't1', 'dt'], {}), '(t0, t1, dt)\n', (4153, 4165), True, 'import numpy as np\n'), ((4667, 4689), 'streamlit.sidebar.progress', 'st.sidebar.progress', (['(0)'], {}), '(0)\n', (4686, 4689), True, 'import streamlit as st\n'), ((5188, 5214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (5198, 5214), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5510), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(x_min, x_max)', 'ylim': '(y_min, y_max)'}), '(xlim=(x_min, x_max), ylim=(y_min, y_max))\n', (5468, 5510), True, 'import matplotlib.pyplot as plt\n'), ((6794, 6893), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'frames': 'orbit_paths.shape[0]', 'interval': '(1)', 'blit': '(True)'}), '(fig, animate, init_func=init, frames=orbit_paths.shape[0],\n interval=1, blit=True)\n', (6807, 6893), False, 'from matplotlib.animation import FuncAnimation\n'), ((6908, 6918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6916, 6918), True, 'import matplotlib.pyplot as plt\n'), ((7191, 7219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7201, 7219), True, 'import matplotlib.pyplot as plt\n'), ((7224, 7240), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7233, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7379), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (7374, 7379), True, 'import streamlit as st\n'), ((7385, 7395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7393, 7395), True, 'import matplotlib.pyplot as plt\n'), ((7301, 7359), 'matplotlib.pyplot.plot', 'plt.plot', (['orbit_paths[:, i * 4]', 'orbit_paths[:, i * 4 + 2]'], {}), '(orbit_paths[:, i * 4], orbit_paths[:, i * 4 + 2])\n', (7309, 7359), True, 'import matplotlib.pyplot as plt\n'), ((4309, 4335), 'scipy.integrate.ode', 'integrate.ode', (['n_body_func'], {}), '(n_body_func)\n', (4322, 4335), False, 'from scipy import integrate\n')]
|
"""Provide a generic class for novelWriter item file representation.
Copyright (c) 2022 <NAME>
For further information see https://github.com/peter88213/yw2nw
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
from pywriter.pywriter_globals import ERROR
class NwdFile:
"""abstract novelWriter item file representation.
Public methods:
read() -- read a content file.
write() -- write a content file.
"""
EXTENSION = '.nwd'
def __init__(self, prj, nwItem):
"""Define instance variables.
Positional arguments:
prj -- NwxFile instance: the novelWriter project represenation.
nwItem -- NwItem instance associated with the .nwd file.
"""
self._prj = prj
self._nwItem = nwItem
self._filePath = os.path.dirname(self._prj.filePath) + self._prj.CONTENT_DIR + nwItem.nwHandle + self.EXTENSION
self._lines = []
def read(self):
"""Read a content file.
Return a message beginning with the ERROR constant in case of error.
"""
try:
with open(self._filePath, 'r', encoding='utf-8') as f:
self._lines = f.read().split('\n')
return 'Item data read in.'
except:
return f'{ERROR}Can not read "{os.path.normpath(self._filePath)}".'
def write(self):
"""Write a content file.
Return a message beginning with the ERROR constant in case of error.
"""
lines = [f'%%~name: {self._nwItem.nwName}',
f'%%~path: {self._nwItem.nwParent}/{self._nwItem.nwHandle}',
f'%%~kind: {self._nwItem.nwClass}/{self._nwItem.nwLayout}',
]
lines.extend(self._lines)
text = '\n'.join(lines)
try:
with open(self._filePath, 'w', encoding='utf-8') as f:
f.write(text)
return 'nwd file saved.'
except:
return f'{ERROR}Can not write "{os.path.normpath(self._filePath)}".'
|
[
"os.path.dirname",
"os.path.normpath"
] |
[((894, 929), 'os.path.dirname', 'os.path.dirname', (['self._prj.filePath'], {}), '(self._prj.filePath)\n', (909, 929), False, 'import os\n'), ((1414, 1446), 'os.path.normpath', 'os.path.normpath', (['self._filePath'], {}), '(self._filePath)\n', (1430, 1446), False, 'import os\n'), ((2128, 2160), 'os.path.normpath', 'os.path.normpath', (['self._filePath'], {}), '(self._filePath)\n', (2144, 2160), False, 'import os\n')]
|
import sys
from operator import itemgetter
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
# -----------------------------#
# Compute the scale factors used to resize
# the original input image (the image pyramid)
# -----------------------------#
def calculateScales(img):
copy_img = img.copy()
pr_scale = 1.0
h, w, _ = copy_img.shape
if min(w, h) > 500:
pr_scale = 500.0 / min(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
elif max(w, h) < 500:
pr_scale = 500.0 / max(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
scales = []
factor = 0.709
factor_count = 0
minl = min(h, w)
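    # build the image pyramid: keep shrinking by a factor of 0.709 until the shorter side
    # drops below 12 px (the PNet input size)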
while minl >= 12:
scales.append(pr_scale * pow(factor, factor_count))
minl *= factor
factor_count += 1
return scales
# -------------------------------------#
# Process the PNet (12-net) outputs
# -------------------------------------#
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
cls_prob = np.swapaxes(cls_prob, 0, 1)
roi = np.swapaxes(roi, 0, 2)
stride = 0
    # stride is approximately equal to 2
if out_side != 1:
stride = float(2 * out_side - 1) / (out_side - 1)
(x, y) = np.where(cls_prob >= threshold)
boundingbox = np.array([x, y]).T
    # map the detections back to coordinates in the original image
bb1 = np.fix((stride * (boundingbox) + 0) * scale)
bb2 = np.fix((stride * (boundingbox) + 11) * scale)
# plt.scatter(bb1[:,0],bb1[:,1],linewidths=1)
# plt.scatter(bb2[:,0],bb2[:,1],linewidths=1,c='r')
# plt.show()
boundingbox = np.concatenate((bb1, bb2), axis=1)
dx1 = roi[0][x, y]
dx2 = roi[1][x, y]
dx3 = roi[2][x, y]
dx4 = roi[3][x, y]
score = np.array([cls_prob[x, y]]).T
offset = np.array([dx1, dx2, dx3, dx4]).T
boundingbox = boundingbox + offset * 12.0 * scale
rectangles = np.concatenate((boundingbox, score), axis=1)
rectangles = rect2square(rectangles)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
sc = rectangles[i][4]
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, sc])
return NMS(pick, 0.3)
# -----------------------------#
# Adjust rectangles into squares
# -----------------------------#
def rect2square(rectangles):
w = rectangles[:, 2] - rectangles[:, 0]
h = rectangles[:, 3] - rectangles[:, 1]
l = np.maximum(w, h).T
rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([l], 2, axis=0).T
return rectangles
# -------------------------------------#
# Non-maximum suppression (NMS)
# -------------------------------------#
def NMS(rectangles, threshold):
if len(rectangles) == 0:
return rectangles
boxes = np.array(rectangles)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
I = np.array(s.argsort())
pick = []
while len(I) > 0:
        xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])  # I[-1] has the highest prob score, I[0:-1] -> the others
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where(o <= threshold)[0]]
result_rectangle = boxes[pick].tolist()
return result_rectangle
# -------------------------------------#
# Refine the PNet candidate boxes using the RNet (24-net) outputs
# -------------------------------------#
def filter_face_24net(cls_prob, roi, rectangles, width, height, threshold):
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x1 = rectangles[pick, 0]
y1 = rectangles[pick, 1]
x2 = rectangles[pick, 2]
y2 = rectangles[pick, 3]
sc = np.array([prob[pick]]).T
dx1 = roi[pick, 0]
dx2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
w = x2 - x1
h = y2 - y1
x1 = np.array([(x1 + dx1 * w)[0]]).T
y1 = np.array([(y1 + dx2 * h)[0]]).T
x2 = np.array([(x2 + dx3 * w)[0]]).T
y2 = np.array([(y2 + dx4 * h)[0]]).T
rectangles = np.concatenate((x1, y1, x2, y2, sc), axis=1)
rectangles = rect2square(rectangles)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
sc = rectangles[i][4]
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, sc])
return NMS(pick, 0.3)
# -------------------------------------#
# Process the ONet (48-net) outputs
# -------------------------------------#
def filter_face_48net(cls_prob, roi, pts, rectangles, width, height, threshold):
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x1 = rectangles[pick, 0]
y1 = rectangles[pick, 1]
x2 = rectangles[pick, 2]
y2 = rectangles[pick, 3]
sc = np.array([prob[pick]]).T
dx1 = roi[pick, 0]
dx2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
w = x2 - x1
h = y2 - y1
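    # decode the 5 facial landmarks (typically eyes, nose tip and mouth corners):
    # x offsets are stored in pts[:, 0:5], y offsets in pts[:, 5:10]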
pts0 = np.array([(w * pts[pick, 0] + x1)[0]]).T
pts1 = np.array([(h * pts[pick, 5] + y1)[0]]).T
pts2 = np.array([(w * pts[pick, 1] + x1)[0]]).T
pts3 = np.array([(h * pts[pick, 6] + y1)[0]]).T
pts4 = np.array([(w * pts[pick, 2] + x1)[0]]).T
pts5 = np.array([(h * pts[pick, 7] + y1)[0]]).T
pts6 = np.array([(w * pts[pick, 3] + x1)[0]]).T
pts7 = np.array([(h * pts[pick, 8] + y1)[0]]).T
pts8 = np.array([(w * pts[pick, 4] + x1)[0]]).T
pts9 = np.array([(h * pts[pick, 9] + y1)[0]]).T
x1 = np.array([(x1 + dx1 * w)[0]]).T
y1 = np.array([(y1 + dx2 * h)[0]]).T
x2 = np.array([(x2 + dx3 * w)[0]]).T
y2 = np.array([(y2 + dx4 * h)[0]]).T
rectangles = np.concatenate((x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9),
axis=1)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, rectangles[i][4],
rectangles[i][5], rectangles[i][6], rectangles[i][7], rectangles[i][8], rectangles[i][9],
rectangles[i][10], rectangles[i][11], rectangles[i][12], rectangles[i][13], rectangles[i][14]])
return NMS(pick, 0.3)
# -------------------------------------#
# Face alignment
# -------------------------------------#
def Alignment_1(img, landmark):
if landmark.shape[0] == 68:
x = landmark[36, 0] - landmark[45, 0]
y = landmark[36, 1] - landmark[45, 1]
elif landmark.shape[0] == 5:
x = landmark[0, 0] - landmark[1, 0]
y = landmark[0, 1] - landmark[1, 1]
if x == 0:
angle = 0
else:
angle = math.atan(y / x) * 180 / math.pi
center = (img.shape[1] // 2, img.shape[0] // 2)
RotationMatrix = cv2.getRotationMatrix2D(center, angle, 1)
new_img = cv2.warpAffine(img, RotationMatrix, (img.shape[1], img.shape[0]))
RotationMatrix = np.array(RotationMatrix)
new_landmark = []
for i in range(landmark.shape[0]):
pts = []
pts.append(RotationMatrix[0, 0] * landmark[i, 0] + RotationMatrix[0, 1] * landmark[i, 1] + RotationMatrix[0, 2])
pts.append(RotationMatrix[1, 0] * landmark[i, 0] + RotationMatrix[1, 1] * landmark[i, 1] + RotationMatrix[1, 2])
new_landmark.append(pts)
new_landmark = np.array(new_landmark)
return new_img, new_landmark
def Alignment_2(img, std_landmark, landmark):
def Transformation(std_landmark, landmark):
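        # estimate a similarity transform (scale, rotation, translation) that maps
        # landmark onto std_landmark via orthogonal Procrustes analysis (SVD)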
std_landmark = np.matrix(std_landmark).astype(np.float64)
landmark = np.matrix(landmark).astype(np.float64)
c1 = np.mean(std_landmark, axis=0)
c2 = np.mean(landmark, axis=0)
std_landmark -= c1
landmark -= c2
s1 = np.std(std_landmark)
s2 = np.std(landmark)
std_landmark /= s1
landmark /= s2
U, S, Vt = np.linalg.svd(std_landmark.T * landmark)
R = (U * Vt).T
return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)), np.matrix([0., 0., 1.])])
Trans_Matrix = Transformation(std_landmark, landmark) # Shape: 3 * 3
Trans_Matrix = Trans_Matrix[:2]
Trans_Matrix = cv2.invertAffineTransform(Trans_Matrix)
new_img = cv2.warpAffine(img, Trans_Matrix, (img.shape[1], img.shape[0]))
Trans_Matrix = np.array(Trans_Matrix)
new_landmark = []
for i in range(landmark.shape[0]):
pts = []
pts.append(Trans_Matrix[0, 0] * landmark[i, 0] + Trans_Matrix[0, 1] * landmark[i, 1] + Trans_Matrix[0, 2])
pts.append(Trans_Matrix[1, 0] * landmark[i, 0] + Trans_Matrix[1, 1] * landmark[i, 1] + Trans_Matrix[1, 2])
new_landmark.append(pts)
new_landmark = np.array(new_landmark)
return new_img, new_landmark
# ---------------------------------#
# Image preprocessing:
# Gaussian normalization (standardize to zero mean, unit variance)
# ---------------------------------#
def pre_process(x):
if x.ndim == 4:
axis = (1, 2, 3)
size = x[0].size
elif x.ndim == 3:
axis = (0, 1, 2)
size = x.size
else:
raise ValueError('Dimension should be 3 or 4')
mean = np.mean(x, axis=axis, keepdims=True)
std = np.std(x, axis=axis, keepdims=True)
std_adj = np.maximum(std, 1.0 / np.sqrt(size))
y = (x - mean) / std_adj
return y
# ---------------------------------#
# L2 normalization
# ---------------------------------#
def l2_normalize(x, axis=-1, epsilon=1e-10):
output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
return output
# ---------------------------------#
# Compute the 128-dimensional feature vector
# ---------------------------------#
def calc_128_vec(model, img):
face_img = pre_process(img)
pre = model.predict(face_img)
pre = l2_normalize(np.concatenate(pre))
pre = np.reshape(pre, [128])
return pre
# ---------------------------------#
# Compute the distance between face encodings
# ---------------------------------#
def face_distance(face_encodings, face_to_compare):
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
# ---------------------------------#
# Compare faces
# ---------------------------------#
def compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):
dis = face_distance(known_face_encodings, face_encoding_to_check)
return list(dis <= tolerance)
|
[
"numpy.maximum",
"numpy.empty",
"cv2.warpAffine",
"numpy.mean",
"numpy.linalg.norm",
"numpy.linalg.svd",
"cv2.getRotationMatrix2D",
"cv2.invertAffineTransform",
"numpy.multiply",
"numpy.std",
"numpy.swapaxes",
"numpy.reshape",
"numpy.repeat",
"numpy.minimum",
"numpy.fix",
"numpy.square",
"numpy.hstack",
"numpy.concatenate",
"numpy.matrix",
"math.atan",
"numpy.where",
"numpy.array",
"numpy.sqrt"
] |
[((984, 1011), 'numpy.swapaxes', 'np.swapaxes', (['cls_prob', '(0)', '(1)'], {}), '(cls_prob, 0, 1)\n', (995, 1011), True, 'import numpy as np\n'), ((1022, 1044), 'numpy.swapaxes', 'np.swapaxes', (['roi', '(0)', '(2)'], {}), '(roi, 0, 2)\n', (1033, 1044), True, 'import numpy as np\n'), ((1171, 1202), 'numpy.where', 'np.where', (['(cls_prob >= threshold)'], {}), '(cls_prob >= threshold)\n', (1179, 1202), True, 'import numpy as np\n'), ((1267, 1309), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 0) * scale)'], {}), '((stride * boundingbox + 0) * scale)\n', (1273, 1309), True, 'import numpy as np\n'), ((1322, 1365), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 11) * scale)'], {}), '((stride * boundingbox + 11) * scale)\n', (1328, 1365), True, 'import numpy as np\n'), ((1509, 1543), 'numpy.concatenate', 'np.concatenate', (['(bb1, bb2)'], {'axis': '(1)'}), '((bb1, bb2), axis=1)\n', (1523, 1543), True, 'import numpy as np\n'), ((1797, 1841), 'numpy.concatenate', 'np.concatenate', (['(boundingbox, score)'], {'axis': '(1)'}), '((boundingbox, score), axis=1)\n', (1811, 1841), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (2894, 2906), True, 'import numpy as np\n'), ((3022, 3059), 'numpy.multiply', 'np.multiply', (['(x2 - x1 + 1)', '(y2 - y1 + 1)'], {}), '(x2 - x1 + 1, y2 - y1 + 1)\n', (3033, 3059), True, 'import numpy as np\n'), ((3898, 3925), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (3906, 3925), True, 'import numpy as np\n'), ((3944, 3964), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (3952, 3964), True, 'import numpy as np\n'), ((4426, 4470), 'numpy.concatenate', 'np.concatenate', (['(x1, y1, x2, y2, sc)'], {'axis': '(1)'}), '((x1, y1, x2, y2, sc), axis=1)\n', (4440, 4470), True, 'import numpy as np\n'), ((5100, 5127), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (5108, 5127), True, 'import numpy as np\n'), ((5145, 5165), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (5153, 5165), True, 'import numpy as np\n'), ((6152, 6260), 'numpy.concatenate', 'np.concatenate', (['(x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9\n )'], {'axis': '(1)'}), '((x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5,\n pts6, pts7, pts8, pts9), axis=1)\n', (6166, 6260), True, 'import numpy as np\n'), ((7413, 7454), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', '(1)'], {}), '(center, angle, 1)\n', (7436, 7454), False, 'import cv2\n'), ((7469, 7534), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'RotationMatrix', '(img.shape[1], img.shape[0])'], {}), '(img, RotationMatrix, (img.shape[1], img.shape[0]))\n', (7483, 7534), False, 'import cv2\n'), ((7557, 7581), 'numpy.array', 'np.array', (['RotationMatrix'], {}), '(RotationMatrix)\n', (7565, 7581), True, 'import numpy as np\n'), ((7955, 7977), 'numpy.array', 'np.array', (['new_landmark'], {}), '(new_landmark)\n', (7963, 7977), True, 'import numpy as np\n'), ((8804, 8843), 'cv2.invertAffineTransform', 'cv2.invertAffineTransform', (['Trans_Matrix'], {}), '(Trans_Matrix)\n', (8829, 8843), False, 'import cv2\n'), ((8858, 8921), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'Trans_Matrix', '(img.shape[1], img.shape[0])'], {}), '(img, Trans_Matrix, (img.shape[1], img.shape[0]))\n', (8872, 8921), False, 'import cv2\n'), ((8942, 8964), 'numpy.array', 'np.array', (['Trans_Matrix'], {}), '(Trans_Matrix)\n', (8950, 8964), True, 
'import numpy as np\n'), ((9326, 9348), 'numpy.array', 'np.array', (['new_landmark'], {}), '(new_landmark)\n', (9334, 9348), True, 'import numpy as np\n'), ((9715, 9751), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (9722, 9751), True, 'import numpy as np\n'), ((9762, 9797), 'numpy.std', 'np.std', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (9768, 9797), True, 'import numpy as np\n'), ((10373, 10395), 'numpy.reshape', 'np.reshape', (['pre', '[128]'], {}), '(pre, [128])\n', (10383, 10395), True, 'import numpy as np\n'), ((10623, 10679), 'numpy.linalg.norm', 'np.linalg.norm', (['(face_encodings - face_to_compare)'], {'axis': '(1)'}), '(face_encodings - face_to_compare, axis=1)\n', (10637, 10679), True, 'import numpy as np\n'), ((1222, 1238), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1230, 1238), True, 'import numpy as np\n'), ((1649, 1675), 'numpy.array', 'np.array', (['[cls_prob[x, y]]'], {}), '([cls_prob[x, y]])\n', (1657, 1675), True, 'import numpy as np\n'), ((1691, 1721), 'numpy.array', 'np.array', (['[dx1, dx2, dx3, dx4]'], {}), '([dx1, dx2, dx3, dx4])\n', (1699, 1721), True, 'import numpy as np\n'), ((2458, 2474), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (2468, 2474), True, 'import numpy as np\n'), ((3140, 3174), 'numpy.maximum', 'np.maximum', (['x1[I[-1]]', 'x1[I[0:-1]]'], {}), '(x1[I[-1]], x1[I[0:-1]])\n', (3150, 3174), True, 'import numpy as np\n'), ((3240, 3274), 'numpy.maximum', 'np.maximum', (['y1[I[-1]]', 'y1[I[0:-1]]'], {}), '(y1[I[-1]], y1[I[0:-1]])\n', (3250, 3274), True, 'import numpy as np\n'), ((3289, 3323), 'numpy.minimum', 'np.minimum', (['x2[I[-1]]', 'x2[I[0:-1]]'], {}), '(x2[I[-1]], x2[I[0:-1]])\n', (3299, 3323), True, 'import numpy as np\n'), ((3338, 3372), 'numpy.minimum', 'np.minimum', (['y2[I[-1]]', 'y2[I[0:-1]]'], {}), '(y2[I[-1]], y2[I[0:-1]])\n', (3348, 3372), True, 'import numpy as np\n'), ((3385, 3415), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (3395, 3415), True, 'import numpy as np\n'), ((3428, 3458), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (3438, 3458), True, 'import numpy as np\n'), ((4092, 4114), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), '([prob[pick]])\n', (4100, 4114), True, 'import numpy as np\n'), ((4253, 4282), 'numpy.array', 'np.array', (['[(x1 + dx1 * w)[0]]'], {}), '([(x1 + dx1 * w)[0]])\n', (4261, 4282), True, 'import numpy as np\n'), ((4294, 4323), 'numpy.array', 'np.array', (['[(y1 + dx2 * h)[0]]'], {}), '([(y1 + dx2 * h)[0]])\n', (4302, 4323), True, 'import numpy as np\n'), ((4335, 4364), 'numpy.array', 'np.array', (['[(x2 + dx3 * w)[0]]'], {}), '([(x2 + dx3 * w)[0]])\n', (4343, 4364), True, 'import numpy as np\n'), ((4376, 4405), 'numpy.array', 'np.array', (['[(y2 + dx4 * h)[0]]'], {}), '([(y2 + dx4 * h)[0]])\n', (4384, 4405), True, 'import numpy as np\n'), ((5293, 5315), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), '([prob[pick]])\n', (5301, 5315), True, 'import numpy as np\n'), ((5456, 5494), 'numpy.array', 'np.array', (['[(w * pts[pick, 0] + x1)[0]]'], {}), '([(w * pts[pick, 0] + x1)[0]])\n', (5464, 5494), True, 'import numpy as np\n'), ((5508, 5546), 'numpy.array', 'np.array', (['[(h * pts[pick, 5] + y1)[0]]'], {}), '([(h * pts[pick, 5] + y1)[0]])\n', (5516, 5546), True, 'import numpy as np\n'), ((5561, 5599), 'numpy.array', 'np.array', (['[(w * pts[pick, 1] + x1)[0]]'], {}), '([(w 
* pts[pick, 1] + x1)[0]])\n', (5569, 5599), True, 'import numpy as np\n'), ((5613, 5651), 'numpy.array', 'np.array', (['[(h * pts[pick, 6] + y1)[0]]'], {}), '([(h * pts[pick, 6] + y1)[0]])\n', (5621, 5651), True, 'import numpy as np\n'), ((5666, 5704), 'numpy.array', 'np.array', (['[(w * pts[pick, 2] + x1)[0]]'], {}), '([(w * pts[pick, 2] + x1)[0]])\n', (5674, 5704), True, 'import numpy as np\n'), ((5718, 5756), 'numpy.array', 'np.array', (['[(h * pts[pick, 7] + y1)[0]]'], {}), '([(h * pts[pick, 7] + y1)[0]])\n', (5726, 5756), True, 'import numpy as np\n'), ((5771, 5809), 'numpy.array', 'np.array', (['[(w * pts[pick, 3] + x1)[0]]'], {}), '([(w * pts[pick, 3] + x1)[0]])\n', (5779, 5809), True, 'import numpy as np\n'), ((5823, 5861), 'numpy.array', 'np.array', (['[(h * pts[pick, 8] + y1)[0]]'], {}), '([(h * pts[pick, 8] + y1)[0]])\n', (5831, 5861), True, 'import numpy as np\n'), ((5876, 5914), 'numpy.array', 'np.array', (['[(w * pts[pick, 4] + x1)[0]]'], {}), '([(w * pts[pick, 4] + x1)[0]])\n', (5884, 5914), True, 'import numpy as np\n'), ((5928, 5966), 'numpy.array', 'np.array', (['[(h * pts[pick, 9] + y1)[0]]'], {}), '([(h * pts[pick, 9] + y1)[0]])\n', (5936, 5966), True, 'import numpy as np\n'), ((5979, 6008), 'numpy.array', 'np.array', (['[(x1 + dx1 * w)[0]]'], {}), '([(x1 + dx1 * w)[0]])\n', (5987, 6008), True, 'import numpy as np\n'), ((6020, 6049), 'numpy.array', 'np.array', (['[(y1 + dx2 * h)[0]]'], {}), '([(y1 + dx2 * h)[0]])\n', (6028, 6049), True, 'import numpy as np\n'), ((6061, 6090), 'numpy.array', 'np.array', (['[(x2 + dx3 * w)[0]]'], {}), '([(x2 + dx3 * w)[0]])\n', (6069, 6090), True, 'import numpy as np\n'), ((6102, 6131), 'numpy.array', 'np.array', (['[(y2 + dx4 * h)[0]]'], {}), '([(y2 + dx4 * h)[0]])\n', (6110, 6131), True, 'import numpy as np\n'), ((8246, 8275), 'numpy.mean', 'np.mean', (['std_landmark'], {'axis': '(0)'}), '(std_landmark, axis=0)\n', (8253, 8275), True, 'import numpy as np\n'), ((8289, 8314), 'numpy.mean', 'np.mean', (['landmark'], {'axis': '(0)'}), '(landmark, axis=0)\n', (8296, 8314), True, 'import numpy as np\n'), ((8379, 8399), 'numpy.std', 'np.std', (['std_landmark'], {}), '(std_landmark)\n', (8385, 8399), True, 'import numpy as np\n'), ((8413, 8429), 'numpy.std', 'np.std', (['landmark'], {}), '(landmark)\n', (8419, 8429), True, 'import numpy as np\n'), ((8500, 8540), 'numpy.linalg.svd', 'np.linalg.svd', (['(std_landmark.T * landmark)'], {}), '(std_landmark.T * landmark)\n', (8513, 8540), True, 'import numpy as np\n'), ((10342, 10361), 'numpy.concatenate', 'np.concatenate', (['pre'], {}), '(pre)\n', (10356, 10361), True, 'import numpy as np\n'), ((10598, 10609), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10606, 10609), True, 'import numpy as np\n'), ((2643, 2668), 'numpy.repeat', 'np.repeat', (['[l]', '(2)'], {'axis': '(0)'}), '([l], 2, axis=0)\n', (2652, 2668), True, 'import numpy as np\n'), ((9834, 9847), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (9841, 9847), True, 'import numpy as np\n'), ((3580, 3604), 'numpy.where', 'np.where', (['(o <= threshold)'], {}), '(o <= threshold)\n', (3588, 3604), True, 'import numpy as np\n'), ((7305, 7321), 'math.atan', 'math.atan', (['(y / x)'], {}), '(y / x)\n', (7314, 7321), False, 'import math\n'), ((8131, 8154), 'numpy.matrix', 'np.matrix', (['std_landmark'], {}), '(std_landmark)\n', (8140, 8154), True, 'import numpy as np\n'), ((8193, 8212), 'numpy.matrix', 'np.matrix', (['landmark'], {}), '(landmark)\n', (8202, 8212), True, 'import numpy as np\n'), ((8591, 8642), 'numpy.hstack', 
'np.hstack', (['(s2 / s1 * R, c2.T - s2 / s1 * R * c1.T)'], {}), '((s2 / s1 * R, c2.T - s2 / s1 * R * c1.T))\n', (8600, 8642), True, 'import numpy as np\n'), ((8648, 8674), 'numpy.matrix', 'np.matrix', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (8657, 8674), True, 'import numpy as np\n'), ((10065, 10077), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (10074, 10077), True, 'import numpy as np\n')]
|
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis import settings, given, strategies as st
import pytest
import numpy as np
from scipy.special import factorial
from thewalrus.quantum import total_photon_number_distribution
from mrmustard.lab import *
from mrmustard.physics.fock import dm_to_ket, ket_to_dm
# helper strategies
st_angle = st.floats(min_value=0, max_value=2 * np.pi)
@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock(n_mean, phi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state
Note that this is consistent with the Strawberryfields convention"""
cutoff = 4
r = np.arcsinh(np.sqrt(n_mean))
circ = Circuit(ops=[S2gate(r=r, phi=phi)])
amps = (Vacuum(num_modes=2) >> circ).ket(cutoffs=[cutoff, cutoff])
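    # a two-mode squeezed vacuum only has support on |n, n>, with amplitude (e^{i phi} tanh r)^n / cosh r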
diag = (1 / np.cosh(r)) * (np.exp(1j * phi) * np.tanh(r)) ** np.arange(cutoff)
expected = np.diag(diag)
assert np.allclose(amps, expected)
@given(n_mean=st.floats(0, 3), phi=st_angle, varphi=st_angle)
def test_hong_ou_mandel(n_mean, phi, varphi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
cutoff = 2
r = np.arcsinh(np.sqrt(n_mean))
ops = [
S2gate(r=r, phi=phi)[0, 1],
S2gate(r=r, phi=phi)[2, 3],
BSgate(theta=np.pi / 4, phi=varphi)[1, 2],
]
circ = Circuit(ops)
amps = (Vacuum(4) >> circ).ket(cutoffs=[cutoff, cutoff, cutoff, cutoff])
assert np.allclose(amps[1, 1, 1, 1], 0.0, atol=1e-6)
@given(alpha=st.complex_numbers(min_magnitude=0, max_magnitude=2))
def test_coherent_state(alpha):
"""Test that coherent states have the correct photon number statistics"""
cutoff = 10
amps = Coherent(x=alpha.real, y=alpha.imag).ket(cutoffs=[cutoff])
expected = np.exp(-0.5 * np.abs(alpha) ** 2) * np.array(
[alpha**n / np.sqrt(factorial(n)) for n in range(cutoff)]
)
assert np.allclose(amps, expected, atol=1e-6)
@given(r=st.floats(0, 2), phi=st_angle)
def test_squeezed_state(r, phi):
"""Test that squeezed states have the correct photon number statistics
Note that we use the same sign with respect to SMSV in https://en.wikipedia.org/wiki/Squeezed_coherent_state"""
cutoff = 10
amps = SqueezedVacuum(r=r, phi=phi).ket(cutoffs=[cutoff])
assert np.allclose(amps[1::2], 0.0)
non_zero_amps = amps[0::2]
len_non_zero = len(non_zero_amps)
amp_pairs = (
1
/ np.sqrt(np.cosh(r))
* np.array(
[
(-np.exp(1j * phi) * np.tanh(r)) ** n
* np.sqrt(factorial(2 * n))
/ (2**n * factorial(n))
for n in range(len_non_zero)
]
)
)
assert np.allclose(non_zero_amps, amp_pairs)
@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock_mean_and_covar(n_mean, phi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
r = np.arcsinh(np.sqrt(n_mean))
state = Vacuum(num_modes=2) >> S2gate(r=r, phi=phi)
meanN = state.number_means
covN = state.number_cov
expectedN = np.array([n_mean, n_mean])
expectedCov = n_mean * (n_mean + 1) * np.ones([2, 2])
assert np.allclose(meanN, expectedN)
assert np.allclose(covN, expectedCov)
@given(n_mean=st.floats(0, 2), phi=st_angle, eta=st.floats(min_value=0, max_value=1))
def test_lossy_squeezing(n_mean, phi, eta):
"""Tests the total photon number distribution of a lossy squeezed state"""
r = np.arcsinh(np.sqrt(n_mean))
cutoff = 40
ps = (SqueezedVacuum(r=r, phi=phi) >> Attenuator(transmissivity=eta)).fock_probabilities(
[cutoff]
)
expected = np.array([total_photon_number_distribution(n, 1, r, eta) for n in range(cutoff)])
assert np.allclose(ps, expected, atol=1e-6)
@given(n_mean=st.floats(0, 2), phi=st_angle, eta_0=st.floats(0, 1), eta_1=st.floats(0, 1))
def test_lossy_two_mode_squeezing(n_mean, phi, eta_0, eta_1):
"""Tests the photon number distribution of a lossy two-mode squeezed state"""
cutoff = 40
n = np.arange(cutoff)
L = Attenuator(transmissivity=[eta_0, eta_1])
state = TMSV(r=np.arcsinh(np.sqrt(n_mean)), phi=phi) >> L
ps0 = state.get_modes(0).fock_probabilities([cutoff])
ps1 = state.get_modes(1).fock_probabilities([cutoff])
mean_0 = np.sum(n * ps0)
mean_1 = np.sum(n * ps1)
assert np.allclose(mean_0, n_mean * eta_0, atol=1e-5)
assert np.allclose(mean_1, n_mean * eta_1, atol=1e-5)
@given(num_modes=st.integers(1, 3))
def test_density_matrix(num_modes):
"""Tests the density matrix of a pure state is equal to |psi><psi|"""
modes = list(range(num_modes))
cutoffs = [num_modes + 1] * num_modes
G = Ggate(num_modes=num_modes)
L = Attenuator(transmissivity=1.0)
rho_legit = (Vacuum(num_modes) >> G >> L[modes]).dm(cutoffs=cutoffs)
rho_made = (Vacuum(num_modes) >> G).dm(cutoffs=cutoffs)
# rho_legit = L[modes](G(Vacuum(num_modes))).dm(cutoffs=cutoffs)
# rho_built = G(Vacuum(num_modes=num_modes)).dm(cutoffs=cutoffs)
assert np.allclose(rho_legit, rho_made)
@pytest.mark.parametrize(
"state",
[
Vacuum(num_modes=2),
Fock(4),
Coherent(x=0.1, y=-0.4, cutoffs=[15]),
Gaussian(num_modes=2, cutoffs=[15]),
],
)
def test_dm_to_ket(state):
"""Tests pure state density matrix conversion to ket"""
dm = state.dm()
ket = dm_to_ket(dm)
# check if ket is normalized
assert np.allclose(np.linalg.norm(ket), 1)
# check kets are equivalent
assert np.allclose(ket, state.ket())
dm_reconstructed = ket_to_dm(ket)
# check ket leads to same dm
assert np.allclose(dm, dm_reconstructed)
def test_dm_to_ket_error():
"""Test dm_to_ket raises an error when state is mixed"""
state = Coherent(x=0.1, y=-0.4, cutoffs=[15]) >> Attenuator(0.5)
with pytest.raises(ValueError):
dm_to_ket(state)
|
[
"numpy.sum",
"numpy.abs",
"numpy.allclose",
"numpy.ones",
"numpy.arange",
"numpy.linalg.norm",
"numpy.exp",
"numpy.diag",
"pytest.raises",
"hypothesis.strategies.complex_numbers",
"hypothesis.strategies.integers",
"mrmustard.physics.fock.dm_to_ket",
"scipy.special.factorial",
"numpy.tanh",
"numpy.cosh",
"hypothesis.strategies.floats",
"numpy.array",
"mrmustard.physics.fock.ket_to_dm",
"thewalrus.quantum.total_photon_number_distribution",
"numpy.sqrt"
] |
[((901, 944), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0)', 'max_value': '(2 * np.pi)'}), '(min_value=0, max_value=2 * np.pi)\n', (910, 944), True, 'from hypothesis import settings, given, strategies as st\n'), ((1475, 1488), 'numpy.diag', 'np.diag', (['diag'], {}), '(diag)\n', (1482, 1488), True, 'import numpy as np\n'), ((1500, 1527), 'numpy.allclose', 'np.allclose', (['amps', 'expected'], {}), '(amps, expected)\n', (1511, 1527), True, 'import numpy as np\n'), ((2041, 2087), 'numpy.allclose', 'np.allclose', (['amps[1, 1, 1, 1]', '(0.0)'], {'atol': '(1e-06)'}), '(amps[1, 1, 1, 1], 0.0, atol=1e-06)\n', (2052, 2087), True, 'import numpy as np\n'), ((2496, 2535), 'numpy.allclose', 'np.allclose', (['amps', 'expected'], {'atol': '(1e-06)'}), '(amps, expected, atol=1e-06)\n', (2507, 2535), True, 'import numpy as np\n'), ((2890, 2918), 'numpy.allclose', 'np.allclose', (['amps[1::2]', '(0.0)'], {}), '(amps[1::2], 0.0)\n', (2901, 2918), True, 'import numpy as np\n'), ((3304, 3341), 'numpy.allclose', 'np.allclose', (['non_zero_amps', 'amp_pairs'], {}), '(non_zero_amps, amp_pairs)\n', (3315, 3341), True, 'import numpy as np\n'), ((3717, 3743), 'numpy.array', 'np.array', (['[n_mean, n_mean]'], {}), '([n_mean, n_mean])\n', (3725, 3743), True, 'import numpy as np\n'), ((3813, 3842), 'numpy.allclose', 'np.allclose', (['meanN', 'expectedN'], {}), '(meanN, expectedN)\n', (3824, 3842), True, 'import numpy as np\n'), ((3854, 3884), 'numpy.allclose', 'np.allclose', (['covN', 'expectedCov'], {}), '(covN, expectedCov)\n', (3865, 3884), True, 'import numpy as np\n'), ((4373, 4410), 'numpy.allclose', 'np.allclose', (['ps', 'expected'], {'atol': '(1e-06)'}), '(ps, expected, atol=1e-06)\n', (4384, 4410), True, 'import numpy as np\n'), ((4671, 4688), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (4680, 4688), True, 'import numpy as np\n'), ((4930, 4945), 'numpy.sum', 'np.sum', (['(n * ps0)'], {}), '(n * ps0)\n', (4936, 4945), True, 'import numpy as np\n'), ((4959, 4974), 'numpy.sum', 'np.sum', (['(n * ps1)'], {}), '(n * ps1)\n', (4965, 4974), True, 'import numpy as np\n'), ((4986, 5033), 'numpy.allclose', 'np.allclose', (['mean_0', '(n_mean * eta_0)'], {'atol': '(1e-05)'}), '(mean_0, n_mean * eta_0, atol=1e-05)\n', (4997, 5033), True, 'import numpy as np\n'), ((5044, 5091), 'numpy.allclose', 'np.allclose', (['mean_1', '(n_mean * eta_1)'], {'atol': '(1e-05)'}), '(mean_1, n_mean * eta_1, atol=1e-05)\n', (5055, 5091), True, 'import numpy as np\n'), ((5672, 5704), 'numpy.allclose', 'np.allclose', (['rho_legit', 'rho_made'], {}), '(rho_legit, rho_made)\n', (5683, 5704), True, 'import numpy as np\n'), ((6017, 6030), 'mrmustard.physics.fock.dm_to_ket', 'dm_to_ket', (['dm'], {}), '(dm)\n', (6026, 6030), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((6208, 6222), 'mrmustard.physics.fock.ket_to_dm', 'ket_to_dm', (['ket'], {}), '(ket)\n', (6217, 6222), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((6267, 6300), 'numpy.allclose', 'np.allclose', (['dm', 'dm_reconstructed'], {}), '(dm, dm_reconstructed)\n', (6278, 6300), True, 'import numpy as np\n'), ((1242, 1257), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (1249, 1257), True, 'import numpy as np\n'), ((961, 976), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (970, 976), True, 'from hypothesis import settings, given, strategies as st\n'), ((1771, 1786), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (1778, 1786), True, 'import numpy as 
np\n'), ((1544, 1559), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (1553, 1559), True, 'from hypothesis import settings, given, strategies as st\n'), ((2102, 2154), 'hypothesis.strategies.complex_numbers', 'st.complex_numbers', ([], {'min_magnitude': '(0)', 'max_magnitude': '(2)'}), '(min_magnitude=0, max_magnitude=2)\n', (2120, 2154), True, 'from hypothesis import settings, given, strategies as st\n'), ((2546, 2561), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (2555, 2561), True, 'from hypothesis import settings, given, strategies as st\n'), ((3569, 3584), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (3576, 3584), True, 'import numpy as np\n'), ((3786, 3801), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (3793, 3801), True, 'import numpy as np\n'), ((3358, 3373), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (3367, 3373), True, 'from hypothesis import settings, given, strategies as st\n'), ((4115, 4130), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (4122, 4130), True, 'import numpy as np\n'), ((3901, 3916), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (3910, 3916), True, 'from hypothesis import settings, given, strategies as st\n'), ((3936, 3971), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0)', 'max_value': '(1)'}), '(min_value=0, max_value=1)\n', (3945, 3971), True, 'from hypothesis import settings, given, strategies as st\n'), ((4426, 4441), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (4435, 4441), True, 'from hypothesis import settings, given, strategies as st\n'), ((4463, 4478), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4472, 4478), True, 'from hypothesis import settings, given, strategies as st\n'), ((4486, 4501), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4495, 4501), True, 'from hypothesis import settings, given, strategies as st\n'), ((5110, 5127), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(3)'], {}), '(1, 3)\n', (5121, 5127), True, 'from hypothesis import settings, given, strategies as st\n'), ((6087, 6106), 'numpy.linalg.norm', 'np.linalg.norm', (['ket'], {}), '(ket)\n', (6101, 6106), True, 'import numpy as np\n'), ((6471, 6496), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6484, 6496), False, 'import pytest\n'), ((6506, 6522), 'mrmustard.physics.fock.dm_to_ket', 'dm_to_ket', (['state'], {}), '(state)\n', (6515, 6522), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((1393, 1403), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (1400, 1403), True, 'import numpy as np\n'), ((1442, 1459), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (1451, 1459), True, 'import numpy as np\n'), ((4290, 4336), 'thewalrus.quantum.total_photon_number_distribution', 'total_photon_number_distribution', (['n', '(1)', 'r', 'eta'], {}), '(n, 1, r, eta)\n', (4322, 4336), False, 'from thewalrus.quantum import total_photon_number_distribution\n'), ((1408, 1426), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1414, 1426), True, 'import numpy as np\n'), ((1427, 1437), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (1434, 1437), True, 'import numpy as np\n'), ((3034, 3044), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (3041, 3044), True, 'import numpy as np\n'), ((2381, 2394), 'numpy.abs', 'np.abs', (['alpha'], 
{}), '(alpha)\n', (2387, 2394), True, 'import numpy as np\n'), ((4769, 4784), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (4776, 4784), True, 'import numpy as np\n'), ((2441, 2453), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (2450, 2453), False, 'from scipy.special import factorial\n'), ((3204, 3216), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (3213, 3216), False, 'from scipy.special import factorial\n'), ((3160, 3176), 'scipy.special.factorial', 'factorial', (['(2 * n)'], {}), '(2 * n)\n', (3169, 3176), False, 'from scipy.special import factorial\n'), ((3117, 3127), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (3124, 3127), True, 'import numpy as np\n'), ((3098, 3116), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (3104, 3116), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os,sys,glob,multiprocessing,time,csv,math,pprint
from parsl.app.app import python_app
from os.path import *
from mappgene.subscripts import *
@python_app(executors=['worker'])
def run_ivar(params):
subject_dir = params['work_dir']
subject = basename(subject_dir)
input_reads = params['input_reads']
variant_frequency = params['variant_frequency']
read_cutoff_bp = params['read_cutoff_bp']
primers_bp = params['primers_bp']
depth_cap = params['depth_cap']
stdout = params['stdout']
ivar_dir = join(subject_dir, 'ivar')
output_dir = join(subject_dir, 'ivar_outputs')
alignments_dir = join(output_dir, 'alignments')
raw_dir = join(ivar_dir, 'raw_data')
smart_remove(raw_dir)
smart_remove(output_dir)
smart_mkdir(raw_dir)
smart_mkdir(output_dir)
smart_mkdir(alignments_dir)
reads = []
start_time = time.time()
start_str = f'''
=====================================
Starting iVar with subject: {subject}
{get_time_date()}
Arguments:
{pprint.pformat(params, width=1)}
=====================================
'''
write(stdout, start_str)
print(start_str)
update_permissions(ivar_dir, params)
update_permissions(output_dir, params)
# Run fixq.sh
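    # on every 4th FASTQ line (the quality string) the awk call below rewrites
    # 'F' -> '?' and ':' -> '5', i.e. it lowers those base quality scores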
for input_read in input_reads:
tmp_f = join(raw_dir, 'tmp_' + basename(input_read))
f = join(raw_dir, basename(input_read))
smart_copy(input_read, f)
run(f'zcat {f} | awk \'NR%4 == 0 {{ gsub(\\"F\\", \\"?\\"); gsub(\\":\\", \\"5\\") }}1\'' +
f' | gzip -c > {tmp_f}', params)
if exists(tmp_f):
smart_remove(f)
os.rename(tmp_f, f)
reads.append(f)
# Deinterleave if only a single FASTQ was found
# fasta = join(ivar_dir, 'references/PS_1200bp.fasta')
fasta = join(ivar_dir, 'references/NC_045512.2.fasta')
if len(reads) == 1:
f = reads[0]
read1 = replace_extension(f, '_R1.fastq.gz')
read2 = replace_extension(f, '_R2.fastq.gz')
run(f'reformat.sh in={f} out1={read1} out2={read2}', params)
smart_remove(f)
elif len(reads) == 2:
reads.sort()
read1 = reads[0]
read2 = reads[1]
else:
raise Exception(f'Invalid reads: {reads}')
align_prefix = join(alignments_dir, subject)
bam = replace_extension(align_prefix, '.bam')
trimmed = replace_extension(align_prefix, '.trimmed')
trimmed_sorted = replace_extension(align_prefix, '.trimmed.sorted.bam')
variants = replace_extension(align_prefix, '.variants')
noinsertions = replace_extension(align_prefix, '.noins.variants')
masked = replace_extension(align_prefix, '.masked.txt')
trimmed_masked = replace_extension(align_prefix, '.trimmed.masked.bam')
trimmed_masked_bedgraph = join(output_dir, f'{subject}.ivar.bedgraph')
final_masked = replace_extension(align_prefix, '.final.masked.variants')
lofreq_bam = replace_extension(align_prefix, '.lofreq.bam')
lofreq_bedgraph = join(output_dir, f'{subject}.ivar.lofreq.bedgraph')
vcf_s0 = replace_extension(align_prefix, '.vcf')
tsv = replace_extension(align_prefix, '.final.masked.variants.tsv')
output_vcf = join(alignments_dir, f'{subject}.ivar.vcf')
output_tsv = join(output_dir, f'{subject}.ivar.tsv')
output_fa = join(output_dir, f'{subject}.ivar.consensus')
run(f'bwa index {fasta}', params)
run(f'bwa mem -t 8 {fasta} {read1} {read2} | samtools sort -o {bam}', params)
run(f'ivar trim -m {read_cutoff_bp} -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.scheme.bed -p {trimmed} -i {bam} -e', params)
run(f'samtools sort {trimmed}.bam -o {trimmed_sorted}', params)
# call variants with ivar (produces {subject}.variants.tsv)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {trimmed_sorted} | ' +
f'ivar variants -p {variants} -q 20 -t {variant_frequency} -r {fasta} ' +
f'-g {ivar_dir}/GCF_009858895.2_ASM985889v3_genomic.gff', params)
# remove low quality insertions because we want to ignore most mismatches
# to primers that are insertions (produces {subject}.noins.variants.tsv)
run(f"awk \'! (\\$4 ~ /^\\+/ && \\$10 >= 20) {{ print }}\' < {variants}.tsv > {noinsertions}.tsv", params)
# get primers with mismatches to reference (produces {subject}.masked.txt)
run(f'ivar getmasked -i {noinsertions}.tsv -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.bed ' +
f'-f {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.tsv -p {masked}', params)
# remove reads with primer mismatches (produces {subject}.trimmed.masked.bam)
run(f'ivar removereads -i {trimmed_sorted} -p {trimmed_masked} ' +
f'-t {masked} -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.bed', params)
# call variants with reads with primer mismatches removed (produces {subject}.final.masked.variants.tsv)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {trimmed_masked} | ' +
f'ivar variants -p {final_masked} -q 20 -t {variant_frequency} -r {fasta} ' +
f'-g {ivar_dir}/GCF_009858895.2_ASM985889v3_genomic.gff', params)
smart_copy(tsv, output_tsv)
# convert ivar output to vcf (produces {subject}.final.masked.variants.vcf)
run(f'python /opt/ivar_variants_to_vcf.py {output_tsv} {output_vcf}', params)
# use lofreq to call variants (produces {subject}.lofreq.bam and {subject}.vcf)
run(f'lofreq indelqual --dindel -f {fasta} -o {lofreq_bam} --verbose {trimmed_masked}', params)
run(f'samtools index {lofreq_bam}', params)
run(f'lofreq call -d {depth_cap} --verbose --call-indels -f {fasta} -o {vcf_s0} --verbose {lofreq_bam}', params)
# create consensus sequence for comparing to reference genome (produces {subject}.consensus.fa)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {lofreq_bam} | ' +
f'ivar consensus -p {output_fa}', params)
# create bedgraphs of gene coverage (produces {subject}.lofreq.bedgraph and {subject}.trimmed.masked.bedgraph)
# https://bedtools.readthedocs.io/en/latest/content/tools/genomecov.html
run(f'bedtools genomecov -ibam {lofreq_bam} -bga > {lofreq_bedgraph}', params)
run(f'bedtools genomecov -ibam {trimmed_masked} -bga > {trimmed_masked_bedgraph}', params)
# Run snpEff postprocessing
vcf_s1 = join(output_dir, f'{subject}.ivar.lofreq.vcf')
vcf_s2 = join(output_dir, f'{subject}.ivar.lofreq.snpEFF.vcf')
vcf_s3 = join(output_dir, f'{subject}.ivar.lofreq.snpSIFT.txt')
run(f'sed "s/MN908947.3/NC_045512.2/g" {vcf_s0} > {vcf_s1}', params)
run(f'java -Xmx8g -jar /opt/snpEff/snpEff.jar NC_045512.2 {vcf_s1} > {vcf_s2}', params)
run(f'cat {vcf_s2} | /opt/snpEff/scripts/vcfEffOnePerLine.pl | java -jar /opt/snpEff/SnpSift.jar ' +
f' extractFields - CHROM POS REF ALT AF DP "ANN[*].IMPACT" "ANN[*].FEATUREID" "ANN[*].EFFECT" ' +
f' "ANN[*].HGVS_C" "ANN[*].HGVS_P" "ANN[*].CDNA_POS" "ANN[*].AA_POS" "ANN[*].GENE" > {vcf_s3}', params)
# //TODO: make this DRY
i_vcf_s1 = join(output_dir, f'{subject}.ivar.vcf')
i_vcf_s2 = join(output_dir, f'{subject}.ivar.snpEFF.vcf')
i_vcf_s3 = join(output_dir, f'{subject}.ivar.snpSIFT.txt')
run(f'sed "s/MN908947.3/NC_045512.2/g" {output_vcf} > {i_vcf_s1}', params)
run(f'java -Xmx8g -jar /opt/snpEff/snpEff.jar NC_045512.2 -noStats {i_vcf_s1} > {i_vcf_s2}', params)
run(f'cat {i_vcf_s2} | /opt/snpEff/scripts/vcfEffOnePerLine.pl | java -jar /opt/snpEff/SnpSift.jar ' +
f' extractFields - CHROM POS REF ALT "GEN[0].ALT_FREQ" DP "ANN[*].IMPACT" "ANN[*].FEATUREID" "ANN[*].EFFECT" ' +
f' "ANN[*].HGVS_C" "ANN[*].HGVS_P" "ANN[*].CDNA_POS" "ANN[*].AA_POS" "ANN[*].GENE" ' +
f' FILTER "GEN[0].ALT_QUAL" | ' +
f' awk \'/^CHROM/ {{ sub(\\"GEN\\\\[0\\\\].ALT_FREQ\\", \\"AF\\"); \
sub(\\"GEN\\\\[0\\\\].ALT_QUAL\\", \\"ALT_QUAL\\") }}1\' > {i_vcf_s3}', params)
# Clear extra files
smart_remove('snpEff_genes.txt')
smart_remove('snpEff_summary.html')
update_permissions(ivar_dir, params)
update_permissions(output_dir, params)
finish_str = f'''
=====================================
Finished iVar with subject: {subject}
{get_time_date()}
Arguments:
{pprint.pformat(params, width=1)}
Total time: {get_time_string(time.time() - start_time)} (HH:MM:SS)
=====================================
'''
write(stdout, finish_str)
print(finish_str)
|
[
"parsl.app.app.python_app",
"os.rename",
"pprint.pformat",
"time.time"
] |
[((174, 206), 'parsl.app.app.python_app', 'python_app', ([], {'executors': "['worker']"}), "(executors=['worker'])\n", (184, 206), False, 'from parsl.app.app import python_app\n'), ((903, 914), 'time.time', 'time.time', ([], {}), '()\n', (912, 914), False, 'import os, sys, glob, multiprocessing, time, csv, math, pprint\n'), ((1060, 1091), 'pprint.pformat', 'pprint.pformat', (['params'], {'width': '(1)'}), '(params, width=1)\n', (1074, 1091), False, 'import os, sys, glob, multiprocessing, time, csv, math, pprint\n'), ((1660, 1679), 'os.rename', 'os.rename', (['tmp_f', 'f'], {}), '(tmp_f, f)\n', (1669, 1679), False, 'import os, sys, glob, multiprocessing, time, csv, math, pprint\n'), ((8240, 8271), 'pprint.pformat', 'pprint.pformat', (['params'], {'width': '(1)'}), '(params, width=1)\n', (8254, 8271), False, 'import os, sys, glob, multiprocessing, time, csv, math, pprint\n'), ((8302, 8313), 'time.time', 'time.time', ([], {}), '()\n', (8311, 8313), False, 'import os, sys, glob, multiprocessing, time, csv, math, pprint\n')]
|
# http://github.com/timestocome/
# build a markov chain and use it to predict Alice In Wonderland/Through the Looking Glass text
import numpy as np
import pickle
from collections import Counter
import markovify # https://github.com/jsvine/markovify
#######################################################################
# read in text and break into words and sentences
#####################################################################
# open file and read in text
#file = open('AliceInWonderland.txt', 'r')
file = open('BothBooks.txt', encoding='utf-8')
data = file.read()
file.close()
# create markov model
model_3 = markovify.Text(data, state_size=3)
# generate text from model
print("*******************************")
for i in range(10):
print("__________________________")
print(model_3.make_sentence())
|
[
"markovify.Text"
] |
[((635, 669), 'markovify.Text', 'markovify.Text', (['data'], {'state_size': '(3)'}), '(data, state_size=3)\n', (649, 669), False, 'import markovify\n')]
|
# We test the difference between the median and the mean. The images (30 in total) show students
# at random positions, and the camera was on a tripod in the classroom. We take
# on one hand the means and on the other the medians of the pixel values.
# The end results are very different!
#
# <NAME> April 2021
# Matlab -> Python Ville Tilvis June 2021
import numpy as np
import matplotlib.pyplot as plt
# Number of images
Nim = 30
# Initialise the matrices in which the means and medians are stored
im_ave = np.zeros([2000,2997,3])
im_median = np.zeros([2000,2997,3])
im_4D = np.zeros([2000,2997,3,Nim])
print("Ladataan kuvat:")
# Open the images one at a time
for iii in range (0,Nim):
fname = '../_kuvat/IMGP'+str(1423+iii)+'.jpg'
im_orig = plt.imread(fname,'jpg');
    # Add the current image to the stack
im_4D[:,:,:,iii] = im_orig;
    # Track the progress
print(iii+1,"/",Nim)
print("Lasketaan keskiarvo ja mediaani...")
im_ave = np.mean(im_4D,axis=3)/255;
im_median = np.median(im_4D,axis=3)/255;
print("Valmis!")
print("")
print("Näytetään kuvat...")
# Subtract the mean image and
# the median image from the
# red colour channel of an empty image
im0 = np.array(plt.imread('../_kuvat/IMGP1444.jpg','jpg'))/255
error1 = np.abs(im_ave-im0)
error2 = np.abs(im_median-im0)
errorpic = np.concatenate((error1,error2),axis=1)
errorpic = errorpic/np.max(errorpic[:,:,0])
errorpic = np.power(errorpic,0.3)
# View the images
plt.subplot(2,1,1)
plt.imshow(np.concatenate((im_ave,im_median),axis=1))
plt.axis('off')
plt.gcf().set_dpi(600)
plt.subplot(2,1,2)
plt.imshow(errorpic[:,:,0],cmap='gray', interpolation='none')
plt.axis('off')
plt.gcf().set_dpi(600)
plt.show()
print("Valmis!")
print("")
print("Tallennetaan kuvat...")
# Tallennetaan kuvat
plt.imsave('../_kuvat/im_average.jpg',im_ave,);
plt.imsave('../_kuvat/im_median.jpg',im_median);
print("Valmis!")
|
[
"matplotlib.pyplot.subplot",
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.median",
"numpy.power",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.max",
"numpy.mean",
"matplotlib.pyplot.imsave",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.imread",
"numpy.concatenate"
] |
[((479, 504), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3]'], {}), '([2000, 2997, 3])\n', (487, 504), True, 'import numpy as np\n'), ((515, 540), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3]'], {}), '([2000, 2997, 3])\n', (523, 540), True, 'import numpy as np\n'), ((547, 577), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3, Nim]'], {}), '([2000, 2997, 3, Nim])\n', (555, 577), True, 'import numpy as np\n'), ((1215, 1235), 'numpy.abs', 'np.abs', (['(im_ave - im0)'], {}), '(im_ave - im0)\n', (1221, 1235), True, 'import numpy as np\n'), ((1243, 1266), 'numpy.abs', 'np.abs', (['(im_median - im0)'], {}), '(im_median - im0)\n', (1249, 1266), True, 'import numpy as np\n'), ((1276, 1316), 'numpy.concatenate', 'np.concatenate', (['(error1, error2)'], {'axis': '(1)'}), '((error1, error2), axis=1)\n', (1290, 1316), True, 'import numpy as np\n'), ((1370, 1393), 'numpy.power', 'np.power', (['errorpic', '(0.3)'], {}), '(errorpic, 0.3)\n', (1378, 1393), True, 'import numpy as np\n'), ((1411, 1431), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1422, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1501), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1494, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1537, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1610), 'matplotlib.pyplot.imshow', 'plt.imshow', (['errorpic[:, :, 0]'], {'cmap': '"""gray"""', 'interpolation': '"""none"""'}), "(errorpic[:, :, 0], cmap='gray', interpolation='none')\n", (1556, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1623), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1616, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1656, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1787), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""../_kuvat/im_average.jpg"""', 'im_ave'], {}), "('../_kuvat/im_average.jpg', im_ave)\n", (1751, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1837), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""../_kuvat/im_median.jpg"""', 'im_median'], {}), "('../_kuvat/im_median.jpg', im_median)\n", (1799, 1837), True, 'import matplotlib.pyplot as plt\n'), ((724, 748), 'matplotlib.pyplot.imread', 'plt.imread', (['fname', '"""jpg"""'], {}), "(fname, 'jpg')\n", (734, 748), True, 'import matplotlib.pyplot as plt\n'), ((931, 953), 'numpy.mean', 'np.mean', (['im_4D'], {'axis': '(3)'}), '(im_4D, axis=3)\n', (938, 953), True, 'import numpy as np\n'), ((970, 994), 'numpy.median', 'np.median', (['im_4D'], {'axis': '(3)'}), '(im_4D, axis=3)\n', (979, 994), True, 'import numpy as np\n'), ((1335, 1360), 'numpy.max', 'np.max', (['errorpic[:, :, 0]'], {}), '(errorpic[:, :, 0])\n', (1341, 1360), True, 'import numpy as np\n'), ((1443, 1486), 'numpy.concatenate', 'np.concatenate', (['(im_ave, im_median)'], {'axis': '(1)'}), '((im_ave, im_median), axis=1)\n', (1457, 1486), True, 'import numpy as np\n'), ((1157, 1200), 'matplotlib.pyplot.imread', 'plt.imread', (['"""../_kuvat/IMGP1444.jpg"""', '"""jpg"""'], {}), "('../_kuvat/IMGP1444.jpg', 'jpg')\n", (1167, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1511), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1509, 1511), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1633), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1631, 
1633), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import gym
from reps.acreps import acREPS
np.random.seed(1337)
env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 250
env.unwrapped.dt = 0.05
env.unwrapped.sigma = 1e-4
# env.seed(1337)
acreps = acREPS(env=env, kl_bound=0.1, discount=0.985, lmbda=0.95,
scale=[1., 1., 8.0, 2.5], mult=0.5,
nb_vfeat=75, nb_pfeat=75, vf_reg=1e-12)
acreps.run(nb_iter=15, nb_train_samples=5000,
nb_eval_rollouts=25, nb_eval_steps=100)
# evaluate
rollouts, _ = acreps.evaluate(nb_rollouts=25, nb_steps=100)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=acreps.state_dim + acreps.act_dim, figsize=(12, 4))
for roll in rollouts:
for k, col in enumerate(ax[:-1]):
col.plot(roll['x'][:, k])
ax[-1].plot(roll['uc'])
plt.show()
|
[
"reps.acreps.acREPS",
"numpy.random.seed",
"gym.make",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
] |
[((63, 83), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (77, 83), True, 'import numpy as np\n'), ((91, 117), 'gym.make', 'gym.make', (['"""Pendulum-RL-v1"""'], {}), "('Pendulum-RL-v1')\n", (99, 117), False, 'import gym\n'), ((225, 365), 'reps.acreps.acREPS', 'acREPS', ([], {'env': 'env', 'kl_bound': '(0.1)', 'discount': '(0.985)', 'lmbda': '(0.95)', 'scale': '[1.0, 1.0, 8.0, 2.5]', 'mult': '(0.5)', 'nb_vfeat': '(75)', 'nb_pfeat': '(75)', 'vf_reg': '(1e-12)'}), '(env=env, kl_bound=0.1, discount=0.985, lmbda=0.95, scale=[1.0, 1.0, \n 8.0, 2.5], mult=0.5, nb_vfeat=75, nb_pfeat=75, vf_reg=1e-12)\n', (231, 365), False, 'from reps.acreps import acREPS\n'), ((604, 683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(acreps.state_dim + acreps.act_dim)', 'figsize': '(12, 4)'}), '(nrows=1, ncols=acreps.state_dim + acreps.act_dim, figsize=(12, 4))\n', (616, 683), True, 'import matplotlib.pyplot as plt\n'), ((806, 816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (814, 816), True, 'import matplotlib.pyplot as plt\n')]
|
from sage.all import RDF, CDF, matrix, prod
import scipy.linalg
import numpy as np
def column_space_intersection(*As, tol, orthonormal=False):
r"""
Return a matrix with orthonormal columns spanning the intersection of the
column spaces of the given matrices.
INPUT:
- ``*As`` -- matrices with a fixed number of rows and linearly independent
(or orthonormal) columns each
- ``tol`` -- tolerance for truncating the singular values to determine the
rank of the intersection
- ``orthonormal`` -- boolean (default: ``False``); if ``True``, the columns
of each matrix are assumed to be orthonormal
ALGORITHM: <NAME> -- Algorithm 12.4.3
"""
if len(As) < 1:
raise ValueError("at least one matrix required")
n = As[0].nrows()
for A in As:
if A.nrows() != n:
raise ValueError("matrices must have same number of rows")
if all(A.base_ring().is_exact() for A in As):
V = As[0].column_space()
for A in As[1:]:
V = V.intersection(A.column_space())
return V.basis_matrix().T
for A in As:
if A.base_ring() not in (RDF, CDF):
raise ValueError("only matrices over RDF/CDF or exact fields supported")
if any(A.ncols() == 0 for A in As):
return matrix(As[0].base_ring(), n, 0)
Qs = As if orthonormal else [A.QR()[0][:,:A.ncols()] for A in As]
if len(As) == 1:
return Qs[0]
# for better performance, we switch to numpy
# Taking slices or hermitian transposes is a bottleneck with double dense matrices in Sage.
Qs = [Q.numpy() for Q in Qs]
# C = prod([Qs[0].H] + [Q*Q.H for Q in Qs[1:-1]] + [Qs[-1]])
# sort Qs such that smallest matrix is last, second smallest first
Q_last = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
Q_first = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
C = Q_last
for Q in Qs: # without Q_last and Q_first
C = Q @ (Q.conj().T @ C) # this should be faster than (Q * Q.H) * C, since Q*Q.H is very large
C = Q_first.conj().T @ C
Σ, Vh = scipy.linalg.svd(C, overwrite_a=True)[1:] # we can overwrite, since C involves at least 1 multiplication
rk = np.sum(1-Σ < tol)
return matrix(Q_last @ Vh.T[:,:rk].conj())
def null_space_intersection(*As, tol):
r"""
Return a matrix with orthonormal columns spanning the intersection of the
null spaces of the given matrices.
INPUT:
- ``*As`` -- matrices with a fixed number of columns
- ``tol`` -- tolerance for truncating the singular values to determine the
rank of intermediate results
ALGORITHM: <NAME> -- Algorithm 12.4.2
"""
if len(As) < 1:
raise ValueError("at least one matrix required")
n = As[0].ncols()
if all(A.base_ring().is_exact() for A in As):
ker = As[0].right_kernel()
for A in As[1:]:
ker = ker.intersection(A.right_kernel())
# TODO document that this does not have orthonormal columns
return ker.basis_matrix().T
for A in As:
if A.base_ring() not in (RDF, CDF):
raise ValueError("only matrices over RDF/CDF or exact rings supported")
if A.ncols() != n:
raise ValueError("matrices must have same number of columns")
Y = None
for A in As:
if A.nrows() == 0:
continue
C = A * Y if Y is not None else A
Σ, V = C.SVD()[1:]
q = len([s for s in Σ.diagonal() if s > tol])
if q >= C.ncols():
return matrix(As[0].base_ring(), n, 0)
X = V[:, q:]
Y = Y * X if Y is not None else X
if Y is None:
# all the matrices have 0 rows
return matrix.identity(As[0].base_ring(), n)
else:
return Y
def null_space(A, tol):
import numpy
import scipy.linalg
if A.nrows() == 0:
return matrix.identity(A.base_ring(), A.ncols())
return matrix(numpy.ascontiguousarray(scipy.linalg.null_space(A, rcond=tol)))
def _tests_sage():
"""
TESTS::
sage: from momentproblems import intersections
sage: TestSuite(intersections._tests_sage()).run(skip='_test_pickling')
"""
from sage.all import SageObject, matrix, RDF, ZZ
import numpy
import numpy.linalg
import scipy.linalg
class Tests(SageObject):
def matrices(self):
# test data
for _ in range(5):
for num in range(1, 5):
# generate some matrices with few rows, so we can intersect their kernels
matrices = [matrix.random(RDF, ZZ.random_element(0, 4), 9) for _ in range(num)]
yield matrices
def matrices2(self):
# test data
for _ in range(5):
for num in range(1, 5):
# generate some matrices with few rows, so we can intersect their kernels
matrices = [matrix.random(RDF, 9, 9 - ZZ.random_element(0, 4)) for _ in range(num)]
yield matrices
def equal_spaces(self, A, B, tol):
from numpy.linalg import matrix_rank
return matrix_rank(A.augment(B), tol) == matrix_rank(A, tol) == matrix_rank(B, tol)
def _test_null_space_intersection(self, **kwds):
tol = 1e-10
for As in self.matrices():
ker = null_space_intersection(*As, tol=tol)
assert all([ker.ncols() == 0 or A.nrows() == 0 or (A * ker).norm() < tol for A in As])
assert max(0, As[0].ncols() - sum([A.nrows() for A in As])) == ker.ncols() # generically the correct dimension
# the intersection is also simply the null space of the augmented matrix
ker2 = null_space(matrix(RDF, [v for A in As for v in A.rows()], ncols=As[0].ncols()), tol)
assert self.equal_spaces(ker, ker2, tol)
def _test_column_space_intersection(self, **kwds):
tol = 1e-10
for As in self.matrices2():
B = column_space_intersection(*As, tol=tol)
assert B.ncols() == max(0, As[0].nrows() - sum([A.nrows() - A.ncols() for A in As])) # generically the correct dimension
for A in As:
assert self.equal_spaces(A.augment(B), A, tol) # B is contained in A
def _test_compatibilty(self, **kwds):
tol = 1e-10
for As in self.matrices():
# computing null space intersection is the same as computing
# column space intersection of null spaces
ker = null_space_intersection(*As, tol=tol)
ker2 = column_space_intersection(*[null_space(A, tol) for A in As], tol=tol, orthonormal=True)
assert self.equal_spaces(ker, ker2, tol)
return Tests()
|
[
"numpy.linalg.matrix_rank",
"numpy.sum",
"sage.all.ZZ.random_element"
] |
[((2226, 2245), 'numpy.sum', 'np.sum', (['(1 - Σ < tol)'], {}), '(1 - Σ < tol)\n', (2232, 2245), True, 'import numpy as np\n'), ((5203, 5222), 'numpy.linalg.matrix_rank', 'matrix_rank', (['A', 'tol'], {}), '(A, tol)\n', (5214, 5222), False, 'from numpy.linalg import matrix_rank\n'), ((5226, 5245), 'numpy.linalg.matrix_rank', 'matrix_rank', (['B', 'tol'], {}), '(B, tol)\n', (5237, 5245), False, 'from numpy.linalg import matrix_rank\n'), ((4615, 4638), 'sage.all.ZZ.random_element', 'ZZ.random_element', (['(0)', '(4)'], {}), '(0, 4)\n', (4632, 4638), False, 'from sage.all import SageObject, matrix, RDF, ZZ\n'), ((4976, 4999), 'sage.all.ZZ.random_element', 'ZZ.random_element', (['(0)', '(4)'], {}), '(0, 4)\n', (4993, 4999), False, 'from sage.all import SageObject, matrix, RDF, ZZ\n')]
|
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
__version__ = '0.1.0'
setup(
name='pyneurovault_upload',
version='0.1.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ljchang/pyneurovault_upload',
packages=['pyneurovault_upload'],
license='MIT',
install_requires=['requests>=2.10.0'],
description='A Python library for interfacing with http://neurovault.org upload API',
keywords=['neuroimaging', 'neurovault'],
classifiers=[
"Programming Language :: Python",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
]
)
|
[
"distutils.core.setup"
] |
[((124, 694), 'distutils.core.setup', 'setup', ([], {'name': '"""pyneurovault_upload"""', 'version': '"""0.1.0"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/ljchang/pyneurovault_upload"""', 'packages': "['pyneurovault_upload']", 'license': '"""MIT"""', 'install_requires': "['requests>=2.10.0']", 'description': '"""A Python library for interfacing with http://neurovault.org upload API"""', 'keywords': "['neuroimaging', 'neurovault']", 'classifiers': "['Programming Language :: Python', 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License']"}), "(name='pyneurovault_upload', version='0.1.0', author='<NAME>',\n author_email='<EMAIL>', url=\n 'https://github.com/ljchang/pyneurovault_upload', packages=[\n 'pyneurovault_upload'], license='MIT', install_requires=[\n 'requests>=2.10.0'], description=\n 'A Python library for interfacing with http://neurovault.org upload API',\n keywords=['neuroimaging', 'neurovault'], classifiers=[\n 'Programming Language :: Python', 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License'])\n", (129, 694), False, 'from distutils.core import setup\n')]
|
"""User Write Stage API Views"""
from rest_framework.viewsets import ModelViewSet
from authentik.core.api.used_by import UsedByMixin
from authentik.flows.api.stages import StageSerializer
from authentik.stages.user_write.models import UserWriteStage
class UserWriteStageSerializer(StageSerializer):
"""UserWriteStage Serializer"""
class Meta:
model = UserWriteStage
fields = StageSerializer.Meta.fields + ["create_users_as_inactive", "create_users_group"]
class UserWriteStageViewSet(UsedByMixin, ModelViewSet):
"""UserWriteStage Viewset"""
queryset = UserWriteStage.objects.all()
serializer_class = UserWriteStageSerializer
filterset_fields = "__all__"
search_fields = ["name"]
ordering = ["name"]
|
[
"authentik.stages.user_write.models.UserWriteStage.objects.all"
] |
[((592, 620), 'authentik.stages.user_write.models.UserWriteStage.objects.all', 'UserWriteStage.objects.all', ([], {}), '()\n', (618, 620), False, 'from authentik.stages.user_write.models import UserWriteStage\n')]
|
import asyncio
from threading import Thread
async def production_task():
i = 0
while 1:
        # On each pass of the loop, register another consumption coroutine onto the event loop
        # running in the worker thread; thread_loop keeps receiving endless tasks that print i
        asyncio.run_coroutine_threadsafe(consumption(i),
                                         thread_loop)  # note: run_coroutine_threadsafe only works with an event loop running in another thread
        await asyncio.sleep(2)  # the await is required
i += 1
async def consumption(i):
while True:
print("我是第{}任务".format(i))
await asyncio.sleep(1)
def start_loop(loop):
    # Run the event loop; the loop object is passed in as an argument
asyncio.set_event_loop(loop)
loop.run_forever()
# Consumer loop
thread_loop = asyncio.new_event_loop()  # create a new event loop
run_loop_thread = Thread(target=start_loop, args=(thread_loop,))  # run this event loop in a separate thread so it does not block the main thread
run_loop_thread.start()  # start the thread; the coroutine event loop starts running as well
# Producer loop
advocate_loop = asyncio.get_event_loop()  # the production task coroutine is registered on this loop
advocate_loop.run_until_complete(production_task())  # run this loop
|
[
"threading.Thread",
"asyncio.get_event_loop",
"asyncio.sleep",
"asyncio.set_event_loop",
"asyncio.new_event_loop"
] |
[((658, 682), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (680, 682), False, 'import asyncio\n'), ((714, 760), 'threading.Thread', 'Thread', ([], {'target': 'start_loop', 'args': '(thread_loop,)'}), '(target=start_loop, args=(thread_loop,))\n', (720, 760), False, 'from threading import Thread\n'), ((862, 886), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (884, 886), False, 'import asyncio\n'), ((580, 608), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (602, 608), False, 'import asyncio\n'), ((358, 374), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (371, 374), False, 'import asyncio\n'), ((500, 516), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (513, 516), False, 'import asyncio\n')]
|
'''
This file is a modification of the file below to enable map save
https://github.com/simondlevy/PyRoboViz/blob/master/roboviz/__init__.py
roboviz.py - Python classes for displaying maps and robots
Requires: numpy, matplotlib
Copyright (C) 2018 <NAME>
This file is part of PyRoboViz.
PyRoboViz is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
PyRoboViz is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
# Essential imports
import matplotlib.pyplot as plt
import matplotlib.cm as colormap
import matplotlib.lines as mlines
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import datetime
# This helps with Raspberry Pi
import matplotlib
matplotlib.use('TkAgg')
class Visualizer(object):
# Robot display params
ROBOT_HEIGHT_M = 0.5
ROBOT_WIDTH_M = 0.3
def __init__(self, map_size_pixels, map_size_meters, title, show_trajectory=False, zero_angle=0):
# Put origin in center
self._init(map_size_pixels, map_size_meters, title, -map_size_pixels / 2, show_trajectory, zero_angle)
def display(self, x_m, y_m, theta_deg):
self._setPose(x_m, y_m, theta_deg)
return self._refresh()
def _init(self, map_size_pixels, map_size_meters, title, shift, show_trajectory=False, zero_angle=0):
# Store constants for update
map_size_meters = map_size_meters
self.map_size_pixels = map_size_pixels
self.map_scale_meters_per_pixel = map_size_meters / float(map_size_pixels)
# Create a byte array to display the map with a color overlay
self.bgrbytes = bytearray(map_size_pixels * map_size_pixels * 3)
# Make a nice big (10"x10") figure
fig = plt.figure(figsize=(10,10), facecolor="white")
fig.set_facecolor("white")
# Added this line to make sure the map background is white
plt.rcParams['figure.facecolor'] = 'white'
# Store Python ID of figure to detect window close
self.figid = id(fig)
fig.canvas.set_window_title('SLAM')
plt.title(title)
# Use an "artist" to speed up map drawing
self.img_artist = None
# No vehicle to show yet
self.vehicle = None
# Create axes
self.ax = fig.gca()
self.ax.set_xlabel('X (m)')
self.ax.set_ylabel('Y (m)')
# self.ax.grid(False)
        # Relabel the axis ticks to show meters rather than pixels
ticks = np.arange(shift,self.map_size_pixels+shift+100,100)
labels = [str(self.map_scale_meters_per_pixel * tick) for tick in ticks]
self.ax.set_xticklabels(labels)
self.ax.set_yticklabels(labels)
self.ax.set_facecolor('w')
# Store previous position for trajectory
self.prevpos = None
self.showtraj = show_trajectory
# We base the axis on pixels, to support displaying the map
self.ax.set_xlim([shift, self.map_size_pixels+shift])
self.ax.set_ylim([shift, self.map_size_pixels+shift])
# Set up default shift for centering at origin
shift = -self.map_size_pixels / 2
# print("shift = " + str(shift))
self.zero_angle = zero_angle
self.start_angle = None
self.rotate_angle = 0
def _setPose(self, x_m, y_m, theta_deg):
'''
Sets vehicle pose:
X: left/right (m)
Y: forward/back (m)
theta: rotation (degrees)
'''
# If zero-angle was indicated, grab first angle to compute rotation
if self.start_angle is None and self.zero_angle != 0:
self.start_angle = theta_deg
self.rotate_angle = self.zero_angle - self.start_angle
# Rotate by computed angle, or zero if no zero-angle indicated
d = self.rotate_angle
a = np.radians(d)
c = np.cos(a)
s = np.sin(a)
x_m,y_m = x_m*c-y_m*s, y_m*c+x_m*s
# Erase previous vehicle image after first iteration
if not self.vehicle is None:
self.vehicle.remove()
# Use a very short arrow shaft to orient the head of the arrow
theta_rad = np.radians(theta_deg+d)
c = np.cos(theta_rad)
s = np.sin(theta_rad)
l = 0.1
dx = l * c
dy = l * s
s = self.map_scale_meters_per_pixel
self.vehicle=self.ax.arrow(x_m/s, y_m/s,
dx, dy, head_width=Visualizer.ROBOT_WIDTH_M/s,
head_length=Visualizer.ROBOT_HEIGHT_M/s, fc='r', ec='r')
# Show trajectory if indicated
currpos = self._m2pix(x_m,y_m)
if self.showtraj and not self.prevpos is None:
if (self.prevpos[0] != 0 and self.prevpos[1] != 0):
self.ax.add_line(mlines.Line2D((self.prevpos[0],currpos[0]), (self.prevpos[1],currpos[1])))
self.prevpos = currpos
def _refresh(self):
# If we have a new figure, something went wrong (closing figure failed)
if self.figid != id(plt.gcf()):
return False
# Added this line to make sure the map background is white
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['savefig.facecolor'] = 'white'
# Redraw current objects without blocking
plt.draw()
now = datetime.datetime.now()
# Create a directory named 'gif' inside the base directory
plt.savefig('gif/slamMap' + '- ' + str(now.hour).zfill(2) + '- ' + str(now.minute).zfill(2) + '- ' + str(now.second).zfill(2) + '.png')
# Refresh display, setting flag on window close or keyboard interrupt
try:
plt.pause(.01) # Arbitrary pause to force redraw
return True
except:
return False
return True
def _m2pix(self, x_m, y_m):
s = self.map_scale_meters_per_pixel
return x_m/s, y_m/s
class MapVisualizer(Visualizer):
def __init__(self, map_size_pixels, map_size_meters, title='MapVisualizer', show_trajectory=False):
# Put origin in lower left; disallow zero-angle setting
Visualizer._init(self, map_size_pixels, map_size_meters, title, 0, show_trajectory, 0)
def display(self, x_m, y_m, theta_deg, mapbytes):
self._setPose(x_m, y_m, theta_deg)
mapimg = np.reshape(np.frombuffer(mapbytes, dtype=np.uint8), (self.map_size_pixels, self.map_size_pixels))
# Pause to allow display to refresh
plt.pause(.001)
if self.img_artist is None:
self.img_artist = self.ax.imshow(mapimg, cmap=colormap.gray)
else:
self.img_artist.set_data(mapimg)
return self._refresh()
|
[
"matplotlib.pyplot.title",
"numpy.radians",
"matplotlib.lines.Line2D",
"numpy.frombuffer",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.arange",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.gcf",
"datetime.datetime.now"
] |
[((1023, 1046), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (1037, 1046), False, 'import matplotlib\n'), ((2046, 2093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'facecolor': '"""white"""'}), "(figsize=(10, 10), facecolor='white')\n", (2056, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2404), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2397, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2843), 'numpy.arange', 'np.arange', (['shift', '(self.map_size_pixels + shift + 100)', '(100)'], {}), '(shift, self.map_size_pixels + shift + 100, 100)\n', (2795, 2843), True, 'import numpy as np\n'), ((4145, 4158), 'numpy.radians', 'np.radians', (['d'], {}), '(d)\n', (4155, 4158), True, 'import numpy as np\n'), ((4171, 4180), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (4177, 4180), True, 'import numpy as np\n'), ((4193, 4202), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (4199, 4202), True, 'import numpy as np\n'), ((4471, 4496), 'numpy.radians', 'np.radians', (['(theta_deg + d)'], {}), '(theta_deg + d)\n', (4481, 4496), True, 'import numpy as np\n'), ((4507, 4524), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (4513, 4524), True, 'import numpy as np\n'), ((4537, 4554), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (4543, 4554), True, 'import numpy as np\n'), ((5648, 5658), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5696), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5694, 5696), False, 'import datetime\n'), ((6830, 6846), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (6839, 6846), True, 'import matplotlib.pyplot as plt\n'), ((6013, 6028), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (6022, 6028), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6729), 'numpy.frombuffer', 'np.frombuffer', (['mapbytes'], {'dtype': 'np.uint8'}), '(mapbytes, dtype=np.uint8)\n', (6703, 6729), True, 'import numpy as np\n'), ((5332, 5341), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5339, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5148), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['(self.prevpos[0], currpos[0])', '(self.prevpos[1], currpos[1])'], {}), '((self.prevpos[0], currpos[0]), (self.prevpos[1], currpos[1]))\n', (5086, 5148), True, 'import matplotlib.lines as mlines\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Import libraries and data
#
# Dataset was obtained from the capstone project description (direct link [here](https://d3c33hcgiwev3.cloudfront.net/_429455574e396743d399f3093a3cc23b_capstone.zip?Expires=1530403200&Signature=FECzbTVo6TH7aRh7dXXmrASucl~Cy5mlO94P7o0UXygd13S~Afi38FqCD7g9BOLsNExNB0go0aGkYPtodekxCGblpc3I~R8TCtWRrys~2gciwuJLGiRp4CfNtfp08sFvY9NENaRb6WE2H4jFsAo2Z2IbXV~llOJelI3k-9Waj~M_&Key-Pair-Id=<KEY>)) and split manually into separate csv files. They were stored in my personal github account (folder link [here](https://github.com/caiomiyashiro/RecommenderSystemsNotebooks/tree/master/data/capstone)); you can download them and paste them inside your working directory in order for this notebook to run.
# In[1]:
import pandas as pd
import numpy as np
# ## Preprocess data
#
# Float data came with ',' as the decimal separator in the csv while python works with '.', so the numbers were treated as text. In order to convert them to numbers, I first replaced all the commas by periods and then converted the columns to float.
# In[2]:
items = pd.read_csv('data/capstone/Capstone Data - Office Products - Items.csv', index_col=0)
actual_ratings = pd.read_csv('data/capstone/Capstone Data - Office Products - Ratings.csv', index_col=0)
content_based = pd.read_csv('data/capstone/Capstone Data - Office Products - CBF.csv', index_col=0)
user_user = pd.read_csv('data/capstone/Capstone Data - Office Products - User-User.csv', index_col=0)
item_item = pd.read_csv('data/capstone/Capstone Data - Office Products - Item-Item.csv', index_col=0)
matrix_fact = pd.read_csv('data/capstone/Capstone Data - Office Products - MF.csv', index_col=0)
pers_bias = pd.read_csv('data/capstone/Capstone Data - Office Products - PersBias.csv', index_col=0)
items[['Availability','Price']] = items[['Availability','Price']].apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
# preprocess
content_based = content_based.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
user_user = user_user.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
item_item = item_item.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
matrix_fact = matrix_fact.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
pers_bias = pers_bias.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
print('items.shape = ' + str(items.shape))
print('actual_ratings.shape = ' + str(actual_ratings.shape))
print('content_based.shape = ' + str(content_based.shape))
print('user_user.shape = ' + str(user_user.shape))
print('item_item.shape = ' + str(item_item.shape))
print('matrix_fact.shape = ' + str(matrix_fact.shape))
print('pers_bias.shape = ' + str(pers_bias.shape))
actual_ratings.head()
# # Class RecommenderEvaluator
#
# To make it easier to evaluate the metrics, I created a class that receives all the original ratings and the predicted ratings from every recommender system, and defined functions to extract all the metrics established in section 1 of the capstone report. Let's take a look at a summary of the class before looking at the code:
# - **Constructor (init)**: receives all recommendation algorithms, plus the actual ratings and the list of items. All data is contained in the files downloaded from Coursera. Besides storing all recommendation algorithms, the constructor also calculates the 20 most frequently purchased items, which are used in the popularity metric calculation.
#
# - **get_observed_ratings**: as the ratings matrix is sparse, this method only returns the items a user with id userId has purchased.
#
# - **get_top_n**: by ordering all the predicted ratings for each recommendation algorithm, we can extract what would be their 'top' recommendation for a given user. Given a parameter $n$, we can then return all the top $n$ recommendations for all the recommendation algorithms.
#
# - **rmse**: by comparing the rating a given user has given to an item with the rating an algorithm has predicted for that user, we get an idea of how much error the algorithm makes when predicting the user's ratings. Here we don't work with lists, as each user usually has rated only a small number of items. So we get all the items the user has rated, recover these items from the algorithms' predictions and then calculate the error.
#
# - **nDCG**: Looking at ranked lists now, we can get an idea of how close to optimal the ranked lists are. Using the scoring scheme defined in the report, we can calculate the overall DCG for the recommenders' lists and then normalise it into the nDCG (a small worked example follows this list).
#
# - **Price and availability diversity**: Diversity metrics which evaluate how the recommended items' prices vary, *i.e.*, the standard deviation of the price. The higher, the better in this case. The same goes for the availability index, but here a higher standard deviation means the models are recommending items that are present in local stores together with items that are not.
#
# - **Popularity**: A popularity-oriented recommender tries to recommend items which have a high chance of being purchased. In the formulation of this metric, an item has a high chance of being purchased if lots of people have already purchased it. In the class constructor, we take the observed ratings data and the item list and select the top $n$ (default = 20) most purchased items. For a recommendation list, we return the ratio of how many recommended items were inside this list of top $n$ ones.
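#
# To make the nDCG computation concrete, below is a minimal sketch with a made-up score list (the values are illustrative assumptions, not taken from the capstone data): each score at 1-based position $i$ is divided by $\log_2(i + 1)$, the discounted scores are summed into a DCG, and the result is normalised by the DCG of the ideally ordered list.

# In[ ]:

example_scores = [2, 0, -1, 1, 2]  # hypothetical scores for a 5-item recommendation list
dcg = sum(s / np.log2(pos + 2) for pos, s in enumerate(example_scores))  # pos starts at 0, so the discount is log2(position + 1)
ideal_dcg = sum(s / np.log2(pos + 2) for pos, s in enumerate(sorted(example_scores, reverse=True)))
print('example nDCG:', dcg / ideal_dcg if ideal_dcg != 0 else 0)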
# In[3]:
class RecommenderEvaluator:
def __init__(self, items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias):
self.items = items
self.actual_ratings = actual_ratings
# static data containing the average score given by each user
self.average_rating_per_userid = actual_ratings.apply(lambda row: np.average(row[~np.isnan(row)]))
self.content_based = content_based
self.user_user = user_user
self.item_item = item_item
self.matrix_fact = matrix_fact
self.pers_bias = pers_bias
# aggregate list. Makes for loops among all recommenders' predictions easier
self.recommenders_list = [self.content_based, self.user_user, self.item_item, self.matrix_fact,self.pers_bias]
self.recommenders_list_names = ['content_based', 'user_user', 'item_item', 'matrix_fact','pers_bias']
# Used for item popularity metric.
# Calculate the 20 most popular items (item which most of the customers bought)
N_LIM = 20
perc_users_bought_item = self.actual_ratings.apply(lambda item: np.sum(~np.isnan(item)), axis=0)/actual_ratings.shape[1]
sort_pop_items = np.argsort(perc_users_bought_item)[::-1]
self.pop_items = perc_users_bought_item.iloc[sort_pop_items][:N_LIM].index.values.astype(np.int)
def get_observed_ratings(self, userId):
"""
Returns all the items a given user evaluated and their ratings. Used mainly by all the metrics calculation
:parameter: userId - user id
:return: array of rated items. Index is the item id and value is the item rating
"""
userId = str(userId)
filtered_ratings = self.actual_ratings[userId]
rated_items = filtered_ratings[~np.isnan(filtered_ratings)]
return rated_items
def get_top_n(self, userId, n):
"""
Get the top n recommendations for every recommender in the list given a user id
:parameter: userId - user id
:parameter: n - max number of recommendations to return
        :return: dictionary where the key is the recommender's name and the value is an array of size n with the top n recommendations.
"""
userId = str(userId)
predicted_ratings = dict()
for recommender, recommender_name in zip(self.recommenders_list,self.recommenders_list_names):
item_ids = recommender[userId].argsort().sort_values()[:n].index.values
predicted_ratings[recommender_name] = item_ids
return predicted_ratings
def rmse(self, userId):
"""
Root Mean Square Error of the predicted and observed values between the recommender's prediction and the actual ratings
:parameter: userId - user id
        :return: dataframe containing the rmse from all recommenders given user id
"""
userId = str(userId)
observed_ratings = self.get_observed_ratings(userId)
rmse_list = {'rmse': []}
for recommender in self.recommenders_list:
predicted_ratings = recommender.loc[observed_ratings.index, userId]
rmse_list['rmse'].append(np.sqrt(np.average((predicted_ratings - observed_ratings)**2)))
rmse_list = pd.DataFrame(rmse_list, index = self.recommenders_list_names)
return rmse_list
def nDCG(self, userId, top_n = 5, individual_recommendation = None):
"""
Normalised Discounted Cumulative Gain for all recommenders given user id
:parameter: userId - user id
        :return: dataframe containing the nDCG from all recommenders given user id
"""
ri = self.get_observed_ratings(userId)
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
# 1st step: Given recommendations, transform list into scores (see score transcriptions in the capstone report)
scores_all = []
for name, item_list in topn.items():
scores = np.empty_like(item_list) # initialise 'random' array
scores[:] = -10 ###########################
# check which items returned by the recommender
is_already_rated = np.isin(item_list, ri.index.values) # the user already rated. Items users didn't rate
scores[~is_already_rated] = 0 # receive score = 0
for index, score in enumerate(scores):
if(score != 0): # for each recommended items the user rated
if(ri[item_list[index]] < self.average_rating_per_userid[userId] - 1): # score accordingly the report
scores[index] = -1
elif((ri[item_list[index]] >= self.average_rating_per_userid[userId] - 1) &
(ri[item_list[index]] < self.average_rating_per_userid[userId] + 0.5)):
scores[index] = 1
else:
scores[index] = 2
scores_all.append(scores) # append all the transformed scores
scores_all
# 2nd step: Given scores, calculate the model's DCG, ideal DCG and then nDCG
nDCG_all = dict()
for index_model, scores_model in enumerate(scores_all): # for each model
model_DCG = 0 # calculate model's DCG
for index, score in enumerate(scores_model): #
index_ = index + 1 #
model_DCG = model_DCG + score/np.log2(index_ + 1) #
ideal_rank_items = np.sort(scores_model)[::-1] # calculate model's ideal DCG
ideal_rank_DCG = 0 #
for index, ideal_score in enumerate(ideal_rank_items): #
index_ = index + 1 #
ideal_rank_DCG = ideal_rank_DCG + ideal_score/np.log2(index_ + 1) #
if((ideal_rank_DCG == 0) | (np.abs(ideal_rank_DCG) < np.abs(model_DCG))): # if nDCG is 0 or only negative scores came up
nDCG = 0
else: # calculate final nDCG when ideal DCG is != 0
nDCG = model_DCG/ideal_rank_DCG
nDCG_all[results_pandas_index[index_model]] = nDCG # save each model's nDCG in a dict
# convert it to dataframe
result_final = pd.DataFrame(nDCG_all, index=range(1)).transpose()
result_final.columns = ['nDCG']
return result_final
def price_diversity(self,userId,top_n = 5,individual_recommendation = None):
"""
Mean and standard deviation of the price of the top n products recommended by each algorithm.
        The intuition is that a recommender with high price-wise diversity has a high price standard deviation
:parameter: userId - user id
        :return: dataframe containing the price's mean and standard deviation from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
else:
topn = individual_recommendation
stats = pd.DataFrame()
for key, value in topn.items():
data_filtered = self.items.loc[topn[key]][['Price']].agg(['mean','std']).transpose()
data_filtered.index = [key]
stats = stats.append(data_filtered)
return stats
def availability_diversity(self,userId,top_n = 5,individual_recommendation = None):
"""
        Mean and standard deviation of the availability index of the top n products recommended by each algorithm.
        The intuition is that a recommender with high availability diversity has a small mean value of the availability index
:parameter: userId - user id
        :return: dataframe containing the availability index's mean and standard deviation from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
else:
topn = individual_recommendation
stats = pd.DataFrame()
for key, value in topn.items():
data_filtered = self.items.loc[topn[key]][['Availability']].agg(['mean','std']).transpose()
data_filtered.index = [key]
stats = stats.append(data_filtered)
return stats
def popularity(self, userId,top_n = 5,individual_recommendation = None):
"""
Return the ratio of how many items of the top n items are among the most popular purchased items. Default is
the 20 most purchased items.
:parameter: userId - user id
        :return: dataframe containing the ratio of popular items in the recommended list from all recommenders given user id
"""
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
results = {'popularity': []}
for recommender, recommendations in topn.items():
popularity = np.sum(np.isin(recommendations,self.pop_items))
results['popularity'].append(popularity)
return pd.DataFrame(results,index = results_pandas_index)
def precision_at_n(self, userId, top_n = 5, individual_recommendation = None):
if(individual_recommendation is None):
topn = self.get_top_n(userId,top_n)
results_pandas_index = self.recommenders_list_names
else:
topn = individual_recommendation
results_pandas_index = list(individual_recommendation.keys())
observed_ratings = self.get_observed_ratings(userId).index.values
precisions = {'precision_at_'+str(top_n): []}
for recommender, recommendations in topn.items():
precisions['precision_at_'+str(top_n)].append(np.sum(np.isin(recommendations, observed_ratings))/top_n)
return pd.DataFrame(precisions,index = results_pandas_index)
# # Test methods:
#
# Just to have an idea of the output of each method, let's call them all with a test user. In the next section we will calculate these metrics for all users.
# In[4]:
userId = '64'
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
# ## Test RMSE
# In[5]:
re.rmse(userId)
# ## Test nDCG
# In[6]:
re.nDCG(userId)
# ## Test Diversity - Price and Availability
# In[7]:
re.price_diversity(userId)
# In[8]:
re.availability_diversity(userId)
# ## Test Popularity
# In[9]:
re.popularity(userId)
# ## Test Precision@N
# In[10]:
re.precision_at_n(userId)
# # Average metrics by all users
#
# Specifically for user 907, the recommendations from the user-user recommender came back all null (in the original dataset). This particularly impacted the RMSE calculation, as a single NaN ruins the entire average. So specifically for RMSE we did a separate calculation. All the other metrics are going to be calculated in the next code block.
# In[11]:
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
i = 0
count = np.array([0,0,0,0,0])
for userId in actual_ratings.columns:
if(userId == '907'):
rmse_recommenders = re.rmse(userId).fillna(0)
else:
rmse_recommenders = re.rmse(userId)
count = count + rmse_recommenders['rmse']
# as we didn't use user 907 for user user, divide it by the number of users - 1
denominator = [len(actual_ratings.columns)] * 5
denominator[1] = len(actual_ratings.columns) - 1
print('Average RMSE for all users')
count/ denominator
# In[12]:
count_nDCG = np.array([0,0,0,0,0])
count_diversity_price = np.ndarray([5,2])
count_diversity_availability = np.ndarray([5,2])
count_popularity = np.array([0,0,0,0,0])
count_precision_at_5 = np.array([0,0,0,0,0])
for userId in actual_ratings.columns:
nDCG_recommenders = re.nDCG(userId)
count_nDCG = count_nDCG + nDCG_recommenders['nDCG']
diversity_price_recommenders = re.price_diversity(userId)
count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]
diversity_availability_recommenders = re.availability_diversity(userId)
count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]
popularity_recommenders = re.popularity(userId)
count_popularity = count_popularity + popularity_recommenders['popularity']
precision_recommenders = re.precision_at_n(userId)
count_precision_at_5 = count_precision_at_5 + precision_recommenders['precision_at_5']
print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('---\n')
print('Average Precision@5')
print('---\n')
print(count_precision_at_5/len(actual_ratings.columns))
# # Final Analysis
#
# In terms of **RMSE**, the user-user collaborative filtering proved to be the most effective, although not significantly better than the others.
#
# In terms of the nDCG rank score, user-user and also item-item collaborative filtering were the best.
#
# In terms of price diversity, the item-item algorithm was the most diverse, recommending products varying by ~32 dollars around the mean item price. Matrix factorisation and user-user follow right behind, with a price standard deviation around 25 dollars. An interesting point here was the *pers_bias* algorithm, as it recommended basically cheap products with a low standard deviation.
#
# For the availability index, all the algorithms besides user-user managed to recommend items not so present in local stores **together** with items present in local stores, as we can see they also provided items with a high availability index (high standard deviation).
#
# In terms of popularity, no algorithm actually managed to obtain good scores the way we defined them. So, if popularity is a focus in the future, we can either change the popularity concept or improve the recommender mechanics so that it predicts higher scores for the most popular items in the store.
#
# After this evaluation, it seemed to us that the item-item recommender system had an overall better performance, highlighted by its diversity scores. Unfortunately, the items that the item-item recommender suggested are overall pricy, and we can check whether it can be mixed with the pers_bias algorithm, as that one recommended cheap products with a low price standard deviation. Matrix factorization performed well too, but it didn't outperform any of the other recommenders.
# # Hybridization Techniques - Part III
#
# We are trying four different types of hybridization here.
#
# 1. Linear ensemble
# 2. Non linear ensemble
# 3. Top 1 from each recommender
# 4. Recommender switching
#
# The first two options address the recommenders' performance in terms of how well they predict the users' ratings, so their only evaluation will be in terms of RMSE.
#
# The third approach has the intuition that, if we take the top 1 recommendation from each algorithm, the resulting 5-item list will perform better at identifying 'good' items for users. In this case, we defined an item as good if the recommender suggested an item the user had already bought. Therefore, the final measurement of this hybridization mechanism is precision@5, as we end up with a 5-item list.
#
# The final mixing strategy builds on how collaborative filtering mechanisms perform on items that did not have enough users/ratings in their calculations. Since this is a well-known weakness of these recommenders, the idea was to check how many items would be affected if we established a threshold for the minimum amount of data required before using collaborative filtering. If an item doesn't have enough support in the form of users' ratings, we could fall back to a content-based recommendation or, as a last resort, a non-personalised one. A small sketch of this idea follows.
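#
# Below is a minimal sketch of what such a switching rule could look like (the 10-rating threshold and the helper name `switch_prediction` are illustrative assumptions, not part of the capstone code): use the item-item prediction when an item has enough observed ratings, and fall back to the content-based prediction otherwise.

# In[ ]:

def switch_prediction(item_id, user_id, min_support=10):
    support = np.sum(~np.isnan(actual_ratings.loc[item_id]))  # number of users who actually rated this item
    if support >= min_support:
        return item_item.loc[item_id, user_id]      # enough ratings: trust the collaborative filter
    return content_based.loc[item_id, user_id]      # sparse item: fall back to the content-based score

# example call (user ids are string column labels in this notebook): switch_prediction(items.index[0], '64')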
#
#
# ## Dataset Creation and User Sample Definition
#
# ### Dataset
#
# For the first and second approaches, we need another perspective on the data. The dataset contains all the existing ratings from all users and concatenates all the predictions made by the 5 traditional recommenders. The idea is to use the observed rating as the target variable and all recommenders' predictions as independent variables, *i.e.* treat this as a regression problem.
# In[13]:
obs_ratings_list = []
content_based_list = []
user_user_list = []
item_item_list = []
matrix_fact_list = []
pers_bias_list = []
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
for userId in actual_ratings.columns:
observed_ratings = re.get_observed_ratings(userId)
obs_ratings_list.extend(observed_ratings.values)
content_based_list.extend(content_based.loc[observed_ratings.index, userId].values)
user_user_list.extend(user_user.loc[observed_ratings.index, userId].values)
item_item_list.extend(item_item.loc[observed_ratings.index, userId].values)
matrix_fact_list.extend(matrix_fact.loc[observed_ratings.index, userId].values)
pers_bias_list.extend(pers_bias.loc[observed_ratings.index, userId].values)
dataset = pd.DataFrame({'rating': obs_ratings_list, 'content_based':content_based_list, 'user_user': user_user_list,
'item_item':item_item_list, 'matrix_fact':matrix_fact_list,'pers_bias':pers_bias_list})
dataset = dataset.dropna()
dataset.head()
# ### In order to have an idea of the results, let's choose 3 users randomly to show the predictions using the new hybrid models
# In[14]:
np.random.seed(42)
sample_users = np.random.choice(actual_ratings.columns, 3).astype(str)
print('sample_users: ' + str(sample_users))
# ### Get recommenders' predictions for sample users in order to create input for ensemble models (hybridization I and II)
# In[15]:
from collections import OrderedDict
df_sample = pd.DataFrame()
for user in sample_users:
content_based_ = re.content_based[user]
user_user_ = re.user_user[user]
item_item_ = re.item_item[user]
matrix_fact_ = re.matrix_fact[user]
pers_bias_ = re.pers_bias[user]
df_sample = df_sample.append(pd.DataFrame(OrderedDict({'user':user,'item':actual_ratings.index.values,'content_based':content_based_, 'user_user':user_user_, 'item_item':item_item_,
'matrix_fact':matrix_fact_,'pers_bias':pers_bias_})), ignore_index=True)
df_sample.head()
#
# ## Focus on Performance (RMSE) I - Linear Model
# In[16]:
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
linear = LinearRegression()
print('RMSE for linear ensemble of recommender systems:')
np.mean(cross_val_score(linear, dataset.drop('rating', axis=1), dataset['rating'], cv=5))
# ### Predictions for sample users: Creating top 5 recommendations for sample users
# In[17]:
pred_cols = ['content_based','user_user','item_item','matrix_fact','pers_bias']
predictions = linear.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user':df_sample['user'], 'item':df_sample['item'], 'predictions':predictions}))
recommendations.groupby('user').apply(lambda df_user : df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])
# ## Focus on Performance (RMSE) II - Ensemble
# In[18]:
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state=42)
print('RMSE for non linear ensemble of recommender systems:')
np.mean(cross_val_score(rf, dataset.drop('rating', axis=1), dataset['rating'], cv=5))
# ### Predictions for sample users:
# In[19]:
predictions = rf.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user':df_sample['user'], 'item':df_sample['item'], 'predictions':predictions}))
recommendations.groupby('user').apply(lambda df_user : df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])
# ## Focus on Recommendations - Top 1 from each Recommender
#
# With the all-top-1 recommender, we can evaluate its performance not just with RMSE, but with all the list metrics we evaluated before. As a business constraint, we will also pay more attention to the *precision@5* metric, as general information on how good the recommender is at providing suggestions that the user will buy, or in this case has already bought.
# The majority of metrics were on the same scale as the best metrics in the all-models comparison. However, it's worth highlighting that the top-1-all recommender had the best *precision@5* metric among all recommenders, showing itself to be a **suitable hybridization mechanism**.
# In[20]:
count_nDCG = np.array([0])
count_diversity_price = np.ndarray([1,2])
count_diversity_availability = np.ndarray([1,2])
count_popularity = np.array([0])
count_precision = np.array([0])
for userId in actual_ratings.columns:
top_n_1 = re.get_top_n(userId,1)
user_items = {}
user_items['top_1_all'] = [a[0] for a in top_n_1.values()]
nDCG_recommenders = re.nDCG(userId, individual_recommendation = user_items)
count_nDCG = count_nDCG + nDCG_recommenders['nDCG']
diversity_price_recommenders = re.price_diversity(userId, individual_recommendation = user_items)
count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]
diversity_availability_recommenders = re.availability_diversity(userId, individual_recommendation = user_items)
count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]
popularity_recommenders = re.popularity(userId, individual_recommendation = user_items)
count_popularity = count_popularity + popularity_recommenders['popularity']
precision_recommenders = re.precision_at_n(userId, individual_recommendation = user_items)
count_precision = count_precision + precision_recommenders['precision_at_5']
print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('\n---')
print('Average Precision@5')
print('---\n')
print(count_precision/len(actual_ratings.columns))
# ### Predictions for sample users:
# In[21]:
results = {}
for user_sample in sample_users:
results[user_sample] = [a[0] for a in list(re.get_top_n(user_sample, 1).values())]
results
# ## Focus on Recommendations - Switching algorithm
#
# ### Can we use a Content Based Recommender for items with fewer ratings?
#
# We can see in the cumulative histogram that only around 20% of the rated items had 10 or more ratings. This signals that we could prioritize a content-based recommender, or even a non-personalised one, for the majority of items that don't have enough ratings to make the collaborative filtering algorithms stable (a minimal switching sketch follows the histogram below).
# In[23]:
import matplotlib.pyplot as plt
item_nbr_ratings = actual_ratings.apply(lambda col: np.sum(~np.isnan(col)), axis=1)
item_max_nbr_ratings = item_nbr_ratings.max()
range_item_max_nbr_ratings = range(item_max_nbr_ratings+1)
plt.figure(figsize=(15,3))
plt.subplot(121)
nbr_ratings_items = []
for i in range_item_max_nbr_ratings:
nbr_ratings_items.append(len(item_nbr_ratings[item_nbr_ratings == i]))
plt.plot(nbr_ratings_items)
plt.xlabel('Number of ratings')
plt.ylabel('Amount of items')
plt.title('Histogram of amount of ratings')
plt.subplot(122)
cum_nbr_ratings_items = []
for i in range(len(nbr_ratings_items)):
cum_nbr_ratings_items.append(np.sum(nbr_ratings_items[:i]))
cum_nbr_ratings_items = np.array(cum_nbr_ratings_items)
plt.plot(cum_nbr_ratings_items/actual_ratings.shape[0])
plt.xlabel('Number of ratings')
plt.ylabel('Cumulative distribution')
plt.title('Cumulative histogram of amount of ratings');
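# A minimal switching sketch, as mentioned above. It is only illustrative: the threshold value, the function name, and the use of the content_based and item_item prediction frames as primary/fallback sources are our assumptions, not part of the original notebook. The rule is to use collaborative filtering only for items with enough ratings and otherwise fall back to the content-based prediction.
def switched_prediction(item_id, user_id, min_ratings=10):
    # item_nbr_ratings (computed above) holds the number of observed ratings per item.
    if item_nbr_ratings.loc[item_id] >= min_ratings:
        return item_item.loc[item_id, user_id]  # enough support: collaborative filtering
    return content_based.loc[item_id, user_id]  # sparse item: content-based fallback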
# In[ ]:
|
[
"matplotlib.pyplot.title",
"numpy.isin",
"numpy.random.seed",
"numpy.sum",
"numpy.abs",
"pandas.read_csv",
"numpy.isnan",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.ndarray",
"pandas.DataFrame",
"numpy.empty_like",
"numpy.random.choice",
"numpy.average",
"numpy.log2",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LinearRegression",
"numpy.sort",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.array",
"collections.OrderedDict",
"matplotlib.pyplot.xlabel"
] |
[((1069, 1158), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Items.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Items.csv',\n index_col=0)\n", (1080, 1158), True, 'import pandas as pd\n'), ((1173, 1264), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Ratings.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Ratings.csv',\n index_col=0)\n", (1184, 1264), True, 'import pandas as pd\n'), ((1279, 1366), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - CBF.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - CBF.csv',\n index_col=0)\n", (1290, 1366), True, 'import pandas as pd\n'), ((1375, 1468), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - User-User.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - User-User.csv',\n index_col=0)\n", (1386, 1468), True, 'import pandas as pd\n'), ((1477, 1570), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Item-Item.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Item-Item.csv',\n index_col=0)\n", (1488, 1570), True, 'import pandas as pd\n'), ((1581, 1667), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - MF.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - MF.csv',\n index_col=0)\n", (1592, 1667), True, 'import pandas as pd\n'), ((1676, 1768), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - PersBias.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - PersBias.csv',\n index_col=0)\n", (1687, 1768), True, 'import pandas as pd\n'), ((17465, 17490), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (17473, 17490), True, 'import numpy as np\n'), ((17964, 17989), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (17972, 17989), True, 'import numpy as np\n'), ((18010, 18028), 'numpy.ndarray', 'np.ndarray', (['[5, 2]'], {}), '([5, 2])\n', (18020, 18028), True, 'import numpy as np\n'), ((18059, 18077), 'numpy.ndarray', 'np.ndarray', (['[5, 2]'], {}), '([5, 2])\n', (18069, 18077), True, 'import numpy as np\n'), ((18096, 18121), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (18104, 18121), True, 'import numpy as np\n'), ((18141, 18166), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (18149, 18166), True, 'import numpy as np\n'), ((23941, 24152), 'pandas.DataFrame', 'pd.DataFrame', (["{'rating': obs_ratings_list, 'content_based': content_based_list,\n 'user_user': user_user_list, 'item_item': item_item_list, 'matrix_fact':\n matrix_fact_list, 'pers_bias': pers_bias_list}"], {}), "({'rating': obs_ratings_list, 'content_based':\n content_based_list, 'user_user': user_user_list, 'item_item':\n item_item_list, 'matrix_fact': matrix_fact_list, 'pers_bias':\n pers_bias_list})\n", (23953, 24152), True, 'import pandas as pd\n'), ((24335, 24353), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (24349, 24353), True, 'import numpy as np\n'), ((24657, 24671), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24669, 24671), True, 'import pandas as pd\n'), ((25365, 25383), 'sklearn.linear_model.LinearRegression', 'LinearRegression', 
([], {}), '()\n', (25381, 25383), False, 'from sklearn.linear_model import LinearRegression\n'), ((26199, 26237), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (26220, 26237), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((27526, 27539), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27534, 27539), True, 'import numpy as np\n'), ((27564, 27582), 'numpy.ndarray', 'np.ndarray', (['[1, 2]'], {}), '([1, 2])\n', (27574, 27582), True, 'import numpy as np\n'), ((27613, 27631), 'numpy.ndarray', 'np.ndarray', (['[1, 2]'], {}), '([1, 2])\n', (27623, 27631), True, 'import numpy as np\n'), ((27650, 27663), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27658, 27663), True, 'import numpy as np\n'), ((27682, 27695), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27690, 27695), True, 'import numpy as np\n'), ((30332, 30359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (30342, 30359), True, 'import matplotlib.pyplot as plt\n'), ((30359, 30375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (30370, 30375), True, 'import matplotlib.pyplot as plt\n'), ((30511, 30538), 'matplotlib.pyplot.plot', 'plt.plot', (['nbr_ratings_items'], {}), '(nbr_ratings_items)\n', (30519, 30538), True, 'import matplotlib.pyplot as plt\n'), ((30539, 30570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (30549, 30570), True, 'import matplotlib.pyplot as plt\n'), ((30571, 30600), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount of items"""'], {}), "('Amount of items')\n", (30581, 30600), True, 'import matplotlib.pyplot as plt\n'), ((30601, 30644), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of amount of ratings"""'], {}), "('Histogram of amount of ratings')\n", (30610, 30644), True, 'import matplotlib.pyplot as plt\n'), ((30646, 30662), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (30657, 30662), True, 'import matplotlib.pyplot as plt\n'), ((30823, 30854), 'numpy.array', 'np.array', (['cum_nbr_ratings_items'], {}), '(cum_nbr_ratings_items)\n', (30831, 30854), True, 'import numpy as np\n'), ((30855, 30912), 'matplotlib.pyplot.plot', 'plt.plot', (['(cum_nbr_ratings_items / actual_ratings.shape[0])'], {}), '(cum_nbr_ratings_items / actual_ratings.shape[0])\n', (30863, 30912), True, 'import matplotlib.pyplot as plt\n'), ((30911, 30942), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (30921, 30942), True, 'import matplotlib.pyplot as plt\n'), ((30943, 30980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative distribution"""'], {}), "('Cumulative distribution')\n", (30953, 30980), True, 'import matplotlib.pyplot as plt\n'), ((30981, 31035), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative histogram of amount of ratings"""'], {}), "('Cumulative histogram of amount of ratings')\n", (30990, 31035), True, 'import matplotlib.pyplot as plt\n'), ((25848, 25947), 'collections.OrderedDict', 'OrderedDict', (["{'user': df_sample['user'], 'item': df_sample['item'], 'predictions':\n predictions}"], {}), "({'user': df_sample['user'], 'item': df_sample['item'],\n 'predictions': predictions})\n", (25859, 25947), False, 'from collections import OrderedDict\n'), ((26570, 26669), 'collections.OrderedDict', 'OrderedDict', (["{'user': df_sample['user'], 'item': df_sample['item'], 'predictions':\n 
predictions}"], {}), "({'user': df_sample['user'], 'item': df_sample['item'],\n 'predictions': predictions})\n", (26581, 26669), False, 'from collections import OrderedDict\n'), ((8900, 8959), 'pandas.DataFrame', 'pd.DataFrame', (['rmse_list'], {'index': 'self.recommenders_list_names'}), '(rmse_list, index=self.recommenders_list_names)\n', (8912, 8959), True, 'import pandas as pd\n'), ((13310, 13324), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13322, 13324), True, 'import pandas as pd\n'), ((14242, 14256), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14254, 14256), True, 'import pandas as pd\n'), ((15460, 15509), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'index': 'results_pandas_index'}), '(results, index=results_pandas_index)\n', (15472, 15509), True, 'import pandas as pd\n'), ((16226, 16278), 'pandas.DataFrame', 'pd.DataFrame', (['precisions'], {'index': 'results_pandas_index'}), '(precisions, index=results_pandas_index)\n', (16238, 16278), True, 'import pandas as pd\n'), ((24369, 24412), 'numpy.random.choice', 'np.random.choice', (['actual_ratings.columns', '(3)'], {}), '(actual_ratings.columns, 3)\n', (24385, 24412), True, 'import numpy as np\n'), ((30763, 30792), 'numpy.sum', 'np.sum', (['nbr_ratings_items[:i]'], {}), '(nbr_ratings_items[:i])\n', (30769, 30792), True, 'import numpy as np\n'), ((6836, 6870), 'numpy.argsort', 'np.argsort', (['perc_users_bought_item'], {}), '(perc_users_bought_item)\n', (6846, 6870), True, 'import numpy as np\n'), ((9852, 9876), 'numpy.empty_like', 'np.empty_like', (['item_list'], {}), '(item_list)\n', (9865, 9876), True, 'import numpy as np\n'), ((10125, 10160), 'numpy.isin', 'np.isin', (['item_list', 'ri.index.values'], {}), '(item_list, ri.index.values)\n', (10132, 10160), True, 'import numpy as np\n'), ((24936, 25145), 'collections.OrderedDict', 'OrderedDict', (["{'user': user, 'item': actual_ratings.index.values, 'content_based':\n content_based_, 'user_user': user_user_, 'item_item': item_item_,\n 'matrix_fact': matrix_fact_, 'pers_bias': pers_bias_}"], {}), "({'user': user, 'item': actual_ratings.index.values,\n 'content_based': content_based_, 'user_user': user_user_, 'item_item':\n item_item_, 'matrix_fact': matrix_fact_, 'pers_bias': pers_bias_})\n", (24947, 25145), False, 'from collections import OrderedDict\n'), ((7429, 7455), 'numpy.isnan', 'np.isnan', (['filtered_ratings'], {}), '(filtered_ratings)\n', (7437, 7455), True, 'import numpy as np\n'), ((11586, 11607), 'numpy.sort', 'np.sort', (['scores_model'], {}), '(scores_model)\n', (11593, 11607), True, 'import numpy as np\n'), ((15351, 15391), 'numpy.isin', 'np.isin', (['recommendations', 'self.pop_items'], {}), '(recommendations, self.pop_items)\n', (15358, 15391), True, 'import numpy as np\n'), ((30202, 30215), 'numpy.isnan', 'np.isnan', (['col'], {}), '(col)\n', (30210, 30215), True, 'import numpy as np\n'), ((8824, 8879), 'numpy.average', 'np.average', (['((predicted_ratings - observed_ratings) ** 2)'], {}), '((predicted_ratings - observed_ratings) ** 2)\n', (8834, 8879), True, 'import numpy as np\n'), ((12043, 12065), 'numpy.abs', 'np.abs', (['ideal_rank_DCG'], {}), '(ideal_rank_DCG)\n', (12049, 12065), True, 'import numpy as np\n'), ((12068, 12085), 'numpy.abs', 'np.abs', (['model_DCG'], {}), '(model_DCG)\n', (12074, 12085), True, 'import numpy as np\n'), ((11530, 11549), 'numpy.log2', 'np.log2', (['(index_ + 1)'], {}), '(index_ + 1)\n', (11537, 11549), True, 'import numpy as np\n'), ((11981, 12000), 'numpy.log2', 'np.log2', (['(index_ + 1)'], {}), 
'(index_ + 1)\n', (11988, 12000), True, 'import numpy as np\n'), ((16160, 16202), 'numpy.isin', 'np.isin', (['recommendations', 'observed_ratings'], {}), '(recommendations, observed_ratings)\n', (16167, 16202), True, 'import numpy as np\n'), ((5987, 6000), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (5995, 6000), True, 'import numpy as np\n'), ((6762, 6776), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (6770, 6776), True, 'import numpy as np\n')]
|
import torch.nn as nn
from UNIQ.quantize import act_quantize, act_noise, check_quantization
import torch.nn.functional as F
class ActQuant(nn.Module):
def __init__(self, quatize_during_training=False, noise_during_training=False, quant=False, noise=False,
bitwidth=32):
super(ActQuant, self).__init__()
self.quant = quant
self.noise = noise
self.bitwidth = bitwidth
self.quatize_during_training = quatize_during_training
self.noise_during_training = noise_during_training
def update_stage(self, quatize_during_training=False, noise_during_training=False):
self.quatize_during_training = quatize_during_training
self.noise_during_training = noise_during_training
def forward(self, input):
if self.quant and (not self.training or (self.training and self.quatize_during_training)):
assert (isinstance(self.bitwidth, int))
x = act_quantize.apply(input, self.bitwidth)
elif self.noise and self.training and self.noise_during_training:
assert (False)
x = act_noise.apply(input, bitwidth=self.bitwidth, training=self.training)
else:
x = F.relu(input)
# print('Activation is quantized to {} values'.format(check_quantization(x)))
return x
|
[
"UNIQ.quantize.act_noise.apply",
"UNIQ.quantize.act_quantize.apply",
"torch.nn.functional.relu"
] |
[((953, 993), 'UNIQ.quantize.act_quantize.apply', 'act_quantize.apply', (['input', 'self.bitwidth'], {}), '(input, self.bitwidth)\n', (971, 993), False, 'from UNIQ.quantize import act_quantize, act_noise, check_quantization\n'), ((1111, 1181), 'UNIQ.quantize.act_noise.apply', 'act_noise.apply', (['input'], {'bitwidth': 'self.bitwidth', 'training': 'self.training'}), '(input, bitwidth=self.bitwidth, training=self.training)\n', (1126, 1181), False, 'from UNIQ.quantize import act_quantize, act_noise, check_quantization\n'), ((1212, 1225), 'torch.nn.functional.relu', 'F.relu', (['input'], {}), '(input)\n', (1218, 1225), True, 'import torch.nn.functional as F\n')]
|
import random
import arcade
from badwing.constants import *
from badwing.effect import Effect
from badwing.particle import AnimatedAlphaParticle
#TODO: Some of this will go up into ParticleEffect
class Firework(Effect):
def __init__(self, position=(0,0), r1=30, r2=40):
super().__init__(position)
self.radius = random.randint(r1, r2)
self.emitters = []
self.make_sparks(position)
def draw(self):
for e in self.emitters:
e.draw()
def update(self, delta_time):
# prevent list from being mutated (often by callbacks) while iterating over it
emitters_to_update = self.emitters.copy()
# update
for e in emitters_to_update:
e.update()
# remove emitters that can be reaped
to_del = [e for e in emitters_to_update if e.can_reap()]
for e in to_del:
self.emitters.remove(e)
def make_sparks(self, position):
spark_texture = random.choice(SPARK_TEXTURES)
sparks = arcade.Emitter(
center_xy=position,
emit_controller=arcade.EmitBurst(self.radius),
particle_factory=lambda emitter: AnimatedAlphaParticle(
filename_or_texture=spark_texture,
change_xy=arcade.rand_in_circle((0.0, 0.0), 9.0),
start_alpha=255,
duration1=random.uniform(0.6, 1.0),
mid_alpha=0,
duration2=random.uniform(0.1, 0.2),
end_alpha=255,
mutation_callback=firework_spark_mutator
)
)
self.emitters.append(sparks)
def firework_spark_mutator(particle: arcade.FadeParticle):
"""mutation_callback shared by all fireworks sparks"""
# gravity
particle.change_y += -0.03
# drag
particle.change_x *= 0.92
particle.change_y *= 0.92
|
[
"arcade.EmitBurst",
"random.randint",
"random.uniform",
"arcade.rand_in_circle",
"random.choice"
] |
[((335, 357), 'random.randint', 'random.randint', (['r1', 'r2'], {}), '(r1, r2)\n', (349, 357), False, 'import random\n'), ((976, 1005), 'random.choice', 'random.choice', (['SPARK_TEXTURES'], {}), '(SPARK_TEXTURES)\n', (989, 1005), False, 'import random\n'), ((1099, 1128), 'arcade.EmitBurst', 'arcade.EmitBurst', (['self.radius'], {}), '(self.radius)\n', (1115, 1128), False, 'import arcade\n'), ((1275, 1313), 'arcade.rand_in_circle', 'arcade.rand_in_circle', (['(0.0, 0.0)', '(9.0)'], {}), '((0.0, 0.0), 9.0)\n', (1296, 1313), False, 'import arcade\n'), ((1374, 1398), 'random.uniform', 'random.uniform', (['(0.6)', '(1.0)'], {}), '(0.6, 1.0)\n', (1388, 1398), False, 'import random\n'), ((1455, 1479), 'random.uniform', 'random.uniform', (['(0.1)', '(0.2)'], {}), '(0.1, 0.2)\n', (1469, 1479), False, 'import random\n')]
|
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RQMC support."""
from tf_quant_finance.math.qmc import utils
from tf_quant_finance.math.qmc.digital_net import digital_net_sample
from tf_quant_finance.math.qmc.digital_net import random_digital_shift
from tf_quant_finance.math.qmc.digital_net import random_scrambling_matrices
from tf_quant_finance.math.qmc.digital_net import scramble_generating_matrices
from tf_quant_finance.math.qmc.lattice_rule import lattice_rule_sample
from tf_quant_finance.math.qmc.lattice_rule import random_scrambling_vectors
from tf_quant_finance.math.qmc.sobol import sobol_generating_matrices
from tf_quant_finance.math.qmc.sobol import sobol_sample
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'digital_net_sample',
'lattice_rule_sample',
'random_digital_shift',
'random_scrambling_matrices',
'random_scrambling_vectors',
'scramble_generating_matrices',
'sobol_generating_matrices',
'sobol_sample',
'utils',
]
remove_undocumented(__name__, _allowed_symbols)
|
[
"tensorflow.python.util.all_util.remove_undocumented"
] |
[((1616, 1663), 'tensorflow.python.util.all_util.remove_undocumented', 'remove_undocumented', (['__name__', '_allowed_symbols'], {}), '(__name__, _allowed_symbols)\n', (1635, 1663), False, 'from tensorflow.python.util.all_util import remove_undocumented\n')]
|
"""all routes"""
from flask import Blueprint
from flask_restful import Api
from .questions.views import Questions, Question, UpdateTitle, UpdateQuestion
VERSION_UNO = Blueprint('api', __name__, url_prefix='/api/v1')
API = Api(VERSION_UNO)
API.add_resource(Questions, '/questions')
API.add_resource(Question, '/questions/<int:question_id>')
API.add_resource(UpdateTitle, '/questions/<int:question_id>/title')
API.add_resource(UpdateQuestion, '/questions/<int:question_id>/question')
|
[
"flask_restful.Api",
"flask.Blueprint"
] |
[((169, 217), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {'url_prefix': '"""/api/v1"""'}), "('api', __name__, url_prefix='/api/v1')\n", (178, 217), False, 'from flask import Blueprint\n'), ((224, 240), 'flask_restful.Api', 'Api', (['VERSION_UNO'], {}), '(VERSION_UNO)\n', (227, 240), False, 'from flask_restful import Api\n')]
|
#coding=utf-8
#import libs
import MergeNew_cmd
import MergeNew_sty
import Fun
import os
import tkinter
from tkinter import *
import tkinter.ttk
import tkinter.font
#Add your Varial Here: (Keep This Line of comments)
#Define UI Class
class MergeNew:
def __init__(self,root,isTKroot = True):
uiName = self.__class__.__name__
Fun.Register(uiName,'UIClass',self)
self.root = root
style = MergeNew_sty.SetupStyle()
if isTKroot == True:
root.title("Form1")
Fun.CenterDlg(uiName,root,563,375)
root['background'] = '#efefef'
Form_1= tkinter.Canvas(root,width = 10,height = 4)
Form_1.place(x = 0,y = 0,width = 563,height = 375)
Form_1.configure(bg = "#efefef")
Form_1.configure(highlightthickness = 0)
Fun.Register(uiName,'root',root)
Fun.Register(uiName,'Form_1',Form_1)
#Create the elements of root
Button_2= tkinter.Button(root,text="打开文件夹",width = 10,height = 4)
Fun.Register(uiName,'Button_2',Button_2)
Button_2.place(x = 16,y = 15,width = 109,height = 35)
Button_2.configure(command=lambda:MergeNew_cmd.Button_2_onCommand(uiName,"Button_2"))
Button_2_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_2.configure(font = Button_2_Ft)
ListBox_3= tkinter.Listbox(root)
Fun.Register(uiName,'ListBox_3',ListBox_3)
ListBox_3.place(x = 16,y = 57,width = 210,height = 215)
Button_4= tkinter.Button(root,text=">",width = 10,height = 4)
Fun.Register(uiName,'Button_4',Button_4)
Button_4.place(x = 241,y = 86,width = 80,height = 28)
Button_4.configure(command=lambda:MergeNew_cmd.Button_4_onCommand(uiName,"Button_4"))
Button_4_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_4.configure(font = Button_4_Ft)
Button_5= tkinter.Button(root,text=">>",width = 10,height = 4)
Fun.Register(uiName,'Button_5',Button_5)
Button_5.place(x = 241,y = 132,width = 80,height = 28)
Button_5.configure(command=lambda:MergeNew_cmd.Button_5_onCommand(uiName,"Button_5"))
Button_5_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_5.configure(font = Button_5_Ft)
Button_6= tkinter.Button(root,text="<",width = 10,height = 4)
Fun.Register(uiName,'Button_6',Button_6)
Button_6.place(x = 241,y = 178,width = 80,height = 28)
Button_6.configure(command=lambda:MergeNew_cmd.Button_6_onCommand(uiName,"Button_6"))
Button_6_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_6.configure(font = Button_6_Ft)
Button_7= tkinter.Button(root,text="<<",width = 10,height = 4)
Fun.Register(uiName,'Button_7',Button_7)
Button_7.place(x = 241,y = 222,width = 80,height = 28)
Button_7.configure(command=lambda:MergeNew_cmd.Button_7_onCommand(uiName,"Button_7"))
Button_7_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_7.configure(font = Button_7_Ft)
ListBox_8= tkinter.Listbox(root)
Fun.Register(uiName,'ListBox_8',ListBox_8)
ListBox_8.place(x = 337,y = 59,width = 210,height = 215)
Entry_9_Variable = Fun.AddTKVariable(uiName,'Entry_9','')
Entry_9= tkinter.Entry(root,textvariable=Entry_9_Variable)
Fun.Register(uiName,'Entry_9',Entry_9)
Entry_9.place(x = 134,y = 293,width = 199,height = 34)
Entry_9.configure(relief = "sunken")
Label_10= tkinter.Label(root,text="合并后文件名",width = 10,height = 4)
Fun.Register(uiName,'Label_10',Label_10)
Label_10.place(x = 15,y = 298,width = 111,height = 24)
Label_10.configure(relief = "flat")
Label_10_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Label_10.configure(font = Label_10_Ft)
Button_11= tkinter.Button(root,text="合并",width = 10,height = 4)
Fun.Register(uiName,'Button_11',Button_11)
Button_11.place(x = 370,y = 292,width = 115,height = 36)
Button_11.configure(command=lambda:MergeNew_cmd.Button_11_onCommand(uiName,"Button_11"))
Button_11_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Button_11.configure(font = Button_11_Ft)
Label_12= tkinter.Label(root,text="需要合并的文件列表",width = 10,height = 4)
Fun.Register(uiName,'Label_12',Label_12)
Label_12.place(x = 341,y = 22,width = 205,height = 34)
Label_12.configure(relief = "flat")
Label_12_Ft=tkinter.font.Font(family='System', size=12,weight='bold',slant='roman',underline=0,overstrike=0)
Label_12.configure(font = Label_12_Ft)
#Inital all element's Data
Fun.InitElementData(uiName)
#Add Some Logic Code Here: (Keep This Line of comments)
#Create the root of Kinter
if __name__ == '__main__':
root = tkinter.Tk()
MyDlg = MergeNew(root)
root.mainloop()
|
[
"MergeNew_cmd.Button_7_onCommand",
"tkinter.Canvas",
"MergeNew_cmd.Button_4_onCommand",
"tkinter.Button",
"Fun.CenterDlg",
"tkinter.Listbox",
"tkinter.Entry",
"tkinter.font.Font",
"MergeNew_cmd.Button_2_onCommand",
"Fun.Register",
"Fun.AddTKVariable",
"MergeNew_cmd.Button_5_onCommand",
"MergeNew_cmd.Button_6_onCommand",
"MergeNew_sty.SetupStyle",
"MergeNew_cmd.Button_11_onCommand",
"tkinter.Label",
"tkinter.Tk",
"Fun.InitElementData"
] |
[((5187, 5199), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (5197, 5199), False, 'import tkinter\n'), ((348, 385), 'Fun.Register', 'Fun.Register', (['uiName', '"""UIClass"""', 'self'], {}), "(uiName, 'UIClass', self)\n", (360, 385), False, 'import Fun\n'), ((425, 450), 'MergeNew_sty.SetupStyle', 'MergeNew_sty.SetupStyle', ([], {}), '()\n', (448, 450), False, 'import MergeNew_sty\n'), ((618, 658), 'tkinter.Canvas', 'tkinter.Canvas', (['root'], {'width': '(10)', 'height': '(4)'}), '(root, width=10, height=4)\n', (632, 658), False, 'import tkinter\n'), ((818, 852), 'Fun.Register', 'Fun.Register', (['uiName', '"""root"""', 'root'], {}), "(uiName, 'root', root)\n", (830, 852), False, 'import Fun\n'), ((859, 897), 'Fun.Register', 'Fun.Register', (['uiName', '"""Form_1"""', 'Form_1'], {}), "(uiName, 'Form_1', Form_1)\n", (871, 897), False, 'import Fun\n'), ((952, 1006), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '"""打开文件夹"""', 'width': '(10)', 'height': '(4)'}), "(root, text='打开文件夹', width=10, height=4)\n", (966, 1006), False, 'import tkinter\n'), ((1016, 1058), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_2"""', 'Button_2'], {}), "(uiName, 'Button_2', Button_2)\n", (1028, 1058), False, 'import Fun\n'), ((1233, 1337), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (1250, 1337), False, 'import tkinter\n'), ((1396, 1417), 'tkinter.Listbox', 'tkinter.Listbox', (['root'], {}), '(root)\n', (1411, 1417), False, 'import tkinter\n'), ((1426, 1470), 'Fun.Register', 'Fun.Register', (['uiName', '"""ListBox_3"""', 'ListBox_3'], {}), "(uiName, 'ListBox_3', ListBox_3)\n", (1438, 1470), False, 'import Fun\n'), ((1551, 1601), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '""">"""', 'width': '(10)', 'height': '(4)'}), "(root, text='>', width=10, height=4)\n", (1565, 1601), False, 'import tkinter\n'), ((1611, 1653), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_4"""', 'Button_4'], {}), "(uiName, 'Button_4', Button_4)\n", (1623, 1653), False, 'import Fun\n'), ((1828, 1932), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (1845, 1932), False, 'import tkinter\n'), ((1990, 2041), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '""">>"""', 'width': '(10)', 'height': '(4)'}), "(root, text='>>', width=10, height=4)\n", (2004, 2041), False, 'import tkinter\n'), ((2051, 2093), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_5"""', 'Button_5'], {}), "(uiName, 'Button_5', Button_5)\n", (2063, 2093), False, 'import Fun\n'), ((2269, 2373), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (2286, 2373), False, 'import tkinter\n'), ((2431, 2481), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '"""<"""', 'width': '(10)', 'height': '(4)'}), "(root, text='<', width=10, height=4)\n", (2445, 2481), False, 'import tkinter\n'), ((2491, 2533), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_6"""', 
'Button_6'], {}), "(uiName, 'Button_6', Button_6)\n", (2503, 2533), False, 'import Fun\n'), ((2709, 2813), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (2726, 2813), False, 'import tkinter\n'), ((2871, 2922), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '"""<<"""', 'width': '(10)', 'height': '(4)'}), "(root, text='<<', width=10, height=4)\n", (2885, 2922), False, 'import tkinter\n'), ((2932, 2974), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_7"""', 'Button_7'], {}), "(uiName, 'Button_7', Button_7)\n", (2944, 2974), False, 'import Fun\n'), ((3150, 3254), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (3167, 3254), False, 'import tkinter\n'), ((3313, 3334), 'tkinter.Listbox', 'tkinter.Listbox', (['root'], {}), '(root)\n', (3328, 3334), False, 'import tkinter\n'), ((3343, 3387), 'Fun.Register', 'Fun.Register', (['uiName', '"""ListBox_8"""', 'ListBox_8'], {}), "(uiName, 'ListBox_8', ListBox_8)\n", (3355, 3387), False, 'import Fun\n'), ((3478, 3518), 'Fun.AddTKVariable', 'Fun.AddTKVariable', (['uiName', '"""Entry_9"""', '""""""'], {}), "(uiName, 'Entry_9', '')\n", (3495, 3518), False, 'import Fun\n'), ((3534, 3584), 'tkinter.Entry', 'tkinter.Entry', (['root'], {'textvariable': 'Entry_9_Variable'}), '(root, textvariable=Entry_9_Variable)\n', (3547, 3584), False, 'import tkinter\n'), ((3592, 3632), 'Fun.Register', 'Fun.Register', (['uiName', '"""Entry_9"""', 'Entry_9'], {}), "(uiName, 'Entry_9', Entry_9)\n", (3604, 3632), False, 'import Fun\n'), ((3757, 3811), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""合并后文件名"""', 'width': '(10)', 'height': '(4)'}), "(root, text='合并后文件名', width=10, height=4)\n", (3770, 3811), False, 'import tkinter\n'), ((3821, 3863), 'Fun.Register', 'Fun.Register', (['uiName', '"""Label_10"""', 'Label_10'], {}), "(uiName, 'Label_10', Label_10)\n", (3833, 3863), False, 'import Fun\n'), ((3989, 4093), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (4006, 4093), False, 'import tkinter\n'), ((4152, 4203), 'tkinter.Button', 'tkinter.Button', (['root'], {'text': '"""合并"""', 'width': '(10)', 'height': '(4)'}), "(root, text='合并', width=10, height=4)\n", (4166, 4203), False, 'import tkinter\n'), ((4213, 4257), 'Fun.Register', 'Fun.Register', (['uiName', '"""Button_11"""', 'Button_11'], {}), "(uiName, 'Button_11', Button_11)\n", (4225, 4257), False, 'import Fun\n'), ((4439, 4543), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (4456, 4543), False, 'import tkinter\n'), ((4603, 4660), 'tkinter.Label', 'tkinter.Label', (['root'], {'text': '"""需要合并的文件列表"""', 'width': '(10)', 'height': '(4)'}), "(root, text='需要合并的文件列表', width=10, height=4)\n", (4616, 4660), False, 'import 
tkinter\n'), ((4670, 4712), 'Fun.Register', 'Fun.Register', (['uiName', '"""Label_12"""', 'Label_12'], {}), "(uiName, 'Label_12', Label_12)\n", (4682, 4712), False, 'import Fun\n'), ((4838, 4942), 'tkinter.font.Font', 'tkinter.font.Font', ([], {'family': '"""System"""', 'size': '(12)', 'weight': '"""bold"""', 'slant': '"""roman"""', 'underline': '(0)', 'overstrike': '(0)'}), "(family='System', size=12, weight='bold', slant='roman',\n underline=0, overstrike=0)\n", (4855, 4942), False, 'import tkinter\n'), ((5026, 5053), 'Fun.InitElementData', 'Fun.InitElementData', (['uiName'], {}), '(uiName)\n', (5045, 5053), False, 'import Fun\n'), ((524, 561), 'Fun.CenterDlg', 'Fun.CenterDlg', (['uiName', 'root', '(563)', '(375)'], {}), '(uiName, root, 563, 375)\n', (537, 561), False, 'import Fun\n'), ((1161, 1212), 'MergeNew_cmd.Button_2_onCommand', 'MergeNew_cmd.Button_2_onCommand', (['uiName', '"""Button_2"""'], {}), "(uiName, 'Button_2')\n", (1192, 1212), False, 'import MergeNew_cmd\n'), ((1756, 1807), 'MergeNew_cmd.Button_4_onCommand', 'MergeNew_cmd.Button_4_onCommand', (['uiName', '"""Button_4"""'], {}), "(uiName, 'Button_4')\n", (1787, 1807), False, 'import MergeNew_cmd\n'), ((2197, 2248), 'MergeNew_cmd.Button_5_onCommand', 'MergeNew_cmd.Button_5_onCommand', (['uiName', '"""Button_5"""'], {}), "(uiName, 'Button_5')\n", (2228, 2248), False, 'import MergeNew_cmd\n'), ((2637, 2688), 'MergeNew_cmd.Button_6_onCommand', 'MergeNew_cmd.Button_6_onCommand', (['uiName', '"""Button_6"""'], {}), "(uiName, 'Button_6')\n", (2668, 2688), False, 'import MergeNew_cmd\n'), ((3078, 3129), 'MergeNew_cmd.Button_7_onCommand', 'MergeNew_cmd.Button_7_onCommand', (['uiName', '"""Button_7"""'], {}), "(uiName, 'Button_7')\n", (3109, 3129), False, 'import MergeNew_cmd\n'), ((4364, 4417), 'MergeNew_cmd.Button_11_onCommand', 'MergeNew_cmd.Button_11_onCommand', (['uiName', '"""Button_11"""'], {}), "(uiName, 'Button_11')\n", (4396, 4417), False, 'import MergeNew_cmd\n')]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
VERSION = '0.2'
with open('README.md') as readme:
long_description = readme.read()
setup(
name='sentry-scrapy',
version=VERSION,
description='Scrapy integration with Sentry SDK (unofficial)',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['Scrapy', 'sentry-sdk'],
license="MIT",
keywords="sentry scrapy sdk integration",
url='https://github.com/m-vdb/sentry-scrapy',
download_url='https://github.com/m-vdb/sentry-scrapy/archive/v{}.tar.gz'.format(VERSION),
project_urls={
"Source Code": "https://github.com/m-vdb/sentry-scrapy",
}
)
|
[
"setuptools.find_packages"
] |
[((431, 446), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (444, 446), False, 'from setuptools import setup, find_packages\n')]
|
import pymongo
from .mongod import Mongod
class MongoClient(pymongo.MongoClient):
def __init__(self, host=None, port=None, **kwargs):
self._mongod = Mongod()
self._mongod.start()
super().__init__(self._mongod.connection_string, **kwargs)
def close(self):
self._mongod.stop()
super().close()
def pim_mongodump(self, *args, **kwargs):
return self._mongod.mongodump(*args, **kwargs)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
m = MongoClient("mongodb://127.0.0.1/something", 27017)
m.close()
|
[
"logging.basicConfig"
] |
[((497, 537), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (516, 537), False, 'import logging\n')]
|
# Generated by Django 3.0.5 on 2020-04-15 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ipam', '0036_standardize_description'),
('netbox_ddns', '0002_add_ttl'),
]
operations = [
migrations.CreateModel(
name='DNSStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('last_update', models.DateTimeField(auto_now=True)),
('forward_action', models.PositiveSmallIntegerField(blank=True, null=True)),
('forward_rcode', models.PositiveIntegerField(blank=True, null=True)),
('reverse_action', models.PositiveSmallIntegerField(blank=True, null=True)),
('reverse_rcode', models.PositiveIntegerField(blank=True, null=True)),
('ip_address', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='ipam.IPAddress')),
],
options={
'verbose_name': 'DNS status',
'verbose_name_plural': 'DNS status',
},
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.PositiveIntegerField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((409, 479), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), '(auto_created=True, primary_key=True, serialize=False)\n', (425, 479), False, 'from django.db import migrations, models\n'), ((514, 549), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (534, 549), False, 'from django.db import migrations, models\n'), ((587, 642), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (619, 642), False, 'from django.db import migrations, models\n'), ((679, 729), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (706, 729), False, 'from django.db import migrations, models\n'), ((767, 822), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (799, 822), False, 'from django.db import migrations, models\n'), ((859, 909), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (886, 909), False, 'from django.db import migrations, models\n'), ((943, 1034), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""ipam.IPAddress"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'ipam.IPAddress')\n", (963, 1034), False, 'from django.db import migrations, models\n')]
|
import os
import shutil
import sys
import django
from django.apps import apps
from django.conf import settings
from django.test.utils import get_runner
def manage_model(model):
model._meta.managed = True
if __name__ == '__main__':
os.environ['DJANGO_SETTINGS_MODULE'] = 'schedulesy.settings.unittest'
django.setup()
# Set all tested models to "managed = True"
for app in settings.LOCAL_APPS:
config = (
apps.get_app_config(app.split('.')[-1])
if not app.endswith('Config')
else apps.get_app_config(app.split('.')[-3])
)
print(type(config))
list(map(manage_model, config.get_models()))
test_apps = ['schedulesy'] if len(sys.argv) <= 1 else sys.argv[1:]
TestRunner = get_runner(settings)
test_runner = TestRunner(
pattern='test_*.py', verbosity=2, interactive=True, failfast=False
)
failures = test_runner.run_tests(test_apps)
# Delete temporary directory
shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)
sys.exit(failures)
|
[
"django.test.utils.get_runner",
"django.setup",
"sys.exit",
"shutil.rmtree"
] |
[((318, 332), 'django.setup', 'django.setup', ([], {}), '()\n', (330, 332), False, 'import django\n'), ((769, 789), 'django.test.utils.get_runner', 'get_runner', (['settings'], {}), '(settings)\n', (779, 789), False, 'from django.test.utils import get_runner\n'), ((987, 1041), 'shutil.rmtree', 'shutil.rmtree', (['settings.MEDIA_ROOT'], {'ignore_errors': '(True)'}), '(settings.MEDIA_ROOT, ignore_errors=True)\n', (1000, 1041), False, 'import shutil\n'), ((1047, 1065), 'sys.exit', 'sys.exit', (['failures'], {}), '(failures)\n', (1055, 1065), False, 'import sys\n')]
|
from nltk.sentiment.vader import SentimentIntensityAnalyzer
dir= 'C:\\Users\\asmazi01\\dir_path'
commentfile= 'input.txt'
delim ='\t'
fname = dir + '\\' + commentfile
with open(fname, encoding='utf-8', errors='ignore') as f:
sentences = f.readlines()
sid = SentimentIntensityAnalyzer()
totalCompoundScore = 0.0
totalNegativeScore = 0.0
totalNeutralScore = 0.0
totalPositiveScore = 0.0
totalNumOfSentences = 0.0
outfpath = fname + '.sentiment.txt'
outf = open(outfpath,'wb')
outf.write("Sentence\tcompound score\tnegative score\tneutral score\tpositive score\n".encode('utf-8'))
for sentence in sentences:
if sentence.strip() == "":
continue
totalNumOfSentences += 1.0
print(sentence)
ss = sid.polarity_scores(sentence)
outline = "\"" + sentence.strip() + "\""
compScore = 0.0
negScore = 0.0
neuScore = 0.0
posScore = 0.0
for k in sorted(ss):
print('{0}: {1}, '.format(k, ss[k]), end='')
if k == "compound":
compScore = ss[k]
if k == "neg":
negScore = ss[k]
if k == "neu":
neuScore = ss[k]
if k == "pos":
posScore = ss[k]
outline = outline + delim \
+ str(compScore) + delim \
+ str(negScore) + delim \
+ str(neuScore) + delim \
+ str(posScore) + "\n"
totalCompoundScore += compScore
totalNegativeScore += negScore
totalNeutralScore += neuScore
totalPositiveScore += posScore
print()
outf.write(outline.encode('utf-8'))
avgCompoundScore = str(totalCompoundScore/totalNumOfSentences)
avgNegativeScore = str(totalNegativeScore/totalNumOfSentences)
avgNeutralScore = str(totalNeutralScore/totalNumOfSentences)
avgPositiveScore = str(totalPositiveScore/totalNumOfSentences)
outline = "total sentence=" + str(int(totalNumOfSentences))\
+ delim + avgCompoundScore\
+ delim + avgNegativeScore\
+ delim + avgNeutralScore\
+ delim + avgPositiveScore + "\n"
print(outline)
#outf.write(outline.encode('utf-8'))
outf.close()
|
[
"nltk.sentiment.vader.SentimentIntensityAnalyzer"
] |
[((261, 289), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (287, 289), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n')]
|
"""meals dinner many2many
Revision ID: 00034ea37afb
Revises: <PASSWORD>
Create Date: 2019-12-16 11:54:41.895663
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('dinners_meals',
sa.Column('dinner_id', sa.Integer(), nullable=True),
sa.Column('meal_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dinner_id'], ['dinners.id'], ),
sa.ForeignKeyConstraint(['meal_id'], ['meals.id'], )
)
op.drop_table('airports')
op.add_column('meals', sa.Column('nutrition_value', sa.Float(), nullable=True))
op.add_column('meals', sa.Column('vitamins', sa.String(length=100), nullable=True))
op.add_column('users', sa.Column('role', sa.String(length=20), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'role')
op.drop_column('meals', 'vitamins')
op.drop_column('meals', 'nutrition_value')
op.create_table('airports',
sa.Column('IATA_CODE', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('AIRPORT', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('CITY', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('STATE', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('COUNTRY', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('LATITUDE', sa.NUMERIC(precision=7, scale=5), autoincrement=False, nullable=True),
sa.Column('LONGITUDE', sa.NUMERIC(precision=8, scale=5), autoincrement=False, nullable=True)
)
op.drop_table('dinners_meals')
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.Float",
"sqlalchemy.NUMERIC",
"alembic.op.drop_column",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.TEXT",
"sqlalchemy.Integer"
] |
[((656, 681), 'alembic.op.drop_table', 'op.drop_table', (['"""airports"""'], {}), "('airports')\n", (669, 681), False, 'from alembic import op\n'), ((1061, 1092), 'alembic.op.drop_column', 'op.drop_column', (['"""users"""', '"""role"""'], {}), "('users', 'role')\n", (1075, 1092), False, 'from alembic import op\n'), ((1097, 1132), 'alembic.op.drop_column', 'op.drop_column', (['"""meals"""', '"""vitamins"""'], {}), "('meals', 'vitamins')\n", (1111, 1132), False, 'from alembic import op\n'), ((1137, 1179), 'alembic.op.drop_column', 'op.drop_column', (['"""meals"""', '"""nutrition_value"""'], {}), "('meals', 'nutrition_value')\n", (1151, 1179), False, 'from alembic import op\n'), ((1778, 1808), 'alembic.op.drop_table', 'op.drop_table', (['"""dinners_meals"""'], {}), "('dinners_meals')\n", (1791, 1808), False, 'from alembic import op\n'), ((531, 585), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['dinner_id']", "['dinners.id']"], {}), "(['dinner_id'], ['dinners.id'])\n", (554, 585), True, 'import sqlalchemy as sa\n'), ((593, 643), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['meal_id']", "['meals.id']"], {}), "(['meal_id'], ['meals.id'])\n", (616, 643), True, 'import sqlalchemy as sa\n'), ((442, 454), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (452, 454), True, 'import sqlalchemy as sa\n'), ((497, 509), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (507, 509), True, 'import sqlalchemy as sa\n'), ((738, 748), 'sqlalchemy.Float', 'sa.Float', ([], {}), '()\n', (746, 748), True, 'import sqlalchemy as sa\n'), ((815, 836), 'sqlalchemy.String', 'sa.String', ([], {'length': '(100)'}), '(length=100)\n', (824, 836), True, 'import sqlalchemy as sa\n'), ((899, 919), 'sqlalchemy.String', 'sa.String', ([], {'length': '(20)'}), '(length=20)\n', (908, 919), True, 'import sqlalchemy as sa\n'), ((1239, 1248), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1246, 1248), True, 'import sqlalchemy as sa\n'), ((1312, 1321), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1319, 1321), True, 'import sqlalchemy as sa\n'), ((1382, 1391), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1389, 1391), True, 'import sqlalchemy as sa\n'), ((1453, 1462), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1460, 1462), True, 'import sqlalchemy as sa\n'), ((1526, 1535), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1533, 1535), True, 'import sqlalchemy as sa\n'), ((1600, 1632), 'sqlalchemy.NUMERIC', 'sa.NUMERIC', ([], {'precision': '(7)', 'scale': '(5)'}), '(precision=7, scale=5)\n', (1610, 1632), True, 'import sqlalchemy as sa\n'), ((1698, 1730), 'sqlalchemy.NUMERIC', 'sa.NUMERIC', ([], {'precision': '(8)', 'scale': '(5)'}), '(precision=8, scale=5)\n', (1708, 1730), True, 'import sqlalchemy as sa\n')]
|
import unittest
from test import AppTest
class TestVersion(AppTest):
@staticmethod
def make_version_request(client, token):
return client.get(
'/version/',
headers={'Authorization': token})
def test_version(self, client, auth_provider):
r = TestVersion.make_version_request(
client, auth_provider('root', -1))
assert 200 == r.status_code
j = r.get_json()
assert j is not None
version = j['version']
assert '1.0.0.0' == version
def test_no_auth(self, client):
r = TestVersion.make_version_request(
client, '')
assert 401 == r.status_code
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((711, 726), 'unittest.main', 'unittest.main', ([], {}), '()\n', (724, 726), False, 'import unittest\n')]
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import argparse
from logging import getLogger, Formatter, StreamHandler
import os
import sys
from esanpy import analyzers
from esanpy import elasticsearch
from esanpy.core import ESRUNNER_VERSION, DEFAULT_CLUSTER_NAME, DEFAULT_HTTP_PORT, DEFAULT_TRANSPORT_PORT,\
DEFAULT_PLUGINS
start_server = elasticsearch.start_server
stop_server = elasticsearch.stop_server
create_analysis = elasticsearch.create_analysis
get_analysis = elasticsearch.get_analysis
delete_analysis = elasticsearch.delete_analysis
analyzer = analyzers.analyzer
custom_analyzer = analyzers.custom_analyzer
logger = getLogger('esanpy')
def parse_args(args):
parser = argparse.ArgumentParser(description=None)
parser.add_argument('--runner-version', dest='esrunner_version', action='store',
default=ESRUNNER_VERSION, help='Elasticsearch cluster name')
parser.add_argument('--cluster-name', dest='cluster_name', action='store',
default=DEFAULT_CLUSTER_NAME, help='Elasticsearch cluster name')
parser.add_argument('--host', dest='host', action='store',
default='localhost', help='Elasticsearch host name')
parser.add_argument('--http-port', dest='http_port', action='store',
default=DEFAULT_HTTP_PORT, type=int, help='Elasticsearch HTTP port')
parser.add_argument('--transport-port', dest='transport_port', action='store',
default=DEFAULT_TRANSPORT_PORT, type=int, help='Elasticsearch Transport port')
parser.add_argument('--analyzer-name', dest='analyzer_name', action='store',
default='standard', help='Analyzer name')
parser.add_argument('--text', dest='text', action='store', help='Text to analyze')
parser.add_argument('--plugin', dest='plugins', action='append', help='Plugins to install')
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true',
default=False, help='Display debug messages')
parser.add_argument('--stop', dest='stop', action='store_true',
default=False, help='Stop Elasticsearch on exit')
return parser.parse_args(args=args)
def configure_logging(options):
formatter = Formatter('[%(asctime)s] %(message)s')
handler = StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
if options.verbose:
logger.setLevel(10)
else:
logger.setLevel(20)
def main(args=None):
options = parse_args(args)
configure_logging(options)
plugin_names = DEFAULT_PLUGINS if options.plugins is None else options.plugins
start_server(host=options.host,
http_port=options.http_port,
transport_port=options.transport_port,
cluster_name=options.cluster_name,
plugin_names=plugin_names,
esrunner_version=options.esrunner_version)
tokens = analyzer(options.text,
analyzer=options.analyzer_name)
print('\n'.join(tokens))
if options.stop:
stop_server(host=options.host,
http_port=options.http_port,
esrunner_version=options.esrunner_version)
return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"logging.Formatter",
"logging.StreamHandler",
"argparse.ArgumentParser",
"logging.getLogger"
] |
[((698, 717), 'logging.getLogger', 'getLogger', (['"""esanpy"""'], {}), "('esanpy')\n", (707, 717), False, 'from logging import getLogger, Formatter, StreamHandler\n'), ((755, 796), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'None'}), '(description=None)\n', (778, 796), False, 'import argparse\n'), ((2339, 2377), 'logging.Formatter', 'Formatter', (['"""[%(asctime)s] %(message)s"""'], {}), "('[%(asctime)s] %(message)s')\n", (2348, 2377), False, 'from logging import getLogger, Formatter, StreamHandler\n'), ((2392, 2417), 'logging.StreamHandler', 'StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (2405, 2417), False, 'from logging import getLogger, Formatter, StreamHandler\n')]
|
import psutil
import binascii
import socket
import ipaddress
"""
**Module Overview:**
This module will interact with Tor to get real time statistical and analytical information.
|-is_alive - check tor process is alive or killed
|-is_valid_ipv4_address-check for valid ip address
|-authenticate- cookie authentication of control port
|-get_version- get version of tor
|-get_pid- find pid of tor service
|-get_info- get information like version,exit policy,network status etc
|-set_conf- change the value of one or more configurable variable
|-reset_conf-set the configurable variable to default values
|-get_conf- Request the value of zero or more configuration variable
|-get_ports- retrieve information about listeners on different ports
|-get_network_statuses- Router status info (v3 directory style) for all ORs.
|-get_exit_policy-The default exit policy lines that Tor will *append* to the ExitPolicy config option.
|-prt_check-check validity of ports
|-can_exit_to- check whether one can exit through a particular port
|-get_circuits- get information about circuits present for use
|-port_usage-Usage of particular port
|-get_info_relay- retrieve information from database about a particular relay
|-status-tell status of a circuit BUILT or not
|-build_flag- build flag on circuit and relays
|-path- return path of circuit
|-created- circuit created info
|-signal-signal control port like NEWNYM,RELOAD etc
|-get_fingerprint-the contents of the fingerprint file that Tor writes as a relay, or a 551 if we're not a relay currently.
|-get_network_status- network status of a relay with a given fingerprint
"""
def is_alive():
for proc in psutil.process_iter():
try:
if 'tor' in proc.name().lower():
return True
except(psutil.NoSuchProcess,psutil.AccessDenied,psutil.ZombieProcess):
pass
return False
def is_valid_ipv4_address(address):
if not isinstance(address, (bytes, str)):
return False
if address.count('.') != 3:
return False
for entry in address.split('.'):
if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
return False
elif entry[0] == '0' and len(entry) > 1:
return False
return True
def authenticate():
control_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
control_socket.connect(('127.0.0.1',9051))
signal=bytes('PROTOCOLINFO \r\n','utf-8')
control_socket.send(signal)
rcv=control_socket.recv(4096).decode('utf-8')
rcv=rcv.splitlines()
if rcv[0]!='250-PROTOCOLINFO 1':
return None
cookie_path=rcv[1].split('"')
cookie_path=cookie_path[1]
f=open(cookie_path,"rb")
q=f.read()
q=binascii.b2a_hex(q)
q=q.decode('utf-8')
signal=bytes('AUTHENTICATE ' +q+' \r\n','utf-8')
control_socket.send(signal)
rcv=control_socket.recv(4096).decode('utf-8').split()[0]
if rcv=='250':
return control_socket
return None
def get_version():
control_socket=authenticate()
control_socket.send(bytes("GETINFO version \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.split('=')
result=result[1].split(' ')
return result[0]
def get_pid(name):
control_socket=authenticate()
control_socket.send(bytes("GETINFO process/pid \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.splitlines()
result=result[0].split('=')[1]
pid=result
return int(pid)
def get_info(query):
control_socket=authenticate()
getinfo='GETINFO '+query+" \r\n"
control_socket.send(bytes(getinfo,'utf-8'))
result=control_socket.recv(4096)
result=result+control_socket.recv(4096)
result=result+control_socket.recv(4096)
return result
def set_conf(name,new_value):
control_socket=authenticate()
setconf='SETCONF '+name+'='+new_value+' \r\n'
control_socket.send(bytes(setconf,'utf-8'))
result=control_socket.recv(4096)
def reset_conf(name):
control_socket=authenticate()
setconf='SETCONF '+name+'= \r\n'
control_socket.send(bytes(setconf,'utf-8'))
result=control_socket.recv(4096)
def get_conf(name):
control_socket=authenticate()
control_socket.send(bytes("GETCONF "+ name+" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
if result is None or "=" not in result:
return result
result=result.split('=')
result=' '.join(result[1].split())
return result
def get_ports(port_name):
control_socket=authenticate()
port_name=port_name.lower()
control_socket.send(bytes("GETINFO net/listeners/"+ port_name +" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
result=result.splitlines()
result=result[0].split('=')[1]
if len(result.split())>1 and len(result.split()[0].split(':'))>1:
result=result.split()[0].split(':')[1][:-1]
portlist=[]
if result!='':
try:
value=int(result)
portlist.append(value)
except ValueError:
pass
return portlist
def get_network_statuses():
control_socket=authenticate()
control_socket.send(bytes("GETINFO ns/all \r\n",'utf-8'))
controlsocket=control_socket.recv(4096).decode('utf-8')
result=""
for i in range(0,250):
result+=controlsocket
controlsocket=control_socket.recv(4096).decode('utf-8')
address_list=[]
or_list=[]
for line in result.splitlines():
if(line[0]=='r'):
data=line.split()
if(len(data)==9):
address_list.append(data[6])
or_list.append(data[7])
else:
continue
return address_list,or_list
def get_exit_policy():
PRIVATE_ADDRESSES = (
'0.0.0.0/8',
'169.254.0.0/16',
'127.0.0.0/8',
'192.168.0.0/16',
'10.0.0.0/8',
'172.16.0.0/12',
)
control_socket=authenticate()
control_socket.send(bytes("GETINFO address \r\n",'utf-8'))
address=control_socket.recv(4096).decode('utf-8').split('=')
if len(address)>=2:
address=address[1].splitlines()[0]
PRIVATE_ADDRESSES+=(address,)
control_socket.send(bytes("GETCONF ExitPolicyRejectPrivate \r\n",'utf-8'))
exitpolicy=control_socket.recv(4096).decode('utf-8')
exitpolicy=exitpolicy.split('=')[1]
exitpolicy=int(exitpolicy)
if exitpolicy==1:
acceptance='reject'
else:
        acceptance='accept'
result=""
for ip_address in PRIVATE_ADDRESSES:
result+=acceptance+' '+ip_address+':*, '
control_socket.send(bytes("GETINFO exit-policy/default \r\n",'utf-8'))
result+=control_socket.recv(4096).decode('utf-8').split('=')[1].replace(',',', ')
return result.splitlines()[0]
def prt_check(prt,port):
prt=prt.split('-')
if len(prt)==2:
miniport=int(prt[0])
maxiport=int(prt[1])
else:
miniport=int(prt[0])
maxiport=int(prt[0])
if port>=miniport and port<=maxiport:
return True
else:
return False
def can_exit_to(policy,address,port):
policy=policy.split(',')
for policy_itr in policy:
accept=policy_itr.split()[0]
addr=policy_itr.split()[1].split(':')[0]
prt=policy_itr.split()[1].split(':')[1]
if (addr=='*' or ipaddress.ip_address(address) in ipaddress.ip_network(addr)) and (prt=='*' or prt_check(prt,port)):
if(accept=='reject'):
return False
else:
return True
return True
def get_circuits():
control_socket=authenticate()
control_socket.send(bytes("GETINFO circuit-status \r\n","utf-8"))
response=control_socket.recv(4096).decode('utf-8')
response=response.splitlines()
circuit_info=[]
response=response[1:-2]
for res in response:
circuit_info.append("CIRC "+res+"\n")
return circuit_info
def port_usage(port):
file=open('ports.cfg','r')
lines=file.readlines()
port_usg=''
for line in lines:
line=line.split()
if len(line)>3:
if line[0]=='port':
if line[1]==str(port):
port_usg=line[3]
if port_usg!='':
return port_usg
else:
log_trace("BUG failed to find port usages")
return None
def get_info_relay(query):
control_socket=authenticate()
control_socket.send(bytes("GETINFO "+query+" \r\n",'utf-8'))
response=control_socket.recv(4096).decode('utf-8')
if response[0]=='5':
return None
response=response.splitlines()[0]
response=response.split('=')[1]
return response
def status(circuit_info):
if len(circuit_info.split())>2 and circuit_info.split()[2]=='BUILT':
return 'BUILT'
return 'NOT BUILT'
def build_flags(circuit_info):
if len(circuit_info.split())<5:
return []
circuit_info=circuit_info.split()[4]
if len(circuit_info.split('='))<2:
return []
circuit_info=circuit_info.split('=')[1]
circuit_info=circuit_info.split(',')
return circuit_info
def path(circuit_info):
path_list=[]
if len(circuit_info.split())<4:
return []
circuit_info=circuit_info.split()[3]
circuit_info=circuit_info.split(',')
for circ in circuit_info:
path_list.append(circ.split('~'))
return path_list
def created(circuit_info):
if(len(circuit_info.split())<7):
return ''
circuit_info=circuit_info.split()[6]
circuit_info=circuit_info.split('=')[1]
circuit_info=circuit_info[:10]+" "+circuit_info[11:]
return circuit_info
def signal(signal_query,control_socket):
control_socket.send(bytes("SIGNAL "+ signal_query+" \r\n","utf-8"))
response=control_socket.recv(4096).decode('utf-8')
if response.split()[0]=='250':
return True
else:
return False
def get_fingerprint():
control_socket=authenticate()
control_socket.send(bytes("GETINFO fingerprint \r\n",'utf-8'))
result=control_socket.recv(4096)
response_code=result.decode('utf-8').split()
if response_code[0]=='551':
return ""
    fingerprint=result.decode('utf-8').split('=')[1].split()[0]
return fingerprint
def get_network_status(fingerprint):
control_socket=authenticate()
control_socket.send(bytes("GETINFO ns/id/"+fingerprint+" \r\n",'utf-8'))
result=control_socket.recv(4096)
result=result.decode('utf-8')
dict_network_status={}
if len(result.split('='))<2:
return dict_network_status
result=result.split('=')[1]
result=result.splitlines()
flags=result[2]
result=result[1]
result=result.split()
if len(result)>=9:
dict_network_status["dir_port"]=result[8]
else:
dict_network_status["dir_port"]='None'
    if len(result)>=8:
dict_network_status["or_port"]=result[7]
else:
dict_network_status["or_port"]="None"
dict_network_status["nickname"]=result[1]
dict_network_status["published"]=result[4]+" "+result[5]
dict_network_status["flags"]=flags.split()[1:]
return dict_network_status
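if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): assumes a local
    # tor process with ControlPort 9051 and cookie authentication, exactly as
    # authenticate() above expects.
    if is_alive():
        print('tor version:', get_version())
        print('socks ports:', get_ports('SOCKS'))
        for circ in get_circuits():
            print(status(circ), path(circ))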
|
[
"psutil.process_iter",
"ipaddress.ip_network",
"binascii.b2a_hex",
"socket.socket",
"ipaddress.ip_address"
] |
[((1643, 1664), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (1662, 1664), False, 'import psutil\n'), ((2183, 2232), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2196, 2232), False, 'import socket\n'), ((2566, 2585), 'binascii.b2a_hex', 'binascii.b2a_hex', (['q'], {}), '(q)\n', (2582, 2585), False, 'import binascii\n'), ((6737, 6766), 'ipaddress.ip_address', 'ipaddress.ip_address', (['address'], {}), '(address)\n', (6757, 6766), False, 'import ipaddress\n'), ((6770, 6796), 'ipaddress.ip_network', 'ipaddress.ip_network', (['addr'], {}), '(addr)\n', (6790, 6796), False, 'import ipaddress\n')]
|
import paramiko
from src.server_base import ServerBase
from src.ssh_server_interface import SshServerInterface
from src.shell import Shell
class SshServer(ServerBase):
def __init__(self, host_key_file, host_key_file_password=None):
super(SshServer, self).__init__()
self._host_key = paramiko.RSAKey.from_private_key_file(host_key_file, host_key_file_password)
def connection_function(self, client):
try:
# create the SSH transport object
session = paramiko.Transport(client)
session.add_server_key(self._host_key)
# create the server
server = SshServerInterface()
# start the SSH server
try:
session.start_server(server=server)
except paramiko.SSHException:
return
# create the channel and get the stdio
channel = session.accept()
stdio = channel.makefile('rwU')
# create the client shell and start it
# cmdloop() will block execution of this thread.
self.client_shell = Shell(stdio, stdio)
self.client_shell.cmdloop()
# After execution continues, we can close the session
# since the only way execution will continue from
# cmdloop() is if we explicitly return True from it,
# which we do with the bye command.
session.close()
except:
pass
|
[
"src.shell.Shell",
"paramiko.RSAKey.from_private_key_file",
"src.ssh_server_interface.SshServerInterface",
"paramiko.Transport"
] |
[((307, 383), 'paramiko.RSAKey.from_private_key_file', 'paramiko.RSAKey.from_private_key_file', (['host_key_file', 'host_key_file_password'], {}), '(host_key_file, host_key_file_password)\n', (344, 383), False, 'import paramiko\n'), ((509, 535), 'paramiko.Transport', 'paramiko.Transport', (['client'], {}), '(client)\n', (527, 535), False, 'import paramiko\n'), ((641, 661), 'src.ssh_server_interface.SshServerInterface', 'SshServerInterface', ([], {}), '()\n', (659, 661), False, 'from src.ssh_server_interface import SshServerInterface\n'), ((1112, 1131), 'src.shell.Shell', 'Shell', (['stdio', 'stdio'], {}), '(stdio, stdio)\n', (1117, 1131), False, 'from src.shell import Shell\n')]
|
# import our libraries
import time
from datetime import date
# get today's date
today = date.today()
print(today)
# create a custom date
future_date = date(2020, 1, 31)
print(future_date)
# let's create a time stamp
time_stamp = time.time()
print(time_stamp)
# create a date from a timestamp
date_stamp = date.fromtimestamp(time_stamp)
print(date_stamp)
# get components of a date
print(date_stamp.year)
print(date_stamp.month)
print(date_stamp.day)
# ------------------------- PART TWO --------------------------
from datetime import datetime, date, time
# create a date and a time
my_date = date(2019, 3, 22)
my_time = time(12, 30)
# create a datetime
my_datetime = datetime.combine(my_date, my_time)
print(my_datetime)
# get the different components
print(my_datetime.year)
print(my_datetime.month)
print(my_datetime.day)
print(my_datetime.hour)
print(my_datetime.minute)
|
[
"datetime.date",
"datetime.date.today",
"datetime.date.fromtimestamp",
"datetime.time.time",
"datetime.time",
"datetime.datetime.combine"
] |
[((79, 91), 'datetime.date.today', 'date.today', ([], {}), '()\n', (89, 91), False, 'from datetime import datetime, date, time\n'), ((143, 160), 'datetime.date', 'date', (['(2020)', '(1)', '(31)'], {}), '(2020, 1, 31)\n', (147, 160), False, 'from datetime import datetime, date, time\n'), ((222, 233), 'datetime.time.time', 'time.time', ([], {}), '()\n', (231, 233), False, 'from datetime import datetime, date, time\n'), ((299, 329), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['time_stamp'], {}), '(time_stamp)\n', (317, 329), False, 'from datetime import datetime, date, time\n'), ((591, 608), 'datetime.date', 'date', (['(2019)', '(3)', '(22)'], {}), '(2019, 3, 22)\n', (595, 608), False, 'from datetime import datetime, date, time\n'), ((619, 631), 'datetime.time', 'time', (['(12)', '(30)'], {}), '(12, 30)\n', (623, 631), False, 'from datetime import datetime, date, time\n'), ((667, 701), 'datetime.datetime.combine', 'datetime.combine', (['my_date', 'my_time'], {}), '(my_date, my_time)\n', (683, 701), False, 'from datetime import datetime, date, time\n')]
|
import socket
import time
import struct
import os
import numpy
import sys
with open(sys.argv[1], "rb") as f:
data = f.read()[8:]
datarts = numpy.array(struct.unpack("{}Q".format(len(data) // 8), data))
nEvents = 8
HOST = 'localhost' # The remote host
PORT = 6666 # The same port as used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
index = 0
    # send events in chunks of nEvents and stop once the data is exhausted
    while index * 8 * nEvents < len(data):
        s.sendall(data[index*8*nEvents:(index+1)*8*nEvents])
        index+=1
        #s.sendall(b'\x55\x55\x00\x00\x00\x00\x00\x00')
        if index < len(datarts):
            time.sleep(datarts[index] / 1000000)
|
[
"socket.socket",
"time.sleep"
] |
[((330, 379), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (343, 379), False, 'import socket\n'), ((586, 622), 'time.sleep', 'time.sleep', (['(datarts[index] / 1000000)'], {}), '(datarts[index] / 1000000)\n', (596, 622), False, 'import time\n')]
|
"""
Various round-to-integer helpers.
"""
import math
import functools
import logging
log = logging.getLogger(__name__)
__all__ = [
"noRound",
"otRound",
"maybeRound",
"roundFunc",
]
def noRound(value):
return value
def otRound(value):
"""Round float value to nearest integer towards ``+Infinity``.
The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
defines the required method for converting floating point values to
fixed-point. In particular it specifies the following rounding strategy:
for fractional values of 0.5 and higher, take the next higher integer;
for other fractional values, truncate.
This function rounds the floating-point value according to this strategy
in preparation for conversion to fixed-point.
Args:
value (float): The input floating-point value.
    Returns:
float: The rounded value.
"""
# See this thread for how we ended up with this implementation:
# https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
return int(math.floor(value + 0.5))
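# A few spot checks of the strategy described in the docstring (illustrative
# values, not taken from the fontTools test suite):
#   otRound(0.5)  -> 1    (a fraction of exactly .5 rounds up towards +Infinity)
#   otRound(-0.5) -> 0    (-0.5 + 0.5 == 0.0, floor gives 0)
#   otRound(1.2)  -> 1    (other fractions truncate)
#   otRound(-1.5) -> -1   (towards +Infinity, not away from zero)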
def maybeRound(v, tolerance, round=otRound):
rounded = round(v)
return rounded if abs(rounded - v) <= tolerance else v
def roundFunc(tolerance, round=otRound):
if tolerance < 0:
raise ValueError("Rounding tolerance must be positive")
if tolerance == 0:
return noRound
if tolerance >= .5:
return round
return functools.partial(maybeRound, tolerance=tolerance, round=round)
|
[
"functools.partial",
"math.floor",
"logging.getLogger"
] |
[((94, 121), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'import logging\n'), ((1521, 1584), 'functools.partial', 'functools.partial', (['maybeRound'], {'tolerance': 'tolerance', 'round': 'round'}), '(maybeRound, tolerance=tolerance, round=round)\n', (1538, 1584), False, 'import functools\n'), ((1141, 1164), 'math.floor', 'math.floor', (['(value + 0.5)'], {}), '(value + 0.5)\n', (1151, 1164), False, 'import math\n')]
|
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import imutils
import cv2
import numpy as np
import sys
# parameters for loading data and images
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.106-0.65.hdf5'
img_path = sys.argv[1]
# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry","disgust","scared", "happy", "sad", "surprised","neutral"]
#reading the frame
orig_frame = cv2.imread(img_path)
frame = cv2.imread(img_path,0)
faces = face_detection.detectMultiScale(frame,scaleFactor=1.2,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) > 0:
faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
roi = frame[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (48, 48))
roi = roi.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds = emotion_classifier.predict(roi)[0]
emotion_probability = np.max(preds)
label = EMOTIONS[preds.argmax()]
cv2.putText(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(orig_frame, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
print(label)
cv2.imshow('test_face', orig_frame)
cv2.imwrite('test_output/'+img_path.split('/')[-1],orig_frame)
if (cv2.waitKey(2000) & 0xFF == ord('q')):
sys.exit("Thanks")
cv2.destroyAllWindows()
|
[
"keras.models.load_model",
"cv2.putText",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.expand_dims",
"cv2.rectangle",
"cv2.imread",
"keras.preprocessing.image.img_to_array",
"numpy.max",
"cv2.CascadeClassifier",
"sys.exit",
"cv2.imshow",
"cv2.resize"
] |
[((425, 468), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['detection_model_path'], {}), '(detection_model_path)\n', (446, 468), False, 'import cv2\n'), ((490, 535), 'keras.models.load_model', 'load_model', (['emotion_model_path'], {'compile': '(False)'}), '(emotion_model_path, compile=False)\n', (500, 535), False, 'from keras.models import load_model\n'), ((649, 669), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (659, 669), False, 'import cv2\n'), ((679, 702), 'cv2.imread', 'cv2.imread', (['img_path', '(0)'], {}), '(img_path, 0)\n', (689, 702), False, 'import cv2\n'), ((1461, 1496), 'cv2.imshow', 'cv2.imshow', (['"""test_face"""', 'orig_frame'], {}), "('test_face', orig_frame)\n", (1471, 1496), False, 'import cv2\n'), ((1626, 1649), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1647, 1649), False, 'import cv2\n'), ((1012, 1037), 'cv2.resize', 'cv2.resize', (['roi', '(48, 48)'], {}), '(roi, (48, 48))\n', (1022, 1037), False, 'import cv2\n'), ((1086, 1103), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1098, 1103), False, 'from keras.preprocessing.image import img_to_array\n'), ((1114, 1141), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1128, 1141), True, 'import numpy as np\n'), ((1215, 1228), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (1221, 1228), True, 'import numpy as np\n'), ((1270, 1368), 'cv2.putText', 'cv2.putText', (['orig_frame', 'label', '(fX, fY - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, \n 0.45, (0, 0, 255), 2)\n', (1281, 1368), False, 'import cv2\n'), ((1368, 1439), 'cv2.rectangle', 'cv2.rectangle', (['orig_frame', '(fX, fY)', '(fX + fW, fY + fH)', '(0, 0, 255)', '(2)'], {}), '(orig_frame, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)\n', (1381, 1439), False, 'import cv2\n'), ((1607, 1625), 'sys.exit', 'sys.exit', (['"""Thanks"""'], {}), "('Thanks')\n", (1615, 1625), False, 'import sys\n'), ((1564, 1581), 'cv2.waitKey', 'cv2.waitKey', (['(2000)'], {}), '(2000)\n', (1575, 1581), False, 'import cv2\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="positional_encodings",
version="5.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="1D, 2D, and 3D Sinusodal Positional Encodings in PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tatp22/multidim-positional-encoding",
packages=setuptools.find_packages(),
keywords=["transformers", "attention"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=["torch", "tensorflow", "numpy"],
)
|
[
"setuptools.find_packages"
] |
[((454, 480), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (478, 480), False, 'import setuptools\n')]
|
import pytest
import json
from collections import OrderedDict
from gendata import gen_permutations, gen_random, prepare_col_opts
@pytest.fixture
def col_opts_test_data_one_level():
col_opts = OrderedDict()
col_opts["Col0"] = {
"Value0_A": 0.1,
"Value0_B": 0.2,
"Value0_C": 0.7
}
return dict(col_opts=col_opts,
total_cols_exp=1,
total_rows_exp=3)
@pytest.fixture
def col_opts_test_data_two_level(col_opts_test_data_one_level):
test_data = col_opts_test_data_one_level
col_opts = test_data['col_opts']
col_opts["Col1"] = {
"Value1_A_half": 0.5,
"Value1_B_half": 0.5
}
test_data['total_cols_exp'] += 1 #we added one column
test_data['total_rows_exp'] *= 2 # we added 2 values. so 2x expected permutations
return test_data
@pytest.fixture
def col_opts_test_data_three_level(col_opts_test_data_two_level):
test_data = col_opts_test_data_two_level
col_opts = test_data['col_opts']
col_opts["Col2"] = {
"Value2_A_10perc": 0.10,
"Value2_B_20perc": 0.20,
"Value2_C_30perc": 0.30,
"Value2_D_40perc_DEFAULT": "DEFAULT"
}
test_data['total_cols_exp'] += 1 #we added one column
    test_data['total_rows_exp'] *= 4 # we added 4 values, so 4x expected permutations
return test_data
@pytest.fixture
def col_opts_test_data_four_level(col_opts_test_data_three_level):
test_data = col_opts_test_data_three_level
col_opts = test_data['col_opts']
col_opts["Col3"] = {
"Value3_A_100perc": "DEFAULT"
}
test_data['total_cols_exp'] += 1 #we added one column
test_data['total_rows_exp'] *= 1 # we added 1 value. No additional rows
return test_data
def _assert_result_shape(test_data, rows):
"""
Make sure the row result set is correct shape (#rows, # columns
:param col_opts:
:param rows: array or rows
"""
assert test_data
assert rows
assert len(rows) == test_data['total_rows_exp']
assert len(rows[0].keys()) == test_data['total_cols_exp']
assert len(rows[-1].keys()) == test_data['total_cols_exp']
class Test_gen_permutations():
def test_one_level(self, col_opts_test_data_one_level):
test_data = col_opts_test_data_one_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_two_level(self, col_opts_test_data_two_level):
test_data = col_opts_test_data_two_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_three_level(self, col_opts_test_data_three_level):
test_data = col_opts_test_data_three_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
def test_four_level(self, col_opts_test_data_four_level):
test_data = col_opts_test_data_four_level
rows = gen_permutations(test_data['col_opts'])
_assert_result_shape(test_data, rows)
|
[
"collections.OrderedDict",
"gendata.gen_permutations"
] |
[((198, 211), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (209, 211), False, 'from collections import OrderedDict\n'), ((2304, 2343), 'gendata.gen_permutations', 'gen_permutations', (["test_data['col_opts']"], {}), "(test_data['col_opts'])\n", (2320, 2343), False, 'from gendata import gen_permutations, gen_random, prepare_col_opts\n'), ((2515, 2554), 'gendata.gen_permutations', 'gen_permutations', (["test_data['col_opts']"], {}), "(test_data['col_opts'])\n", (2531, 2554), False, 'from gendata import gen_permutations, gen_random, prepare_col_opts\n'), ((2732, 2771), 'gendata.gen_permutations', 'gen_permutations', (["test_data['col_opts']"], {}), "(test_data['col_opts'])\n", (2748, 2771), False, 'from gendata import gen_permutations, gen_random, prepare_col_opts\n'), ((2946, 2985), 'gendata.gen_permutations', 'gen_permutations', (["test_data['col_opts']"], {}), "(test_data['col_opts'])\n", (2962, 2985), False, 'from gendata import gen_permutations, gen_random, prepare_col_opts\n')]
|
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
class PieChart:
"""
Class which defines a PieChart graph.
Attributes:
__fig : fig ; reference to diagram (which contains all graphs)
__max_rows : int ; max number of rows
__max_cols : int ; max number of cols
        __num_cols : int ; current number of cols added to diagram
        __num_rows : int ; current number of rows added to diagram
__title : str ; title name of diagram
__current_title_index : str ; current value of index of title's list
__titles : list ; list of titles of each graph
"""
def __init__(self, rows : int, cols : int, title : str, titles_sub_graphs : list):
"""Set attributes as arguments."""
specs_l : list[list] = list(list())
specs_sl : list = list()
for i in range (0, rows):
specs_sl = list()
for j in range(0, cols):
specs_sl.append({'type' : 'domain'})
specs_l.append(specs_sl)
self.__fig = make_subplots(rows = rows, cols = cols, specs = specs_l, subplot_titles = titles_sub_graphs)
self.__max_rows : int = rows
self.__max_cols : int = cols
self.__num_cols : int = 0
self.__num_rows : int = 1
self.__title : str = title
self.__titles : list = titles_sub_graphs
self.__current_title_index : int = 0
pass
    @staticmethod
    def draw(labels : list, sizes : list, explode : list):
if len(labels) != len(sizes) or len(labels) != len(explode):
return False
plt.pie(sizes, explode = explode, labels = labels, autopct='%1.1f%%',
shadow = True, startangle = 90)
plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig('books_read.png')
pass
def add_graph(self, labels : list, values : list, legend_group : str) -> bool:
if self.__num_cols > self.__max_cols or self.__num_rows > self.__max_rows or self.__current_title_index >= len(self.__titles) :
return False
self.__num_cols += 1
if self.__num_cols > self.__max_cols:
self.__num_cols = 1
self.__num_rows += 1
if self.__num_rows > self.__max_rows:
return False
self.__fig.add_trace(go.Pie(labels = labels, values = values, showlegend = True, legendgroup = legend_group), row = self.__num_rows, col = self.__num_cols)
        self.__fig.update_yaxes(title_text = self.__titles[self.__current_title_index], row = self.__num_rows, col = self.__num_cols)
self.__current_title_index += 1
return True
pass
def __set_features(self):
""" Set some features."""
plt.tight_layout()
self.__fig.update_layout(title = {'text' : self.__title, 'x' : 0.5, 'xanchor': 'center'}, #legend = dict(yanchor = "top",
#y = 0.9, xanchor = "right", x = 0.01),
legend_title = "Legend", font = dict(size = 12, color = "Black"), legend_traceorder="grouped")
pass
def show(self):
""" Show Graph."""
self.__set_features()
self.__fig.show()
pass
def save(self, file_str : str):
""" Save figure in file indicated as argument.
Params:
file_str : str ; path to file where save figure
"""
self.__set_features()
self.__fig.write_html(file_str)
pass
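if __name__ == "__main__":
    # Hedged usage sketch (labels, values and file name are illustrative, not
    # from the original project): a 1x2 diagram with two pie graphs.
    chart = PieChart(rows=1, cols=2, title="Example", titles_sub_graphs=["A", "B"])
    chart.add_graph(labels=["x", "y"], values=[30, 70], legend_group="A")
    chart.add_graph(labels=["x", "y"], values=[55, 45], legend_group="B")
    chart.save("example_pie.html")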
|
[
"plotly.graph_objects.Pie",
"matplotlib.pyplot.axis",
"plotly.subplots.make_subplots",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.pie"
] |
[((1212, 1301), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': 'rows', 'cols': 'cols', 'specs': 'specs_l', 'subplot_titles': 'titles_sub_graphs'}), '(rows=rows, cols=cols, specs=specs_l, subplot_titles=\n titles_sub_graphs)\n', (1225, 1301), False, 'from plotly.subplots import make_subplots\n'), ((1753, 1851), 'matplotlib.pyplot.pie', 'plt.pie', (['sizes'], {'explode': 'explode', 'labels': 'labels', 'autopct': '"""%1.1f%%"""', 'shadow': '(True)', 'startangle': '(90)'}), "(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=\n True, startangle=90)\n", (1760, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1892), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1883, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1991), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""books_read.png"""'], {}), "('books_read.png')\n", (1973, 1991), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2939), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2937, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2577), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values', 'showlegend': '(True)', 'legendgroup': 'legend_group'}), '(labels=labels, values=values, showlegend=True, legendgroup=legend_group)\n', (2504, 2577), True, 'import plotly.graph_objects as go\n')]
|
from ehr_functions.models.types._sklearn import SKLearnModel
from sklearn.linear_model import ElasticNet as EN
import numpy as np
class ElasticNet(SKLearnModel):
def __init__(self, round_output=False, **kwargs):
super().__init__(EN, kwargs)
self.round_output = round_output
def predict(self, x):
output = super().predict(x)
if self.round_output:
output = np.round(output)
return output
|
[
"numpy.round"
] |
[((410, 426), 'numpy.round', 'np.round', (['output'], {}), '(output)\n', (418, 426), True, 'import numpy as np\n')]
|
import logging
import traceback
from django.conf import settings
from sparrow_cloud.dingtalk.sender import send_message
from sparrow_cloud.middleware.base.base_middleware import MiddlewareMixin
logger = logging.getLogger(__name__)
MESSAGE_LINE = """
##### <font color=\"info\"> Service name: {service_name}</font> #####
> Process exception message: <font color=\"warning\">{exception_info}</font>
"""
class ExceptionMiddleware(MiddlewareMixin):
def process_exception(self, request, exception):
debug = settings.DEBUG
code = getattr(settings, "CLOUD_ERROR_NOTIFICATION_ROBOT", "cloud_error_notification_robot")
service_name = getattr(settings, "SERVICE_CONF", None).get("NAME", None)
if debug is True:
pass
else:
exception_info = traceback.format_exc()[-800:-1]
try:
msg = MESSAGE_LINE.format(service_name=service_name, exception_info=exception_info)
logger.info("sparrow_cloud log, service process_exception info : {}".format(msg))
send_message(msg=msg, code_list=[code], channel="wechat", message_type="markdown")
except Exception as ex:
logger.error("sparrow_cloud 发送服务异常信息通知失败,原因: {}".format(ex))
|
[
"sparrow_cloud.dingtalk.sender.send_message",
"traceback.format_exc",
"logging.getLogger"
] |
[((204, 231), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'import logging\n'), ((779, 801), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (799, 801), False, 'import traceback\n'), ((1042, 1129), 'sparrow_cloud.dingtalk.sender.send_message', 'send_message', ([], {'msg': 'msg', 'code_list': '[code]', 'channel': '"""wechat"""', 'message_type': '"""markdown"""'}), "(msg=msg, code_list=[code], channel='wechat', message_type=\n 'markdown')\n", (1054, 1129), False, 'from sparrow_cloud.dingtalk.sender import send_message\n')]
|
# Generated by Django 3.2.4 on 2021-07-05 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Audio_store1',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('record', models.FileField(upload_to='')),
('title', models.CharField(default='my file', max_length=120)),
('wpm', models.IntegerField(blank=True, null=True)),
('pausesandfillerwords', models.IntegerField(blank=True, null=True)),
('pitch', models.IntegerField(blank=True, null=True)),
('duration', models.FloatField(blank=True, null=True)),
('pronunciation', models.FloatField(blank=True, null=True)),
('balance', models.FloatField(blank=True, null=True)),
('spotwords', models.IntegerField(blank=True, null=True)),
('sensitivewords', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'Audio_store1',
},
),
]
|
[
"django.db.models.FileField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.FloatField",
"django.db.models.IntegerField"
] |
[((308, 404), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (327, 404), False, 'from django.db import migrations, models\n'), ((430, 460), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (446, 460), False, 'from django.db import migrations, models\n'), ((489, 540), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""my file"""', 'max_length': '(120)'}), "(default='my file', max_length=120)\n", (505, 540), False, 'from django.db import migrations, models\n'), ((567, 609), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (586, 609), False, 'from django.db import migrations, models\n'), ((653, 695), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (672, 695), False, 'from django.db import migrations, models\n'), ((724, 766), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (743, 766), False, 'from django.db import migrations, models\n'), ((798, 838), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (815, 838), False, 'from django.db import migrations, models\n'), ((875, 915), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (892, 915), False, 'from django.db import migrations, models\n'), ((946, 986), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (963, 986), False, 'from django.db import migrations, models\n'), ((1019, 1061), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1038, 1061), False, 'from django.db import migrations, models\n'), ((1099, 1141), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1118, 1141), False, 'from django.db import migrations, models\n')]
|
import time
from collections import defaultdict
from dataclasses import dataclass
from logging import getLogger
from typing import Optional
@dataclass
class Bucket:
value: int = 0
last_updated_at: Optional[int] = None
def increment(self, timestamp: int):
self.value += 1
self.last_updated_at = timestamp
def reset(self):
self.value = 0
self.last_updated_at = None
class Metrix:
def __init__(self, ttl: int = 300, debug=False):
"""
Inits Metrix.
Args:
ttl (int): Time-to-live in seconds for events.
debug (bool): Set to True to enable additional logging.
Raises:
ValueError: TTL can't be non-positive
"""
if ttl <= 0:
raise ValueError(f"TTL can't be non-positive, got ttl={ttl}")
self.ttl = ttl
self.debug = debug
self._metrics_data = defaultdict(lambda: [Bucket() for _ in range(self.ttl)])
self._start_time = None
if self.debug:
self._logger = getLogger(name="metrix")
def increment(self, metric_name: str) -> None:
"""
Increments counter for a specified `metric_name`.
Args:
metric_name (str): Name of metric to increment.
"""
event_time = int(time.time())
if self._start_time is None:
self._start_time = event_time
# we use ring buffers to store events so we need to find an index
bucket_ind = (event_time - self._start_time) % self.ttl
bucket = self._metrics_data[metric_name][bucket_ind]
# in case of already used and outdated bucket we need to reset its value before we increment
if (
bucket.last_updated_at is not None
and bucket.last_updated_at < event_time - self.ttl
):
bucket.reset()
bucket.increment(event_time)
def sum(self, metric_name: str, interval: int) -> int:
"""
Returns counter value for a specified `metric_name` and specified
time range.
Args:
metric_name (str): Name of metric to retrieve number of occurrences.
interval (int): Number of seconds representing range of a query.
Returns:
            sum (int): Number of events recorded for `metric_name` over the last `interval` seconds (clipped to `ttl`).
"""
event_time = int(time.time())
if metric_name not in self._metrics_data:
if self.debug:
self._logger.debug(f"No events for metric_name={metric_name}")
return 0
if interval > self.ttl:
interval = self.ttl
if self.debug:
self._logger.debug(f"Clipped interval={interval} to ttl={self.ttl}")
sum_ = 0
for bucket in self._metrics_data[metric_name]:
if bucket.last_updated_at is not None:
if (
bucket.last_updated_at < event_time - self.ttl
): # reset outdated buckets
bucket.reset()
elif bucket.last_updated_at > event_time - interval:
sum_ += bucket.value
return sum_
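if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): record three
    # events and read back a 60-second window; the expected output is 3.
    metrics = Metrix(ttl=300)
    for _ in range(3):
        metrics.increment("requests")
    print(metrics.sum("requests", interval=60))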
|
[
"logging.getLogger",
"time.time"
] |
[((1056, 1080), 'logging.getLogger', 'getLogger', ([], {'name': '"""metrix"""'}), "(name='metrix')\n", (1065, 1080), False, 'from logging import getLogger\n'), ((1314, 1325), 'time.time', 'time.time', ([], {}), '()\n', (1323, 1325), False, 'import time\n'), ((2329, 2340), 'time.time', 'time.time', ([], {}), '()\n', (2338, 2340), False, 'import time\n')]
|
from __future__ import annotations
from typing import Iterable, Optional, Union
import materia as mtr
import numpy as np
import scipy.linalg
__all__ = [
"Identity",
"Inversion",
"Reflection",
"ProperRotation",
"ImproperRotation",
"SymmetryOperation",
]
class SymmetryOperation:
def __init__(
self,
matrix: Optional[np.ndarray] = None,
determinant: Optional[Union[int, float]] = None,
trace: Optional[float] = None,
axis: Optional[np.ndarray] = None,
) -> None:
if matrix is not None:
self.matrix, _ = scipy.linalg.polar(matrix)
elif determinant is not None and trace is not None:
if axis is None:
self.matrix, _ = scipy.linalg.polar(
determinant * np.eye(3).astype("float64")
)
else:
a = mtr.normalize(axis)
cos_theta = (trace - determinant) / 2
cos_theta = max(min(cos_theta, 1), -1)
theta = np.arccos(cos_theta)
self.matrix = mtr.rotation_matrix(
axis=a, theta=theta, improper=(determinant == -1)
)
else:
raise ValueError
def __eq__(self, other: SymmetryOperation) -> bool:
return hasattr(other, "matrix") and np.allclose(
self.matrix, other.matrix, atol=1e-3
)
@property
def det(self) -> int:
return int(round(np.linalg.det(self.matrix)))
@property
def tr(self) -> float:
return np.trace(self.matrix)
@property
def cos_theta(self) -> float:
return max(min((self.tr - np.sign(self.det)) / 2, 1.0), -1.0)
@property
def axis(self) -> np.ndarray:
# algorithm from scipp.ucsc.edu/~haber/ph116A/rotation_11.pdf
if np.isclose(abs(self.tr), 3):
return None
if np.isclose(self.tr * self.det, -1):
S = (np.eye(3) + self.det * self.matrix) / 2
for i in range(3):
signs = np.sign(S[:, i])
if not np.allclose(signs, [0, 0, 0]):
return signs * np.sqrt(np.abs(np.diag(S)))
inds = np.triu_indices(3, k=1)
return mtr.normalize(
(self.matrix.T - self.matrix)[inds][::-1] * np.array([1, -1, 1])
)
@property
def inverse(self) -> SymmetryOperation:
return SymmetryOperation(matrix=self.matrix.T)
def apply(self, structure: mtr.Structure):
return self.matrix @ structure.centered_atomic_positions.value
def error(self, structure: mtr.Structure):
kdt = scipy.spatial.KDTree(structure.centered_atomic_positions.value.T)
dists, _ = np.abs(kdt.query(self.apply(structure).T))
rs = np.abs(self.axis @ structure.centered_atomic_positions.value)
return dists / rs
def is_symmetry_of(self, structure: mtr.Structure, tolerance: float) -> bool:
round_to = round(-np.log(tolerance) / np.log(10))
X = structure.centered_atomic_positions.value
return set(
tuple(row) for row in self.apply(structure).T.round(round_to)
) == set(tuple(row) for row in X.T.round(round_to))
@property
def order(self) -> int:
return mtr.periodicity(self.matrix)
def __mul__(self, other):
return SymmetryOperation(matrix=self.matrix @ other.matrix)
class Identity(SymmetryOperation):
def __init__(self) -> None:
determinant = 1
trace = 3
axis = None
super().__init__(determinant=determinant, trace=trace, axis=axis)
class Inversion(SymmetryOperation):
def __init__(self) -> None:
determinant = -1
trace = -3
axis = None
super().__init__(determinant=determinant, trace=trace, axis=axis)
class Reflection(SymmetryOperation):
def __init__(self, axis: Iterable[Union[float, int]]) -> None:
determinant = -1
trace = 1
super().__init__(determinant=determinant, trace=trace, axis=axis)
class ProperRotation(SymmetryOperation):
def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None:
determinant = 1
trace = 2 * np.cos(2 * np.pi / order) + determinant
super().__init__(determinant=determinant, trace=trace, axis=axis)
def __repr__(self) -> str:
return f"ProperRotation(order={self.order})"
class ImproperRotation(SymmetryOperation):
def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None:
determinant = -1
trace = 2 * np.cos(2 * np.pi / order) + determinant
super().__init__(determinant=determinant, trace=trace, axis=axis)
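if __name__ == "__main__":
    # Hedged sanity check (not part of the original module): composing a
    # two-fold proper rotation about z with itself should give the identity.
    c2 = ProperRotation(order=2, axis=np.array([0.0, 0.0, 1.0]))
    print((c2 * c2) == Identity())  # expected: True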
|
[
"materia.rotation_matrix",
"numpy.trace",
"numpy.abs",
"numpy.log",
"numpy.eye",
"materia.normalize",
"numpy.allclose",
"numpy.triu_indices",
"numpy.isclose",
"numpy.array",
"numpy.cos",
"numpy.sign",
"numpy.linalg.det",
"materia.periodicity",
"numpy.arccos",
"numpy.diag"
] |
[((1568, 1589), 'numpy.trace', 'np.trace', (['self.matrix'], {}), '(self.matrix)\n', (1576, 1589), True, 'import numpy as np\n'), ((1904, 1938), 'numpy.isclose', 'np.isclose', (['(self.tr * self.det)', '(-1)'], {}), '(self.tr * self.det, -1)\n', (1914, 1938), True, 'import numpy as np\n'), ((2202, 2225), 'numpy.triu_indices', 'np.triu_indices', (['(3)'], {'k': '(1)'}), '(3, k=1)\n', (2217, 2225), True, 'import numpy as np\n'), ((2779, 2840), 'numpy.abs', 'np.abs', (['(self.axis @ structure.centered_atomic_positions.value)'], {}), '(self.axis @ structure.centered_atomic_positions.value)\n', (2785, 2840), True, 'import numpy as np\n'), ((3275, 3303), 'materia.periodicity', 'mtr.periodicity', (['self.matrix'], {}), '(self.matrix)\n', (3290, 3303), True, 'import materia as mtr\n'), ((1344, 1394), 'numpy.allclose', 'np.allclose', (['self.matrix', 'other.matrix'], {'atol': '(0.001)'}), '(self.matrix, other.matrix, atol=0.001)\n', (1355, 1394), True, 'import numpy as np\n'), ((1482, 1508), 'numpy.linalg.det', 'np.linalg.det', (['self.matrix'], {}), '(self.matrix)\n', (1495, 1508), True, 'import numpy as np\n'), ((2052, 2068), 'numpy.sign', 'np.sign', (['S[:, i]'], {}), '(S[:, i])\n', (2059, 2068), True, 'import numpy as np\n'), ((2312, 2332), 'numpy.array', 'np.array', (['[1, -1, 1]'], {}), '([1, -1, 1])\n', (2320, 2332), True, 'import numpy as np\n'), ((2996, 3006), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (3002, 3006), True, 'import numpy as np\n'), ((4205, 4230), 'numpy.cos', 'np.cos', (['(2 * np.pi / order)'], {}), '(2 * np.pi / order)\n', (4211, 4230), True, 'import numpy as np\n'), ((4573, 4598), 'numpy.cos', 'np.cos', (['(2 * np.pi / order)'], {}), '(2 * np.pi / order)\n', (4579, 4598), True, 'import numpy as np\n'), ((885, 904), 'materia.normalize', 'mtr.normalize', (['axis'], {}), '(axis)\n', (898, 904), True, 'import materia as mtr\n'), ((1039, 1059), 'numpy.arccos', 'np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (1048, 1059), True, 'import numpy as np\n'), ((1091, 1159), 'materia.rotation_matrix', 'mtr.rotation_matrix', ([], {'axis': 'a', 'theta': 'theta', 'improper': '(determinant == -1)'}), '(axis=a, theta=theta, improper=determinant == -1)\n', (1110, 1159), True, 'import materia as mtr\n'), ((1957, 1966), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1963, 1966), True, 'import numpy as np\n'), ((2092, 2121), 'numpy.allclose', 'np.allclose', (['signs', '[0, 0, 0]'], {}), '(signs, [0, 0, 0])\n', (2103, 2121), True, 'import numpy as np\n'), ((2976, 2993), 'numpy.log', 'np.log', (['tolerance'], {}), '(tolerance)\n', (2982, 2993), True, 'import numpy as np\n'), ((1673, 1690), 'numpy.sign', 'np.sign', (['self.det'], {}), '(self.det)\n', (1680, 1690), True, 'import numpy as np\n'), ((2173, 2183), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (2180, 2183), True, 'import numpy as np\n'), ((801, 810), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (807, 810), True, 'import numpy as np\n')]
|
# 7old
# search engine algorithm
# that gets data from DB
import sqlite3 as sl
def searchdb(q):
con = sl.connect("results.db")
cur = con.cursor()
rows = cur.execute("SELECT * FROM RESULT ORDER BY title")
result = []
for row in rows:
if (q in row[1] # URL
and row[1].count('/') <= 3
and (row[1].count('.') == 1
or (row[1].startswith('https://www.')
and row[1].count('.') == 2))
and '?' not in row[1]):
result.insert(0, row)
elif any(q in s for s in row):
result.append(row)
con.close()
return result
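if __name__ == "__main__":
    # Hedged example (not in the original script): assumes a local results.db
    # with a RESULT table, as searchdb() above does, and prints the top hits.
    for hit in searchdb("python")[:5]:
        print(hit)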
|
[
"sqlite3.connect"
] |
[((110, 134), 'sqlite3.connect', 'sl.connect', (['"""results.db"""'], {}), "('results.db')\n", (120, 134), True, 'import sqlite3 as sl\n')]
|
from django.conf.urls import url
from Basic_app import views
from pathlib import Path
urlpatterns = [
# The about page will be the homepage
url(r'^$',views.AboutView.as_view(),name='about'),
# Creating contact page
url(r'^contact/$',views.Contact_View,name='contact_create'),
# Contact confirmation page
url(r'^contact/confirm/$',views.ContactConfirmed.as_view(),name='contact_confirm'),
# List of Contacts page
url(r'^contact/contact_list/$',views.ContactList.as_view(),name='contact_list'),
# List of projects
url(r'^projects/$',views.ProjectList.as_view(),name='project_list'),
# New project creation,updating, and deleting
url(r'^projects/new/$',views.ProjectCreate.as_view(),name='project_create'),
url(r'^projects/(?P<pk>\d+)/edit/$',views.ProjectUpdate.as_view(),name='project_edit'),
url(r'^projects/(?P<pk>\d+)/remove/$',views.ProjectDelete.as_view(),name='project_remove'),
# Url for project detail generated by primary key
url(r'^projects/(?P<pk>\d+)$',views.ProjectDetailView.as_view(),name='project_detail'),
]
|
[
"Basic_app.views.ContactConfirmed.as_view",
"Basic_app.views.ProjectUpdate.as_view",
"Basic_app.views.ProjectList.as_view",
"django.conf.urls.url",
"Basic_app.views.ProjectDetailView.as_view",
"Basic_app.views.ProjectCreate.as_view",
"Basic_app.views.AboutView.as_view",
"Basic_app.views.ProjectDelete.as_view",
"Basic_app.views.ContactList.as_view"
] |
[((234, 294), 'django.conf.urls.url', 'url', (['"""^contact/$"""', 'views.Contact_View'], {'name': '"""contact_create"""'}), "('^contact/$', views.Contact_View, name='contact_create')\n", (237, 294), False, 'from django.conf.urls import url\n'), ((160, 185), 'Basic_app.views.AboutView.as_view', 'views.AboutView.as_view', ([], {}), '()\n', (183, 185), False, 'from Basic_app import views\n'), ((358, 390), 'Basic_app.views.ContactConfirmed.as_view', 'views.ContactConfirmed.as_view', ([], {}), '()\n', (388, 390), False, 'from Basic_app import views\n'), ((480, 507), 'Basic_app.views.ContactList.as_view', 'views.ContactList.as_view', ([], {}), '()\n', (505, 507), False, 'from Basic_app import views\n'), ((577, 604), 'Basic_app.views.ProjectList.as_view', 'views.ProjectList.as_view', ([], {}), '()\n', (602, 604), False, 'from Basic_app import views\n'), ((705, 734), 'Basic_app.views.ProjectCreate.as_view', 'views.ProjectCreate.as_view', ([], {}), '()\n', (732, 734), False, 'from Basic_app import views\n'), ((799, 828), 'Basic_app.views.ProjectUpdate.as_view', 'views.ProjectUpdate.as_view', ([], {}), '()\n', (826, 828), False, 'from Basic_app import views\n'), ((893, 922), 'Basic_app.views.ProjectDelete.as_view', 'views.ProjectDelete.as_view', ([], {}), '()\n', (920, 922), False, 'from Basic_app import views\n'), ((1036, 1069), 'Basic_app.views.ProjectDetailView.as_view', 'views.ProjectDetailView.as_view', ([], {}), '()\n', (1067, 1069), False, 'from Basic_app import views\n')]
|
import chess
import chess.polyglot
import random
def play_from_opening_book(
book, max_depth=10, fen=chess.STARTING_FEN, random_seed=None
):
"""Play out moves from an opening book and return the resulting board.
From the given `fen` starting position, draw weighted random moves from the opening book
    to a maximum depth of `2 * max_depth` plies. Whenever there are no more moves in the book,
    the play stops and the board is returned.
If a seed integer is given, then this will always return the same final position.
Arguments
---------
book: str
Path to a polyglot opening book file.
max_depth: int, optional
The maximum depth to play to. The number of moves (plies) made will at most be 2 times this.
Default is 10.
fen: str, optional
Starting position in FEN notation. Default is the standard opening position.
random_seed: int, optional
Seed the random number generator to produce the same results each call. Default is to not seed,
and so successive calls will in general yield different boards.
Returns
-------
A `chess.Board` with the resulting position.
"""
if random_seed is not None:
random.seed(random_seed)
board = chess.Board(fen)
with chess.polyglot.MemoryMappedReader(book) as reader:
try:
for _ in range(2 * max_depth):
move = reader.weighted_choice(board).move()
board.push(move)
except IndexError:
pass
return board
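if __name__ == "__main__":
    # Hedged example (not from the original module): "book.bin" is a
    # placeholder path to any polyglot opening book file.
    board = play_from_opening_book("book.bin", max_depth=5, random_seed=42)
    print(board.fen())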
|
[
"chess.Board",
"random.seed",
"chess.polyglot.MemoryMappedReader"
] |
[((1262, 1278), 'chess.Board', 'chess.Board', (['fen'], {}), '(fen)\n', (1273, 1278), False, 'import chess\n'), ((1224, 1248), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (1235, 1248), False, 'import random\n'), ((1289, 1328), 'chess.polyglot.MemoryMappedReader', 'chess.polyglot.MemoryMappedReader', (['book'], {}), '(book)\n', (1322, 1328), False, 'import chess\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import Float64
if __name__ == "__main__":
rospy.init_node("fake_battery_percentage")
pub = rospy.Publisher("battery_percentage", Float64, queue_size=1)
battery_percentage = rospy.get_param("~battery_percentage", 100)
publish_rate = rospy.get_param("~publish_rate", 1)
loop_rate = rospy.Rate(publish_rate)
while not rospy.is_shutdown():
battery_percentage -= 0.1
if battery_percentage < 0:
battery_percentage = 0
battery_percentage_msg = battery_percentage
pub.publish(battery_percentage_msg)
loop_rate.sleep()
|
[
"rospy.Publisher",
"rospy.Rate",
"rospy.get_param",
"rospy.is_shutdown",
"rospy.init_node"
] |
[((125, 167), 'rospy.init_node', 'rospy.init_node', (['"""fake_battery_percentage"""'], {}), "('fake_battery_percentage')\n", (140, 167), False, 'import rospy\n'), ((183, 243), 'rospy.Publisher', 'rospy.Publisher', (['"""battery_percentage"""', 'Float64'], {'queue_size': '(1)'}), "('battery_percentage', Float64, queue_size=1)\n", (198, 243), False, 'import rospy\n'), ((269, 312), 'rospy.get_param', 'rospy.get_param', (['"""~battery_percentage"""', '(100)'], {}), "('~battery_percentage', 100)\n", (284, 312), False, 'import rospy\n'), ((332, 367), 'rospy.get_param', 'rospy.get_param', (['"""~publish_rate"""', '(1)'], {}), "('~publish_rate', 1)\n", (347, 367), False, 'import rospy\n'), ((385, 409), 'rospy.Rate', 'rospy.Rate', (['publish_rate'], {}), '(publish_rate)\n', (395, 409), False, 'import rospy\n'), ((424, 443), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (441, 443), False, 'import rospy\n')]
|
from django.contrib import admin
from .models import (
Building,
BuildingPart,
Container,
EmailToken,
FullContainerReport,
TankTakeoutCompany,
)
class ContainerAdmin(admin.ModelAdmin):
readonly_fields = [
"mass",
"activated_at",
"avg_fill_time",
"calc_avg_fill_time",
"avg_takeout_wait_time",
"cur_fill_time",
"cur_takeout_wait_time",
"last_full_report",
"last_emptied_report",
"ignore_reports_count",
"is_full",
"check_time_conditions",
"requested_activation"
]
class BuildingPartAdmin(admin.ModelAdmin):
readonly_fields = [
"current_mass",
"meets_mass_takeout_condition",
"meets_time_takeout_condition",
"needs_takeout",
"containers_for_takeout",
"container_count"
]
class BuildingAdmin(admin.ModelAdmin):
readonly_fields = [
"current_mass",
"meets_mass_takeout_condition",
"meets_time_takeout_condition",
"needs_takeout",
"containers_for_takeout",
"container_count",
"calculated_collected_mass",
"confirmed_collected_mass",
"avg_fill_speed"
]
class FullContainerReportAdmin(admin.ModelAdmin):
readonly_fields = [
"takeout_wait_time"
]
admin.site.register(Container, ContainerAdmin)
admin.site.register(Building, BuildingAdmin)
admin.site.register(BuildingPart, BuildingPartAdmin)
admin.site.register(FullContainerReport, FullContainerReportAdmin)
admin.site.register(EmailToken)
admin.site.register(TankTakeoutCompany)
|
[
"django.contrib.admin.site.register"
] |
[((1337, 1383), 'django.contrib.admin.site.register', 'admin.site.register', (['Container', 'ContainerAdmin'], {}), '(Container, ContainerAdmin)\n', (1356, 1383), False, 'from django.contrib import admin\n'), ((1384, 1428), 'django.contrib.admin.site.register', 'admin.site.register', (['Building', 'BuildingAdmin'], {}), '(Building, BuildingAdmin)\n', (1403, 1428), False, 'from django.contrib import admin\n'), ((1429, 1481), 'django.contrib.admin.site.register', 'admin.site.register', (['BuildingPart', 'BuildingPartAdmin'], {}), '(BuildingPart, BuildingPartAdmin)\n', (1448, 1481), False, 'from django.contrib import admin\n'), ((1482, 1548), 'django.contrib.admin.site.register', 'admin.site.register', (['FullContainerReport', 'FullContainerReportAdmin'], {}), '(FullContainerReport, FullContainerReportAdmin)\n', (1501, 1548), False, 'from django.contrib import admin\n'), ((1549, 1580), 'django.contrib.admin.site.register', 'admin.site.register', (['EmailToken'], {}), '(EmailToken)\n', (1568, 1580), False, 'from django.contrib import admin\n'), ((1581, 1620), 'django.contrib.admin.site.register', 'admin.site.register', (['TankTakeoutCompany'], {}), '(TankTakeoutCompany)\n', (1600, 1620), False, 'from django.contrib import admin\n')]
|
# Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
def generate_data(mode='train', problem_type='binary'):
assert mode == 'train' or mode == 'test'
rng = np.random.RandomState(1)
if problem_type == 'binary':
labels = ['POS', 'NEG']
else:
labels = ['POS', 'NEG', 'NEU']
texts = ['aaa', 'bbb', 'ccc']
counts = {label: 0 for label in labels}
if mode == 'train':
n = 1000
else:
n = 100
lns = []
for i in range(n):
y = rng.choice(labels)
counts[y] += 1
x = rng.choice(texts)
lns.append('%s##%s\n' % (y, x))
print(counts)
with open('%s_input_%s.tribuo' % (mode, problem_type), 'w') as f:
for ln in lns:
f.write(ln)
def generate_models():
lltypes = [
'L2R_LR',
'L2R_L2LOSS_SVC_DUAL',
'L2R_L2LOSS_SVC',
'L2R_L1LOSS_SVC_DUAL',
'MCSVM_CS',
'L1R_L2LOSS_SVC',
'L1R_LR',
'L2R_LR_DUAL'
]
for lltype in lltypes:
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype, 'train_input_binary.tribuo', 'test_input_binary.tribuo')
print(cmd)
os.system(cmd)
# multiclass model
lltype = 'L2R_LR'
cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype+'_multiclass', 'train_input_multiclass.tribuo', 'test_input_multiclass.tribuo')
print(cmd)
os.system(cmd)
if __name__ == '__main__':
generate_data(mode='train')
generate_data(mode='test')
generate_data(mode='train', problem_type='multiclass')
generate_data(mode='test', problem_type='multiclass')
generate_models()
|
[
"os.system",
"numpy.random.RandomState"
] |
[((775, 799), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (796, 799), True, 'import numpy as np\n'), ((2026, 2040), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2035, 2040), False, 'import os\n'), ((1788, 1802), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1797, 1802), False, 'import os\n')]
|
#!/usr/bin/env python3
# Process raw CSV data and output Parquet
# Author: <NAME> (November 2020)
import argparse
from pyspark.sql import SparkSession
def main():
args = parse_args()
spark = SparkSession \
.builder \
.appName("movie-ratings-csv-to-parquet") \
.getOrCreate()
for file in ["credits", "keywords", "links", "links_small", "movies_metadata", "ratings", "ratings_small"]:
convert_to_parquet(spark, file, args)
def convert_to_parquet(spark, file, args):
df_bakery = spark.read \
.format("csv") \
.option("header", "true") \
.option("delimiter", ",") \
.option("inferSchema", "true") \
.load(f"s3a://{args.bronze_bucket}/movie_ratings/{file}.csv")
df_bakery.write \
.format("parquet") \
.save(f"s3a://{args.silver_bucket}/movie_ratings/{file}/", mode="overwrite")
def parse_args():
"""Parse argument values from command-line"""
parser = argparse.ArgumentParser(description="Arguments required for script.")
parser.add_argument("--bronze-bucket", required=True, help="Raw data location")
parser.add_argument("--silver-bucket", required=True, help="Processed data location")
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
|
[
"pyspark.sql.SparkSession.builder.appName",
"argparse.ArgumentParser"
] |
[((975, 1044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments required for script."""'}), "(description='Arguments required for script.')\n", (998, 1044), False, 'import argparse\n'), ((205, 265), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""movie-ratings-csv-to-parquet"""'], {}), "('movie-ratings-csv-to-parquet')\n", (233, 265), False, 'from pyspark.sql import SparkSession\n')]
|
import collections
import os
import pandas as pd
from catalyst.dl import ConfigExperiment
from segmentation_models_pytorch.encoders import get_preprocessing_fn
from sklearn.model_selection import train_test_split
from src.augmentations import get_transforms
from src.dataset import CloudDataset
class Experiment(ConfigExperiment):
def get_datasets(self, **kwargs):
path = kwargs.get("path", None)
df_train_name = kwargs.get("df_train_name", None)
df_pl_name = kwargs.get("df_pl_name", None)
image_folder = kwargs.get("image_folder", None)
encoder_name = kwargs.get("model_name", None)
test_mode = kwargs.get("test_mode", None)
type = kwargs.get("type", None)
height = kwargs.get("height", None)
width = kwargs.get("width", None)
if type == "train":
df_train = pd.read_csv(os.path.join(path, df_train_name))
if df_pl_name is not None:
df_pl = pd.read_csv(os.path.join(path, df_pl_name))
df_train = df_train.append(df_pl)
print(
f"Pseudo-labels named {df_pl_name} {len(df_pl) / 4} added to train df"
)
if test_mode:
df_train = df_train[:150]
df_train["label"] = df_train["Image_Label"].apply(lambda x: x.split("_")[1])
df_train["im_id"] = df_train["Image_Label"].apply(lambda x: x.split("_")[0])
id_mask_count = (
df_train.loc[~df_train["EncodedPixels"].isnull(), "Image_Label"]
.apply(lambda x: x.split("_")[0])
.value_counts()
.reset_index()
.rename(columns={"index": "img_id", "Image_Label": "count"})
.sort_values(["count", "img_id"])
)
assert len(id_mask_count["img_id"].values) == len(
id_mask_count["img_id"].unique()
)
train_ids, valid_ids = train_test_split(
id_mask_count["img_id"].values,
random_state=42,
stratify=id_mask_count["count"],
test_size=0.1,
)
df_test = pd.read_csv(os.path.join(path, "sample_submission.csv"))
df_test["label"] = df_test["Image_Label"].apply(lambda x: x.split("_")[1])
df_test["im_id"] = df_test["Image_Label"].apply(lambda x: x.split("_")[0])
test_ids = (
df_test["Image_Label"]
.apply(lambda x: x.split("_")[0])
.drop_duplicates()
.values
)
preprocess_fn = get_preprocessing_fn(encoder_name, pretrained="imagenet")
if type != "test":
train_dataset = CloudDataset(
df=df_train,
path=path,
img_ids=train_ids,
image_folder=image_folder,
transforms=get_transforms("train"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
valid_dataset = CloudDataset(
df=df_train,
path=path,
img_ids=valid_ids,
image_folder=image_folder,
transforms=get_transforms("valid"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
test_dataset = CloudDataset(
df=df_test,
path=path,
img_ids=test_ids,
image_folder="test_images",
transforms=get_transforms("valid"),
preprocessing_fn=preprocess_fn,
height=height,
width=width,
)
datasets = collections.OrderedDict()
if type == "train":
datasets["train"] = train_dataset
datasets["valid"] = valid_dataset
elif type == "postprocess":
datasets["infer"] = valid_dataset
elif type == "test":
datasets["infer"] = test_dataset
return datasets
|
[
"os.path.join",
"sklearn.model_selection.train_test_split",
"src.augmentations.get_transforms",
"collections.OrderedDict",
"segmentation_models_pytorch.encoders.get_preprocessing_fn"
] |
[((2662, 2719), 'segmentation_models_pytorch.encoders.get_preprocessing_fn', 'get_preprocessing_fn', (['encoder_name'], {'pretrained': '"""imagenet"""'}), "(encoder_name, pretrained='imagenet')\n", (2682, 2719), False, 'from segmentation_models_pytorch.encoders import get_preprocessing_fn\n'), ((3814, 3839), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3837, 3839), False, 'import collections\n'), ((2022, 2140), 'sklearn.model_selection.train_test_split', 'train_test_split', (["id_mask_count['img_id'].values"], {'random_state': '(42)', 'stratify': "id_mask_count['count']", 'test_size': '(0.1)'}), "(id_mask_count['img_id'].values, random_state=42, stratify=\n id_mask_count['count'], test_size=0.1)\n", (2038, 2140), False, 'from sklearn.model_selection import train_test_split\n'), ((2253, 2296), 'os.path.join', 'os.path.join', (['path', '"""sample_submission.csv"""'], {}), "(path, 'sample_submission.csv')\n", (2265, 2296), False, 'import os\n'), ((898, 931), 'os.path.join', 'os.path.join', (['path', 'df_train_name'], {}), '(path, df_train_name)\n', (910, 931), False, 'import os\n'), ((3657, 3680), 'src.augmentations.get_transforms', 'get_transforms', (['"""valid"""'], {}), "('valid')\n", (3671, 3680), False, 'from src.augmentations import get_transforms\n'), ((1010, 1040), 'os.path.join', 'os.path.join', (['path', 'df_pl_name'], {}), '(path, df_pl_name)\n', (1022, 1040), False, 'import os\n'), ((2959, 2982), 'src.augmentations.get_transforms', 'get_transforms', (['"""train"""'], {}), "('train')\n", (2973, 2982), False, 'from src.augmentations import get_transforms\n'), ((3321, 3344), 'src.augmentations.get_transforms', 'get_transforms', (['"""valid"""'], {}), "('valid')\n", (3335, 3344), False, 'from src.augmentations import get_transforms\n')]
|
############ This program is not successful ##############
import pandas as pd
import numpy as np
import argparse
from datetime import datetime
import tensorflow as tf
# from tensorflow import keras
from tensorflow.keras.layers import Input, Embedding, Dense, Flatten, Activation, concatenate
# from tensorflow.keras.layers.advanced_activations import ReLU
# from tensorflow.keras.layers.normalization import BatchNormalization
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import plot_model
from lib.read_conf import Config
from lib.dataset import input_fn
from train import Wide_and_Deep
class Wide_and_Deep_Intermediate_Output(Wide_and_Deep):
def __init__(self, mode="deep"):
super().__init__(mode)
def predict_intermediate(self, layer_name="dense_1"):
if not self.model:
self.load_model()
input_data = self.get_dataset(mode="pred", batch_size=128)
# print("Input data shape: {}".format(len(input_data)))
self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
intermediate_layer_model = tf.keras.Model(inputs=self.model.input,
outputs=self.model.get_layer(layer_name).output)
result = intermediate_output = intermediate_layer_model.predict(input_data)
print("result: {}".format(result))
print("result shape:{}".format(result.shape))
if __name__ == '__main__':
# mode = "wide and deep"
mode = "deep"
# wide_deep_net = Wide_and_Deep_Intermediate_Output(mode)
wide_deep_net = Wide_and_Deep(mode)
wide_deep_net.load_model()
get_3rd_layer_output = tf.keras.backend.function([wide_deep_net.model.layers[0].input], [wide_deep_net.model.layers[3].output])
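    # NOTE: 'x' is never defined in this script, so the next line raises a NameError (hence the "not successful" header comment).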
layer_output = get_3rd_layer_output([x])[0]
# wide_deep_net.predict_model()
# wide_deep_net.predict_intermediate()
|
[
"tensorflow.keras.backend.function",
"train.Wide_and_Deep"
] |
[((1626, 1645), 'train.Wide_and_Deep', 'Wide_and_Deep', (['mode'], {}), '(mode)\n', (1639, 1645), False, 'from train import Wide_and_Deep\n'), ((1704, 1813), 'tensorflow.keras.backend.function', 'tf.keras.backend.function', (['[wide_deep_net.model.layers[0].input]', '[wide_deep_net.model.layers[3].output]'], {}), '([wide_deep_net.model.layers[0].input], [\n wide_deep_net.model.layers[3].output])\n', (1729, 1813), True, 'import tensorflow as tf\n')]
|
from django.contrib import admin
from accounts.models import UserGroup, UserProfile
# Register your models here.
class UserAdmin(admin.ModelAdmin):
pass
class GroupAdmin(admin.ModelAdmin):
pass
admin.site.register(UserProfile, UserAdmin)
admin.site.register(UserGroup, GroupAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((219, 262), 'django.contrib.admin.site.register', 'admin.site.register', (['UserProfile', 'UserAdmin'], {}), '(UserProfile, UserAdmin)\n', (238, 262), False, 'from django.contrib import admin\n'), ((264, 306), 'django.contrib.admin.site.register', 'admin.site.register', (['UserGroup', 'GroupAdmin'], {}), '(UserGroup, GroupAdmin)\n', (283, 306), False, 'from django.contrib import admin\n')]
|
import os
import subprocess
import time
import logging
from re import sub
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--path", dest="root_path",
help="set root path to start search", metavar="PATH")
(options, args) = parser.parse_args()
root_path = options.root_path if options.root_path else '.'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='SVNUpdate.log',
filemode='a')
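# Windows-specific: configure subprocess startup info so console windows stay hidden for each svn call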
startupinfo = None
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
def main():
if is_svn_installed():
update_all_repo()
else:
print('Please install SVN command line tools to use this application')
def is_svn_installed():
cmd = 'svn --version'
try:
subprocess.Popen(cmd, startupinfo=startupinfo,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return True
except Exception as e:
return False
def update_all_repo():
logging.info('Update started @ : {}'.format(time.asctime(time.localtime(time.time()))))
count = 0
print('Collecting SVN repositories')
for root, dirs, files in os.walk(root_path, topdown=False):
for name in dirs:
if name == '.svn':
count += 1
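                # os.path.join gives './path/to/repo/.svn'; strip the leading './' and trailing '/.svn' (assumes the default root path '.')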
svn_dir = os.path.join(root, name)[2:-5]
print('Updating ' + svn_dir)
cmd = 'svn up "' + svn_dir + '"'
try:
p = subprocess.Popen(cmd, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pout, _ = p.communicate()
pout = sub('[\n\r]', '', pout.decode('utf-8'))
pout = sub('[:]', ' is ', pout)
logging.info('{}'.format(pout))
p.wait()
except Exception as e:
print('Whoops !! Something went wrong, check log for more info')
logging.error('{}'.format(e))
print('Total svn repositories updated : {}'.format(str(count)))
logging.info('Total svn repositories updated : {}'.format(str(count)))
logging.info('Update done @ : {}'.format(time.asctime(time.localtime(time.time()))))
logging.shutdown()
if __name__ == '__main__':
main()
|
[
"subprocess.Popen",
"logging.basicConfig",
"optparse.OptionParser",
"os.walk",
"subprocess.STARTUPINFO",
"time.time",
"logging.shutdown",
"os.path.join",
"re.sub"
] |
[((118, 132), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (130, 132), False, 'from optparse import OptionParser\n'), ((358, 495), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'filename': '"""SVNUpdate.log"""', 'filemode': '"""a"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(message)s', filename='SVNUpdate.log',\n filemode='a')\n", (377, 495), False, 'import logging\n'), ((582, 606), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (604, 606), False, 'import subprocess\n'), ((1297, 1330), 'os.walk', 'os.walk', (['root_path'], {'topdown': '(False)'}), '(root_path, topdown=False)\n', (1304, 1330), False, 'import os\n'), ((2380, 2398), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (2396, 2398), False, 'import logging\n'), ((887, 987), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'startupinfo': 'startupinfo', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, startupinfo=startupinfo, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (903, 987), False, 'import subprocess\n'), ((1186, 1197), 'time.time', 'time.time', ([], {}), '()\n', (1195, 1197), False, 'import time\n'), ((1442, 1466), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1454, 1466), False, 'import os\n'), ((1612, 1710), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'startupinfo': 'startupinfo', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, startupinfo=startupinfo, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (1628, 1710), False, 'import subprocess\n'), ((1847, 1871), 're.sub', 'sub', (['"""[:]"""', '""" is """', 'pout'], {}), "('[:]', ' is ', pout)\n", (1850, 1871), False, 'from re import sub\n'), ((2360, 2371), 'time.time', 'time.time', ([], {}), '()\n', (2369, 2371), False, 'import time\n')]
|
import requests
import pytest
import subprocess
from datetime import datetime
from helpers import wait_for_grafana_url_generation, create_job
from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job
from helpers import restart_container, wait_for_job_complete
from helpers.fixtures import job_payload, manager_container_id
def test_create_job(job_payload):
""" Tests if a Job is being created successfully
Arguments:
job_payload {dict} -- A pytest fixture providing the Job
payload to be sent to Asperathos
Returns:
None
"""
response = requests.post(MANAGER_URL + '/submissions', json=job_payload)
response_payload = response.json()
assert response.ok
assert response_payload
stop_job(manager_url=MANAGER_URL, job_id=response_payload.get('job_id'))
def test_visualize_grafana():
""" Tests if the Grafana URL is being generated successfully
Arguments:
None
Returns:
None
"""
job_id = create_job(MANAGER_URL, 1)
grafana_url = wait_for_grafana_url_generation(VISUALIZER_URL, job_id)
assert requests.get(grafana_url).ok
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_controller_scales_up():
""" Tests if the Controlling is able to scale
Arguments:
None
Returns:
None
"""
INITIAL_REPLICAS = 1
job_id = create_job(MANAGER_URL, 2)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
detailed_report = requests.get(MANAGER_URL + '/submissions/{}/report'\
.format(job_id))
data = detailed_report.json()
assertion = any(data[time]['replicas'] > INITIAL_REPLICAS for time in data)
assert assertion
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_controller_scales_down():
""" Tests if the Controlling is able to scale
Arguments:
None
Returns:
None
"""
INITIAL_REPLICAS = 10
job_id = create_job(MANAGER_URL, 3)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
detailed_report = requests.get(MANAGER_URL + '/submissions/{}/report'\
.format(job_id))
data = detailed_report.json()
assertion = any(data[time]['replicas'] < INITIAL_REPLICAS for time in data)
assert assertion
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_monitor_report_matches_detailed():
""" Tests if the metrics in the
simple report matches with the detailed one.
Arguments:
None
Returns:
None
"""
job_id = create_job(MANAGER_URL, 3)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
submission_url = MANAGER_URL + '/submissions/{}'.format(job_id)
report_url = submission_url + "/report"
monitor = requests.get(submission_url).json()
detailed = requests.get(report_url).json()
monitor_max_error,monitor_max_error_time = monitor['max_error']
monitor_min_error,monitor_min_error_time = monitor['min_error']
monitor_last_error,monitor_last_error_time = monitor['final_error']
detailed_report_max_error = detailed[monitor_max_error_time]['error']
assert detailed_report_max_error == monitor_max_error
detailed_report_max_error = detailed[monitor_min_error_time]['error']
assert detailed_report_max_error == monitor_min_error
date_format = "%Y-%m-%dT%H:%M:%SZ"
last_date = datetime.strptime(monitor_last_error_time,date_format)
dates = detailed.keys()
assertion = all(datetime.strptime(date,date_format) <= last_date\
for date in dates)
assert assertion
@pytest.mark.last
def test_persistence_works(manager_container_id):
""" Tests if Job persistence is working properly
when manager is restarted
Arguments:
None
Returns:
None
"""
# This test is here to ensure there will be more than 0 jobs registered
jobs = get_jobs(MANAGER_URL)
n_jobs = len(jobs)
assert n_jobs > 0
restart_container(manager_container_id)
assert n_jobs == len(get_jobs(MANAGER_URL))
delete_job(MANAGER_URL, list(jobs.keys())[0])
assert len(get_jobs(MANAGER_URL)) < n_jobs
|
[
"helpers.restart_container",
"helpers.wait_for_job_complete",
"helpers.create_job",
"helpers.get_jobs",
"helpers.stop_job",
"datetime.datetime.strptime",
"requests.get",
"requests.post",
"helpers.wait_for_grafana_url_generation"
] |
[((668, 729), 'requests.post', 'requests.post', (["(MANAGER_URL + '/submissions')"], {'json': 'job_payload'}), "(MANAGER_URL + '/submissions', json=job_payload)\n", (681, 729), False, 'import requests\n'), ((1101, 1127), 'helpers.create_job', 'create_job', (['MANAGER_URL', '(1)'], {}), '(MANAGER_URL, 1)\n', (1111, 1127), False, 'from helpers import wait_for_grafana_url_generation, create_job\n'), ((1146, 1201), 'helpers.wait_for_grafana_url_generation', 'wait_for_grafana_url_generation', (['VISUALIZER_URL', 'job_id'], {}), '(VISUALIZER_URL, job_id)\n', (1177, 1201), False, 'from helpers import wait_for_grafana_url_generation, create_job\n'), ((1246, 1294), 'helpers.stop_job', 'stop_job', ([], {'manager_url': 'MANAGER_URL', 'job_id': 'job_id'}), '(manager_url=MANAGER_URL, job_id=job_id)\n', (1254, 1294), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((1495, 1521), 'helpers.create_job', 'create_job', (['MANAGER_URL', '(2)'], {}), '(MANAGER_URL, 2)\n', (1505, 1521), False, 'from helpers import wait_for_grafana_url_generation, create_job\n'), ((1526, 1587), 'helpers.wait_for_job_complete', 'wait_for_job_complete', (['MANAGER_URL', 'job_id'], {'max_wait_time': '(180)'}), '(MANAGER_URL, job_id, max_wait_time=180)\n', (1547, 1587), False, 'from helpers import restart_container, wait_for_job_complete\n'), ((1841, 1889), 'helpers.stop_job', 'stop_job', ([], {'manager_url': 'MANAGER_URL', 'job_id': 'job_id'}), '(manager_url=MANAGER_URL, job_id=job_id)\n', (1849, 1889), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((2093, 2119), 'helpers.create_job', 'create_job', (['MANAGER_URL', '(3)'], {}), '(MANAGER_URL, 3)\n', (2103, 2119), False, 'from helpers import wait_for_grafana_url_generation, create_job\n'), ((2124, 2185), 'helpers.wait_for_job_complete', 'wait_for_job_complete', (['MANAGER_URL', 'job_id'], {'max_wait_time': '(180)'}), '(MANAGER_URL, job_id, max_wait_time=180)\n', (2145, 2185), False, 'from helpers import restart_container, wait_for_job_complete\n'), ((2439, 2487), 'helpers.stop_job', 'stop_job', ([], {'manager_url': 'MANAGER_URL', 'job_id': 'job_id'}), '(manager_url=MANAGER_URL, job_id=job_id)\n', (2447, 2487), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((2697, 2723), 'helpers.create_job', 'create_job', (['MANAGER_URL', '(3)'], {}), '(MANAGER_URL, 3)\n', (2707, 2723), False, 'from helpers import wait_for_grafana_url_generation, create_job\n'), ((2728, 2789), 'helpers.wait_for_job_complete', 'wait_for_job_complete', (['MANAGER_URL', 'job_id'], {'max_wait_time': '(180)'}), '(MANAGER_URL, job_id, max_wait_time=180)\n', (2749, 2789), False, 'from helpers import restart_container, wait_for_job_complete\n'), ((3535, 3590), 'datetime.datetime.strptime', 'datetime.strptime', (['monitor_last_error_time', 'date_format'], {}), '(monitor_last_error_time, date_format)\n', (3552, 3590), False, 'from datetime import datetime\n'), ((4081, 4102), 'helpers.get_jobs', 'get_jobs', (['MANAGER_URL'], {}), '(MANAGER_URL)\n', (4089, 4102), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((4153, 4192), 'helpers.restart_container', 'restart_container', (['manager_container_id'], {}), '(manager_container_id)\n', (4170, 4192), False, 'from helpers import restart_container, wait_for_job_complete\n'), ((1213, 1238), 'requests.get', 'requests.get', (['grafana_url'], {}), '(grafana_url)\n', (1225, 1238), False, 'import 
requests\n'), ((2917, 2945), 'requests.get', 'requests.get', (['submission_url'], {}), '(submission_url)\n', (2929, 2945), False, 'import requests\n'), ((2971, 2995), 'requests.get', 'requests.get', (['report_url'], {}), '(report_url)\n', (2983, 2995), False, 'import requests\n'), ((4218, 4239), 'helpers.get_jobs', 'get_jobs', (['MANAGER_URL'], {}), '(MANAGER_URL)\n', (4226, 4239), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((4307, 4328), 'helpers.get_jobs', 'get_jobs', (['MANAGER_URL'], {}), '(MANAGER_URL)\n', (4315, 4328), False, 'from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job\n'), ((3638, 3674), 'datetime.datetime.strptime', 'datetime.strptime', (['date', 'date_format'], {}), '(date, date_format)\n', (3655, 3674), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Gauss module:
# Methods for computing the solution of a linear system by Gaussian elimination
# Method for computing the error of the Gauss solution relative to the exact solution
import numpy as np
import construtor
import solve
# Computes the solution vector by the Gauss method.
# Inputs: matrix, vector of independent terms, number of points
# Returns: solution vector
def v_sol(m, v, n):
    # Check for zero pivots (swapping rows if needed) and reduce the matrix to row-echelon form
for j in range(n):
if m[j][j] == 0:
k = j
while True:
if 0 == m[k][j]:
k += 1
if k == n:
print("Matriz inválida")
break
else:
temp = m[k].copy()
m[k] = m[j].copy()
m[j] = temp.copy()
break
for i in range(j + 1, n):
mult = - m[i][j] / m[j][j]
for k in range(j, n):
m[i][k] += mult * m[j][k]
v[i] += mult * v[j]
    # Back-substitute on the row-reduced (upper triangular) matrix
x = [None] * n
for i in range(n-1, -1, -1):
x[i] = v[i]
for j in range(i + 1, n):
x[i] -= m[i][j] * x[j]
x[i] = x[i] / m[i][i]
return x
# Computes the solution vector, for the matrix of one equation, by the Gauss method.
# Inputs: q(x), r(x), mesh of points, step, number of points, y(a), y(b)
# Returns: solution vector
def v_sol_mh(q, r, x, h, n, a_, b_):
    # Compute the matrix and the vector of independent terms
m_h = construtor.matriz(q, x, h, n)
v_h = construtor.vetor(r, x, h, n, a_, b_)
    # Compute and return the solution vector
return v_sol(m_h, v_h, n - 1)
# Computes the solution vector, for the matrix of one equation and several values of n, by the Gauss method.
# Compares the Gauss-method solution values with the exact solution.
# Plots the maximum error for each value of n.
# Inputs: y(x), q(x), r(x), initial endpoint (a), final endpoint (b), y(a), y(b)
# Returns: vector with the maximum error for each value of n.
def erro_n(y, q, r, a, b, a_, b_, n, n_step):
    # Error between values obtained by the Gauss method and the known solution
e = []
    # Maximum error of each iteration
e_max = []
for ni in range(5, n, n_step):
        # Compute the step size appropriate for the interval
h = (b - a) / ni
        # Create the mesh of points
x = []
for i in range(1, ni):
x.append(a + i * h)
        # Compute the exact solution vector
v_sol = solve.v_sol(y, x)
        # Compute the solution vector by the Gauss method
v_gauss = v_sol_mh(q, r, x, h, ni, a_, b_)
        # Compare the solutions
dif = [abs(i) for i in (np.array(v_sol) - np.array(v_gauss)).tolist()]
e.append(dif)
e_max.append(np.max(dif))
return e_max
# ----------------test----------------
if __name__ == "__main__":
b = [[1, 2, 3], [4, 5, 8], [7, 8, 5]]
c = [10, 11, 12]
print(v_sol(b, c, 3))
|
[
"solve.v_sol",
"numpy.max",
"construtor.vetor",
"numpy.array",
"construtor.matriz"
] |
[((1625, 1654), 'construtor.matriz', 'construtor.matriz', (['q', 'x', 'h', 'n'], {}), '(q, x, h, n)\n', (1642, 1654), False, 'import construtor\n'), ((1666, 1702), 'construtor.vetor', 'construtor.vetor', (['r', 'x', 'h', 'n', 'a_', 'b_'], {}), '(r, x, h, n, a_, b_)\n', (1682, 1702), False, 'import construtor\n'), ((2631, 2648), 'solve.v_sol', 'solve.v_sol', (['y', 'x'], {}), '(y, x)\n', (2642, 2648), False, 'import solve\n'), ((2917, 2928), 'numpy.max', 'np.max', (['dif'], {}), '(dif)\n', (2923, 2928), True, 'import numpy as np\n'), ((2825, 2840), 'numpy.array', 'np.array', (['v_sol'], {}), '(v_sol)\n', (2833, 2840), True, 'import numpy as np\n'), ((2843, 2860), 'numpy.array', 'np.array', (['v_gauss'], {}), '(v_gauss)\n', (2851, 2860), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
import re
from dataclasses import dataclass
from typing import List, Optional
from monty.json import MSONable
@dataclass(frozen=True)
class Defect(MSONable):
name: str
charges: tuple
@property
def str_list(self):
return ["_".join([self.name, str(charge)]) for charge in self.charges]
@property
def charge_list(self):
return [charge for charge in self.charges]
class SimpleDefect(Defect):
def __init__(self, in_atom, out_atom, charge_list):
if in_atom is None:
in_atom = "Va"
super().__init__("_".join([in_atom, out_atom]), tuple(charge_list))
@property
def in_atom(self):
result = self.name.split("_")[0]
if result == "Va":
return
return result
@property
def out_atom(self):
return self.name.split("_")[1]
def screen_simple_defect(defect: SimpleDefect, keywords: List[str]
) -> Optional[SimpleDefect]:
charges = []
for charge in defect.charges:
full_name = "_".join([defect.name, str(charge)])
if any([re.search(keyword, full_name) for keyword in keywords]):
charges.append(charge)
if charges:
return SimpleDefect(defect.in_atom, defect.out_atom, tuple(charges))
else:
return
|
[
"re.search",
"dataclasses.dataclass"
] |
[((210, 232), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (219, 232), False, 'from dataclasses import dataclass\n'), ((1190, 1219), 're.search', 're.search', (['keyword', 'full_name'], {}), '(keyword, full_name)\n', (1199, 1219), False, 'import re\n')]
|
"""
百度词法分析API,补全未识别出的:
pip install baidu-aip
"""
import time
import os
import sys
import codecs
import json
import traceback
from tqdm import tqdm
from aip import AipNlp
sys.path.insert(0, './')  # Define the search-path priority order; index 0 means highest priority
from data import baidu_config # noqa
""" 你的 APPID AK SK """
APP_ID = baidu_config.APP_ID # '你的 App ID'
API_KEY = baidu_config.API_KEY # '你的 Api Key'
SECRET_KEY = baidu_config.SECRET_KEY # '你的 Secret Key'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
# text = "百度是一家高科技公司"
""" 调用词法分析 """
# print(client.lexer(text))
import myClue # noqa
print('myClue module path :{}'.format(myClue.__file__))  # Print the test module's file location
from myClue.core import logger # noqa
from myClue.tools.file import read_file_texts # noqa
from myClue.tools.file import init_file_path # noqa
def get_baidu_cws(text):
for i in range(20):
try:
            text = text.encode('gbk', errors='ignore').decode('gbk', errors='ignore')  # Drop characters GBK cannot represent; this API expects GBK-encodable text
cws_result = client.lexer(text)
if 'items' in cws_result:
return cws_result['items']
else:
continue
except Exception as e:
time.sleep(0.5)
print('text:{}, i:{}, exception:{}'.format(text, i, e))
traceback.print_exc()
return []
if __name__ == "__main__":
train_file_config = {
'dev': './data/UCAS_NLP_TC/data_baidu_cws/dev_cws.json',
'test': './data/UCAS_NLP_TC/data_baidu_cws/test_cws.json',
'train': './data/UCAS_NLP_TC/data_baidu_cws/train_cws.json',
}
for file_label, file_name in train_file_config.items():
        logger.info('Start processing: {}'.format(file_label))
texts = read_file_texts(file_name)
with codecs.open(file_name, mode='w', encoding='utf8') as fw:
for text in tqdm(texts):
row_json = json.loads(text)
if len(row_json['cws_items']) == 0:
news_content = row_json['news_content']
if len(news_content) > 10000:
cws_items = get_baidu_cws(news_content[:10000])
time.sleep(0.3)
cws_items.extend(get_baidu_cws(news_content[10000:]))
else:
cws_items = get_baidu_cws(news_content)
time.sleep(0.3)
row_json['cws_items'] = cws_items
fw.write('{}\n'.format(json.dumps(row_json, ensure_ascii=False)))
time.sleep(0.3)
else:
fw.write('{}\n'.format(text))
|
[
"tqdm.tqdm",
"traceback.print_exc",
"codecs.open",
"json.loads",
"sys.path.insert",
"json.dumps",
"time.sleep",
"myClue.tools.file.read_file_texts",
"aip.AipNlp"
] |
[((171, 195), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./"""'], {}), "(0, './')\n", (186, 195), False, 'import sys\n'), ((445, 480), 'aip.AipNlp', 'AipNlp', (['APP_ID', 'API_KEY', 'SECRET_KEY'], {}), '(APP_ID, API_KEY, SECRET_KEY)\n', (451, 480), False, 'from aip import AipNlp\n'), ((1695, 1721), 'myClue.tools.file.read_file_texts', 'read_file_texts', (['file_name'], {}), '(file_name)\n', (1710, 1721), False, 'from myClue.tools.file import read_file_texts\n'), ((1735, 1784), 'codecs.open', 'codecs.open', (['file_name'], {'mode': '"""w"""', 'encoding': '"""utf8"""'}), "(file_name, mode='w', encoding='utf8')\n", (1746, 1784), False, 'import codecs\n'), ((1816, 1827), 'tqdm.tqdm', 'tqdm', (['texts'], {}), '(texts)\n', (1820, 1827), False, 'from tqdm import tqdm\n'), ((1175, 1190), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1185, 1190), False, 'import time\n'), ((1271, 1292), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1290, 1292), False, 'import traceback\n'), ((1856, 1872), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (1866, 1872), False, 'import json\n'), ((2335, 2350), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2345, 2350), False, 'import time\n'), ((2511, 2526), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2521, 2526), False, 'import time\n'), ((2131, 2146), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2141, 2146), False, 'import time\n'), ((2448, 2488), 'json.dumps', 'json.dumps', (['row_json'], {'ensure_ascii': '(False)'}), '(row_json, ensure_ascii=False)\n', (2458, 2488), False, 'import json\n')]
|
from NXController import Controller
ctr = Controller()
for i in range(30):
ctr.A()
if i == 0:
ctr.RIGHT()
ctr.RIGHT()
else:
ctr.LEFT()
ctr.LEFT()
ctr.LEFT()
ctr.UP()
ctr.RIGHT(0.4)
ctr.A()
ctr.close()
|
[
"NXController.Controller"
] |
[((43, 55), 'NXController.Controller', 'Controller', ([], {}), '()\n', (53, 55), False, 'from NXController import Controller\n')]
|
from support import *
import chat
def main():
#Creating Login Page
global val, w, root,top,username,name
root = tk.Tk()
username = tk.StringVar()
name = tk.StringVar()
#root.attributes('-fullscreen',True)
top = Toplevel1 (root)
init(root, top)
root.mainloop()
def authentication():
#Logining and receiving token
global username_info,name_info,token,username,name
username_info=username.get()
name_info=name.get()
print("Username:",username_info)
print("Name:",name_info)
try:
response = requests.post(
'http://127.0.0.1:5000/send',
json={'name': name_info, 'username': username_info, 'text': '', 'status': 'login'}
)
except:
messagebox.showinfo("Error!","No connection with server!")
return
data = response.json()
if data['status'] == 'Ok':
chat.main(name_info,username_info)
else:
messagebox.showinfo("Error!","That username was used! Input your username again!")
return
#messagebox.showinfo("Wrong", "You entered wrong credentials!")
#print(r1.text)
class Toplevel1:
def __init__(self, top=None):
#This class contains information about our Window
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
top.geometry("{}x{}".format(size_x, size_y))
top.minsize(1, 1)
top.maxsize(size_x,size_y)
top.resizable(1, 1)
top.title("Login")
#Background image
self.img = tk.PhotoImage(file="images/bg.png")
self.my_canvas = tk.Canvas(top)
self.my_canvas.place(relx=0.0, rely=0.0,height=size_y,width=size_x)
self.my_canvas.create_image(0,0,image=self.img,anchor="nw")
#Entries
self.my_canvas.create_text(255,130,text="Username",font="-family {DejaVu Sans} -size 20")
self.Entry1 = tk.Entry(top,textvariable=username)
self.Entry1.place(relx=0.300, rely=0.420, height=23, relwidth=0.40)
self.Entry1.configure(background="white")
self.Entry1.configure(font="FixedFont")
self.my_canvas.create_text(255,220,text="<NAME>",font="-family {DejaVu Sans} -size 20")
self.Entry2 = tk.Entry(top,textvariable=name)
self.Entry2.place(relx=0.300, rely=0.650, height=23, relwidth=0.40)
self.Entry2.configure(background="white")
self.Entry2.configure(font="FixedFont")
#Login Button
self.butn1 = tk.Button(text='Login',command=authentication)
self.butn1.place(relx=0.440, rely=0.800,height=30,width=70)
|
[
"chat.main"
] |
[((882, 917), 'chat.main', 'chat.main', (['name_info', 'username_info'], {}), '(name_info, username_info)\n', (891, 917), False, 'import chat\n')]
|
# -*- coding: utf-8 -*-
# @date 2016/06/03
# @author <EMAIL>
# @desc custom methods of the query class in Flask-SQLAlchemy
# @record
#
from flask import request
from flask_sqlalchemy import (
BaseQuery,
Model,
_BoundDeclarativeMeta,
SQLAlchemy as BaseSQLAlchemy,
_QueryProperty)
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy._compat import iteritems, itervalues, xrange, \
string_types
class MyBaseQuery(BaseQuery):
# do stuff here
def all(self):
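        """Like BaseQuery.all(), but applies the tenant's db_filters from the request, if any."""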
tenant_ctx = None if not request else request.environ.get('tenant_ctx')
        if tenant_ctx is None or hasattr(tenant_ctx, 'db_filters') is False:
self = self
else:
for k, v in tenant_ctx.db_filters.items():
self = self.filter_by(**{k: v})
return list(self)
def first(self):
"""改写basequery的first方法. 增加过滤条件
"""
tenant_ctx = None if not request else request.environ.get('tenant_ctx')
        if tenant_ctx is None or hasattr(tenant_ctx, 'db_filters') is False:
self = self
else:
for k, v in tenant_ctx.db_filters.items():
self = self.filter_by(**{k: v})
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
class MyModel(Model):
# in this case we're just using a custom BaseQuery class,
# but you can add other stuff as well
query_class = MyBaseQuery
def _my_declarative_constructor(self, tenant=None, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
if tenant is not None:
setattr(self, "company_id", tenant.db_filters.get('company_id'))
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
class MySQLAlchemy(BaseSQLAlchemy):
def make_declarative_base(self, metadata=None):
# in this case we're just using a custom Model class,
# but you can change the DelcarativeMeta or other stuff as well
base = declarative_base(cls=MyModel,
name='Model',
metadata=metadata,
metaclass=_BoundDeclarativeMeta,
constructor=_my_declarative_constructor)
base.query = _QueryProperty(self)
return base
# Fixed Flask-Fixtures's Bug: https://github.com/croach/Flask-Fixtures/issues/22
db = MySQLAlchemy()
|
[
"flask.request.environ.get",
"sqlalchemy.ext.declarative.declarative_base",
"flask_sqlalchemy._QueryProperty"
] |
[((2552, 2693), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'cls': 'MyModel', 'name': '"""Model"""', 'metadata': 'metadata', 'metaclass': '_BoundDeclarativeMeta', 'constructor': '_my_declarative_constructor'}), "(cls=MyModel, name='Model', metadata=metadata, metaclass=\n _BoundDeclarativeMeta, constructor=_my_declarative_constructor)\n", (2568, 2693), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2838, 2858), 'flask_sqlalchemy._QueryProperty', '_QueryProperty', (['self'], {}), '(self)\n', (2852, 2858), False, 'from flask_sqlalchemy import BaseQuery, Model, _BoundDeclarativeMeta, SQLAlchemy as BaseSQLAlchemy, _QueryProperty\n'), ((563, 596), 'flask.request.environ.get', 'request.environ.get', (['"""tenant_ctx"""'], {}), "('tenant_ctx')\n", (582, 596), False, 'from flask import request\n'), ((960, 993), 'flask.request.environ.get', 'request.environ.get', (['"""tenant_ctx"""'], {}), "('tenant_ctx')\n", (979, 993), False, 'from flask import request\n')]
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
from __future__ import absolute_import, division, print_function
import glob
import os
import pandas as pd
import datasets
_CITATION = """\
@misc{friedrich2020sofcexp,
title={The SOFC-Exp Corpus and Neural Approaches to Information Extraction in the Materials Science Domain},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2006.03039},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The SOFC-Exp corpus consists of 45 open-access scholarly articles annotated by domain experts.
A corpus and an inter-annotator agreement study demonstrate the complexity of the suggested
named entity recognition and slot filling tasks as well as high annotation quality is presented
in the accompanying paper.
"""
_HOMEPAGE = "https://arxiv.org/abs/2006.03039"
_LICENSE = ""
_URL = "https://github.com/boschresearch/sofc-exp_textmining_resources/archive/master.zip"
class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
""""""
VERSION = datasets.Version("1.1.0")
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
"sentence_offsets": datasets.features.Sequence(
{"begin_char_offset": datasets.Value("int64"), "end_char_offset": datasets.Value("int64")}
),
"sentences": datasets.features.Sequence(datasets.Value("string")),
"sentence_labels": datasets.features.Sequence(datasets.Value("int64")),
"token_offsets": datasets.features.Sequence(
{
"offsets": datasets.features.Sequence(
{"begin_char_offset": datasets.Value("int64"), "end_char_offset": datasets.Value("int64")}
)
}
),
"tokens": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
"entity_labels": datasets.features.Sequence(
datasets.features.Sequence(
datasets.features.ClassLabel(
names=[
"B-DEVICE",
"B-EXPERIMENT",
"B-MATERIAL",
"B-VALUE",
"I-DEVICE",
"I-EXPERIMENT",
"I-MATERIAL",
"I-VALUE",
"O",
]
)
)
),
"slot_labels": datasets.features.Sequence(
datasets.features.Sequence(
datasets.features.ClassLabel(
names=[
"B-anode_material",
"B-cathode_material",
"B-conductivity",
"B-current_density",
"B-degradation_rate",
"B-device",
"B-electrolyte_material",
"B-experiment_evoking_word",
"B-fuel_used",
"B-interlayer_material",
"B-interconnect_material",
"B-open_circuit_voltage",
"B-power_density",
"B-resistance",
"B-support_material",
"B-thickness",
"B-time_of_operation",
"B-voltage",
"B-working_temperature",
"I-anode_material",
"I-cathode_material",
"I-conductivity",
"I-current_density",
"I-degradation_rate",
"I-device",
"I-electrolyte_material",
"I-experiment_evoking_word",
"I-fuel_used",
"I-interlayer_material",
"I-interconnect_material",
"I-open_circuit_voltage",
"I-power_density",
"I-resistance",
"I-support_material",
"I-thickness",
"I-time_of_operation",
"I-voltage",
"I-working_temperature",
"O",
]
)
)
),
"links": datasets.Sequence(
{
"relation_label": datasets.features.ClassLabel(
names=["coreference", "experiment_variation", "same_experiment", "thickness"]
),
"start_span_id": datasets.Value("int64"),
"end_span_id": datasets.Value("int64"),
}
),
"slots": datasets.features.Sequence(
{
"frame_participant_label": datasets.features.ClassLabel(
names=[
"anode_material",
"cathode_material",
"current_density",
"degradation_rate",
"device",
"electrolyte_material",
"fuel_used",
"interlayer_material",
"open_circuit_voltage",
"power_density",
"resistance",
"support_material",
"time_of_operation",
"voltage",
"working_temperature",
]
),
"slot_id": datasets.Value("int64"),
}
),
"spans": datasets.features.Sequence(
{
"span_id": datasets.Value("int64"),
"entity_label": datasets.features.ClassLabel(names=["", "DEVICE", "MATERIAL", "VALUE"]),
"sentence_id": datasets.Value("int64"),
"experiment_mention_type": datasets.features.ClassLabel(
names=["", "current_exp", "future_work", "general_info", "previous_work"]
),
"begin_char_offset": datasets.Value("int64"),
"end_char_offset": datasets.Value("int64"),
}
),
"experiments": datasets.features.Sequence(
{
"experiment_id": datasets.Value("int64"),
"span_id": datasets.Value("int64"),
"slots": datasets.features.Sequence(
{
"frame_participant_label": datasets.features.ClassLabel(
names=[
"anode_material",
"cathode_material",
"current_density",
"degradation_rate",
"conductivity",
"device",
"electrolyte_material",
"fuel_used",
"interlayer_material",
"open_circuit_voltage",
"power_density",
"resistance",
"support_material",
"time_of_operation",
"voltage",
"working_temperature",
]
),
"slot_id": datasets.Value("int64"),
}
),
}
),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URL
data_dir = dl_manager.download_and_extract(my_urls)
data_dir = os.path.join(data_dir, "sofc-exp_textmining_resources-master/sofc-exp-corpus")
metadata = pd.read_csv(os.path.join(data_dir, "SOFC-Exp-Metadata.csv"), sep="\t")
text_base_path = os.path.join(data_dir, "texts")
text_files_available = [
os.path.split(i.rstrip(".txt"))[-1] for i in glob.glob(os.path.join(text_base_path, "*.txt"))
]
metadata = metadata[metadata["name"].map(lambda x: x in text_files_available)]
names = {}
splits = ["train", "test", "dev"]
for split in splits:
names[split] = metadata[metadata["set"] == split]["name"].tolist()
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"names": names["train"],
"data_dir": data_dir,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"names": names["test"], "data_dir": data_dir, "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"names": names["dev"],
"data_dir": data_dir,
"split": "validation",
},
),
]
def _generate_examples(self, names, data_dir, split):
""" Yields examples. """
# The dataset consists of the original article text as well as annotations
textfile_base_path = os.path.join(data_dir, "texts")
annotations_base_path = os.path.join(data_dir, "annotations")
# The annotations are mostly references to offsets in the source text
# with corresponding labels, so we'll refer to them as `meta`
sentence_meta_base_path = os.path.join(annotations_base_path, "sentences")
tokens_meta_base_path = os.path.join(annotations_base_path, "tokens")
ets_meta_base_path = os.path.join(annotations_base_path, "entity_types_and_slots")
frame_meta_base_path = os.path.join(annotations_base_path, "frames")
# Define the headers for the sentence and token and entity metadata
sentence_meta_header = ["sentence_id", "label", "begin_char_offset", "end_char_offset"]
tokens_meta_header = ["sentence_id", "token_id", "begin_char_offset", "end_char_offset"]
ets_meta_header = [
"sentence_id",
"token_id",
"begin_char_offset",
"end_char_offset",
"entity_label",
"slot_label",
]
# Start the processing loop
# For each text file, we'll load all of the
# associated annotation files
for id_, name in enumerate(sorted(names)):
# Load the main source text
textfile_path = os.path.join(textfile_base_path, name + ".txt")
text = open(textfile_path, encoding="utf-8").read()
# Load the sentence offsets file
sentence_meta_path = os.path.join(sentence_meta_base_path, name + ".csv")
sentence_meta = pd.read_csv(sentence_meta_path, sep="\t", names=sentence_meta_header)
# Load the tokens offsets file
tokens_meta_path = os.path.join(tokens_meta_base_path, name + ".csv")
tokens_meta = pd.read_csv(tokens_meta_path, sep="\t", names=tokens_meta_header)
# Load the entity offsets file
ets_meta_path = os.path.join(ets_meta_base_path, name + ".csv")
ets_meta = pd.read_csv(ets_meta_path, sep="\t", names=ets_meta_header)
# Create a list of lists indexed as [sentence][token] for the entity and slot labels
entity_labels = ets_meta.groupby("sentence_id").apply(lambda x: x["entity_label"].tolist()).to_list()
slot_labels = ets_meta.groupby("sentence_id").apply(lambda x: x["slot_label"].tolist()).to_list()
# Create a list of lists for the token offsets indexed as [sentence][token]
# Each element will contain a dict with beginning and ending character offsets
token_offsets = (
tokens_meta.groupby("sentence_id")[["begin_char_offset", "end_char_offset"]]
.apply(lambda x: x.to_dict(orient="records"))
.tolist()
)
# Load the frames metadata. The frames file contains the data for all of the annotations
# in a condensed format that varies throughout the file. More information on this format
# can be found: https://framenet.icsi.berkeley.edu/fndrupal/
frames_meta_path = os.path.join(frame_meta_base_path, name + ".csv")
frames_meta = open(frames_meta_path, encoding="utf-8").readlines()
# Parse the sentence offsets, producing a list of dicts with the
# starting and ending position of each sentence in the original text
sentence_offsets = (
sentence_meta[["begin_char_offset", "end_char_offset"]].apply(lambda x: x.to_dict(), axis=1).tolist()
)
# The sentence labels are a binary label that describes whether the sentence contains
# any annotations
sentence_labels = sentence_meta["label"].tolist()
# Materialiaze a list of strings of the actual sentences
sentences = [text[ost["begin_char_offset"] : ost["end_char_offset"]] for ost in sentence_offsets]
# Materialize a list of lists of the tokens in each sentence.
# Annotation labels are aligned with these tokens, so be careful with
# alignment if using your own tokenization scheme with the sentences above
tokens = [
[s[tto["begin_char_offset"] : tto["end_char_offset"]] for tto in to]
for s, to in zip(sentences, token_offsets)
]
# The frames file first contains spans annotations (in one format),
# then contains experiments annotations (in another format),
# then links annotations (in yet another format).
# Here we find the beginning of the experiments and links sections of the file
# Additionally, each experiment annotation in the experiment annotations begins with a
# line starting with the word EXPERIMENT (in one format)
# followed by the annotations for that experiment (in yet _another_ format)
# Here we get the start positions for each experiment _within_ the experiments
# section of the frames data
experiment_starts = [i for i, line in enumerate(frames_meta) if line.startswith("EXPERIMENT")]
experiment_start = min(experiment_starts)
link_start = min([i for i, line in enumerate(frames_meta) if line.startswith("LINK")])
# Pick out the spans section of the data for parsing
spans_raw = frames_meta[:experiment_start]
# Iterate through the spans data
spans = []
for span in spans_raw:
# Split out the elements in each tab-delimited line
_, span_id, entity_label_or_exp, sentence_id, begin_char_offset, end_char_offset = span.split("\t")
# The entity label for experiment spans have a sub-label,
# called the experiment mention type,
# which is sub-delimited by a ':'
# The code below standardizes the fields produced by
# each line to a common schema, some fields of which may
# be empty depending on the data available in the line
if entity_label_or_exp.startswith("EXPERIMENT"):
exp, experiment_mention_type = entity_label_or_exp.split(":")
entity_label = ""
else:
entity_label = entity_label_or_exp
exp = ""
experiment_mention_type = ""
s = {
"span_id": span_id,
"entity_label": entity_label,
"sentence_id": sentence_id,
"experiment_mention_type": experiment_mention_type,
"begin_char_offset": int(begin_char_offset),
"end_char_offset": int(end_char_offset),
}
spans.append(s)
# Pull out the links annotations for from the frames data
links_raw = [f.rstrip("\n") for f in frames_meta[link_start:]]
# Iterate through the links data, which is in a simple tab-delimited format
links = []
for link in links_raw:
_, relation_label, start_span_id, end_span_id = link.split("\t")
link_out = {
"relation_label": relation_label,
"start_span_id": int(start_span_id),
"end_span_id": int(end_span_id),
}
links.append(link_out)
# Iterate through the experiments data and parse each experiment
experiments = []
# Zip the experiment start offsets to get start/end position tuples
# for each experiment in the experiments data
for start, end in zip(experiment_starts[:-1], experiment_starts[1:]):
current_experiment = frames_meta[start:end]
# The first line of each experiment annotation contains the
# experiment id and the span id
_, experiment_id, span_id = current_experiment[0].rstrip("\n").split("\t")
exp = {"experiment_id": int(experiment_id), "span_id": int(span_id)}
# The remaining lines in the experiment annotations contain
# slot level information for each experiment.
slots = []
for e in current_experiment[1:]:
e = e.rstrip("\n")
_, frame_participant_label, slot_id = e.split("\t")
to_add = {"frame_participant_label": frame_participant_label, "slot_id": int(slot_id)}
slots.append(to_add)
exp["slots"] = slots
experiments.append(exp)
# Yield the final parsed example output
# NOTE: the `token_offsets` is converted to a list of
# dicts to accommodate processing to the arrow files
# in the `features` schema defined above
yield id_, {
"text": text,
"sentence_offsets": sentence_offsets,
"sentences": sentences,
"sentence_labels": sentence_labels,
"token_offsets": [{"offsets": to} for to in token_offsets],
"tokens": tokens,
"entity_labels": entity_labels,
"slot_labels": slot_labels,
"links": links,
"slots": slots,
"spans": spans,
"experiments": experiments,
}
|
[
"os.path.join",
"datasets.SplitGenerator",
"pandas.read_csv",
"datasets.features.ClassLabel",
"datasets.Value",
"datasets.DatasetInfo",
"datasets.Version"
] |
[((1756, 1781), 'datasets.Version', 'datasets.Version', (['"""1.1.0"""'], {}), "('1.1.0')\n", (1772, 1781), False, 'import datasets\n'), ((9638, 9792), 'datasets.DatasetInfo', 'datasets.DatasetInfo', ([], {'description': '_DESCRIPTION', 'features': 'features', 'supervised_keys': 'None', 'homepage': '_HOMEPAGE', 'license': '_LICENSE', 'citation': '_CITATION'}), '(description=_DESCRIPTION, features=features,\n supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=\n _CITATION)\n', (9658, 9792), False, 'import datasets\n'), ((10629, 10707), 'os.path.join', 'os.path.join', (['data_dir', '"""sofc-exp_textmining_resources-master/sofc-exp-corpus"""'], {}), "(data_dir, 'sofc-exp_textmining_resources-master/sofc-exp-corpus')\n", (10641, 10707), False, 'import os\n'), ((10825, 10856), 'os.path.join', 'os.path.join', (['data_dir', '"""texts"""'], {}), "(data_dir, 'texts')\n", (10837, 10856), False, 'import os\n'), ((12431, 12462), 'os.path.join', 'os.path.join', (['data_dir', '"""texts"""'], {}), "(data_dir, 'texts')\n", (12443, 12462), False, 'import os\n'), ((12495, 12532), 'os.path.join', 'os.path.join', (['data_dir', '"""annotations"""'], {}), "(data_dir, 'annotations')\n", (12507, 12532), False, 'import os\n'), ((12716, 12764), 'os.path.join', 'os.path.join', (['annotations_base_path', '"""sentences"""'], {}), "(annotations_base_path, 'sentences')\n", (12728, 12764), False, 'import os\n'), ((12797, 12842), 'os.path.join', 'os.path.join', (['annotations_base_path', '"""tokens"""'], {}), "(annotations_base_path, 'tokens')\n", (12809, 12842), False, 'import os\n'), ((12872, 12933), 'os.path.join', 'os.path.join', (['annotations_base_path', '"""entity_types_and_slots"""'], {}), "(annotations_base_path, 'entity_types_and_slots')\n", (12884, 12933), False, 'import os\n'), ((12965, 13010), 'os.path.join', 'os.path.join', (['annotations_base_path', '"""frames"""'], {}), "(annotations_base_path, 'frames')\n", (12977, 13010), False, 'import os\n'), ((10740, 10787), 'os.path.join', 'os.path.join', (['data_dir', '"""SOFC-Exp-Metadata.csv"""'], {}), "(data_dir, 'SOFC-Exp-Metadata.csv')\n", (10752, 10787), False, 'import os\n'), ((11295, 11427), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'names': names['train'], 'data_dir': data_dir, 'split': 'train'}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'names':\n names['train'], 'data_dir': data_dir, 'split': 'train'})\n", (11318, 11427), False, 'import datasets\n'), ((11631, 11760), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TEST', 'gen_kwargs': "{'names': names['test'], 'data_dir': data_dir, 'split': 'test'}"}), "(name=datasets.Split.TEST, gen_kwargs={'names':\n names['test'], 'data_dir': data_dir, 'split': 'test'})\n", (11654, 11760), False, 'import datasets\n'), ((11885, 12025), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.VALIDATION', 'gen_kwargs': "{'names': names['dev'], 'data_dir': data_dir, 'split': 'validation'}"}), "(name=datasets.Split.VALIDATION, gen_kwargs={'names':\n names['dev'], 'data_dir': data_dir, 'split': 'validation'})\n", (11908, 12025), False, 'import datasets\n'), ((13735, 13782), 'os.path.join', 'os.path.join', (['textfile_base_path', "(name + '.txt')"], {}), "(textfile_base_path, name + '.txt')\n", (13747, 13782), False, 'import os\n'), ((13926, 13978), 'os.path.join', 'os.path.join', (['sentence_meta_base_path', "(name + '.csv')"], {}), "(sentence_meta_base_path, name + '.csv')\n", 
(13938, 13978), False, 'import os\n'), ((14007, 14076), 'pandas.read_csv', 'pd.read_csv', (['sentence_meta_path'], {'sep': '"""\t"""', 'names': 'sentence_meta_header'}), "(sentence_meta_path, sep='\\t', names=sentence_meta_header)\n", (14018, 14076), True, 'import pandas as pd\n'), ((14152, 14202), 'os.path.join', 'os.path.join', (['tokens_meta_base_path', "(name + '.csv')"], {}), "(tokens_meta_base_path, name + '.csv')\n", (14164, 14202), False, 'import os\n'), ((14229, 14294), 'pandas.read_csv', 'pd.read_csv', (['tokens_meta_path'], {'sep': '"""\t"""', 'names': 'tokens_meta_header'}), "(tokens_meta_path, sep='\\t', names=tokens_meta_header)\n", (14240, 14294), True, 'import pandas as pd\n'), ((14367, 14414), 'os.path.join', 'os.path.join', (['ets_meta_base_path', "(name + '.csv')"], {}), "(ets_meta_base_path, name + '.csv')\n", (14379, 14414), False, 'import os\n'), ((14438, 14497), 'pandas.read_csv', 'pd.read_csv', (['ets_meta_path'], {'sep': '"""\t"""', 'names': 'ets_meta_header'}), "(ets_meta_path, sep='\\t', names=ets_meta_header)\n", (14449, 14497), True, 'import pandas as pd\n'), ((15532, 15581), 'os.path.join', 'os.path.join', (['frame_meta_base_path', "(name + '.csv')"], {}), "(frame_meta_base_path, name + '.csv')\n", (15544, 15581), False, 'import os\n'), ((1880, 1904), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (1894, 1904), False, 'import datasets\n'), ((2156, 2180), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2170, 2180), False, 'import datasets\n'), ((2245, 2268), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (2259, 2268), False, 'import datasets\n'), ((10958, 10995), 'os.path.join', 'os.path.join', (['text_base_path', '"""*.txt"""'], {}), "(text_base_path, '*.txt')\n", (10970, 10995), False, 'import os\n'), ((2012, 2035), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (2026, 2035), False, 'import datasets\n'), ((2056, 2079), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (2070, 2079), False, 'import datasets\n'), ((2683, 2707), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (2697, 2707), False, 'import datasets\n'), ((2844, 2999), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['B-DEVICE', 'B-EXPERIMENT', 'B-MATERIAL', 'B-VALUE', 'I-DEVICE',\n 'I-EXPERIMENT', 'I-MATERIAL', 'I-VALUE', 'O']"}), "(names=['B-DEVICE', 'B-EXPERIMENT',\n 'B-MATERIAL', 'B-VALUE', 'I-DEVICE', 'I-EXPERIMENT', 'I-MATERIAL',\n 'I-VALUE', 'O'])\n", (2872, 2999), False, 'import datasets\n'), ((3537, 4422), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['B-anode_material', 'B-cathode_material', 'B-conductivity',\n 'B-current_density', 'B-degradation_rate', 'B-device',\n 'B-electrolyte_material', 'B-experiment_evoking_word', 'B-fuel_used',\n 'B-interlayer_material', 'B-interconnect_material',\n 'B-open_circuit_voltage', 'B-power_density', 'B-resistance',\n 'B-support_material', 'B-thickness', 'B-time_of_operation', 'B-voltage',\n 'B-working_temperature', 'I-anode_material', 'I-cathode_material',\n 'I-conductivity', 'I-current_density', 'I-degradation_rate', 'I-device',\n 'I-electrolyte_material', 'I-experiment_evoking_word', 'I-fuel_used',\n 'I-interlayer_material', 'I-interconnect_material',\n 'I-open_circuit_voltage', 'I-power_density', 'I-resistance',\n 'I-support_material', 'I-thickness', 'I-time_of_operation', 'I-voltage',\n 'I-working_temperature', 'O']"}), 
"(names=['B-anode_material',\n 'B-cathode_material', 'B-conductivity', 'B-current_density',\n 'B-degradation_rate', 'B-device', 'B-electrolyte_material',\n 'B-experiment_evoking_word', 'B-fuel_used', 'B-interlayer_material',\n 'B-interconnect_material', 'B-open_circuit_voltage', 'B-power_density',\n 'B-resistance', 'B-support_material', 'B-thickness',\n 'B-time_of_operation', 'B-voltage', 'B-working_temperature',\n 'I-anode_material', 'I-cathode_material', 'I-conductivity',\n 'I-current_density', 'I-degradation_rate', 'I-device',\n 'I-electrolyte_material', 'I-experiment_evoking_word', 'I-fuel_used',\n 'I-interlayer_material', 'I-interconnect_material',\n 'I-open_circuit_voltage', 'I-power_density', 'I-resistance',\n 'I-support_material', 'I-thickness', 'I-time_of_operation', 'I-voltage',\n 'I-working_temperature', 'O'])\n", (3565, 4422), False, 'import datasets\n'), ((5853, 5964), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['coreference', 'experiment_variation', 'same_experiment', 'thickness']"}), "(names=['coreference', 'experiment_variation',\n 'same_experiment', 'thickness'])\n", (5881, 5964), False, 'import datasets\n'), ((6057, 6080), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (6071, 6080), False, 'import datasets\n'), ((6121, 6144), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (6135, 6144), False, 'import datasets\n'), ((6313, 6642), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['anode_material', 'cathode_material', 'current_density',\n 'degradation_rate', 'device', 'electrolyte_material', 'fuel_used',\n 'interlayer_material', 'open_circuit_voltage', 'power_density',\n 'resistance', 'support_material', 'time_of_operation', 'voltage',\n 'working_temperature']"}), "(names=['anode_material', 'cathode_material',\n 'current_density', 'degradation_rate', 'device', 'electrolyte_material',\n 'fuel_used', 'interlayer_material', 'open_circuit_voltage',\n 'power_density', 'resistance', 'support_material', 'time_of_operation',\n 'voltage', 'working_temperature'])\n", (6341, 6642), False, 'import datasets\n'), ((7228, 7251), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (7242, 7251), False, 'import datasets\n'), ((7404, 7427), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (7418, 7427), False, 'import datasets\n'), ((7469, 7540), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['', 'DEVICE', 'MATERIAL', 'VALUE']"}), "(names=['', 'DEVICE', 'MATERIAL', 'VALUE'])\n", (7497, 7540), False, 'import datasets\n'), ((7581, 7604), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (7595, 7604), False, 'import datasets\n'), ((7657, 7764), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['', 'current_exp', 'future_work', 'general_info', 'previous_work']"}), "(names=['', 'current_exp', 'future_work',\n 'general_info', 'previous_work'])\n", (7685, 7764), False, 'import datasets\n'), ((7861, 7884), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (7875, 7884), False, 'import datasets\n'), ((7929, 7952), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (7943, 7952), False, 'import datasets\n'), ((8117, 8140), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (8131, 8140), False, 'import datasets\n'), ((8177, 8200), 'datasets.Value', 'datasets.Value', (['"""int64"""'], 
{}), "('int64')\n", (8191, 8200), False, 'import datasets\n'), ((2467, 2490), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (2481, 2490), False, 'import datasets\n'), ((2511, 2534), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (2525, 2534), False, 'import datasets\n'), ((8352, 8697), 'datasets.features.ClassLabel', 'datasets.features.ClassLabel', ([], {'names': "['anode_material', 'cathode_material', 'current_density',\n 'degradation_rate', 'conductivity', 'device', 'electrolyte_material',\n 'fuel_used', 'interlayer_material', 'open_circuit_voltage',\n 'power_density', 'resistance', 'support_material', 'time_of_operation',\n 'voltage', 'working_temperature']"}), "(names=['anode_material', 'cathode_material',\n 'current_density', 'degradation_rate', 'conductivity', 'device',\n 'electrolyte_material', 'fuel_used', 'interlayer_material',\n 'open_circuit_voltage', 'power_density', 'resistance',\n 'support_material', 'time_of_operation', 'voltage', 'working_temperature'])\n", (8380, 8697), False, 'import datasets\n'), ((9475, 9498), 'datasets.Value', 'datasets.Value', (['"""int64"""'], {}), "('int64')\n", (9489, 9498), False, 'import datasets\n')]
|
from sys import maxsize
from riskGame.classes.evaluations.sigmoidEval import SigmoidEval
from riskGame.classes.agent.passive_agent import Passive
class RTAStar:
def __init__(self, evaluation_heuristic=SigmoidEval()):
self.__hash_table = {}
self.__evaluate = evaluation_heuristic
self.__passive_agent = Passive()
self.__memo = {}
def dfs(self, curr_state, distance_from_root, limit):
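        # Depth-limited lookahead with memoization: estimate the total cost,
        # counted from the search root, of reaching a winning state via curr_state.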
if limit == 0 or curr_state.get_winner():
# end of limit
            return self.__evaluate.score(curr_state) + distance_from_root
if curr_state.__hash__() in self.__memo:
return self.__memo[curr_state.__hash__()]
my_turn_state = self.__passive_agent.play(curr_state)
child_states = my_turn_state.expand()
child_states = child_states[: min(5 * limit, len(child_states))]
min_cost = maxsize
for child in child_states:
if child.get_winner():
return distance_from_root
child_cost = self.dfs(child, distance_from_root + 1, limit - 1)
min_cost = min(min_cost, child_cost)
self.__memo[curr_state.__hash__()] = min_cost
return min_cost
def play(self, state):
# Plan phase
limit = 3
print("At the RTA*\n")
child_states = state.expand()
min_cost = maxsize
second_min_cost = -1
next_state = None
for child in child_states:
if child in self.__hash_table:
child_cost = self.__hash_table[child]
else:
child_cost = self.dfs(child, 1, limit - 1)
if child_cost < min_cost:
second_min_cost = min_cost
min_cost = child_cost
next_state = child
# Execute phase
self.__hash_table[state] = second_min_cost if second_min_cost != maxsize else min_cost
print('RTA* choose the best state to be: ')
next_state.print_state()
return next_state
|
[
"riskGame.classes.agent.passive_agent.Passive",
"riskGame.classes.evaluations.sigmoidEval.SigmoidEval"
] |
[((208, 221), 'riskGame.classes.evaluations.sigmoidEval.SigmoidEval', 'SigmoidEval', ([], {}), '()\n', (219, 221), False, 'from riskGame.classes.evaluations.sigmoidEval import SigmoidEval\n'), ((333, 342), 'riskGame.classes.agent.passive_agent.Passive', 'Passive', ([], {}), '()\n', (340, 342), False, 'from riskGame.classes.agent.passive_agent import Passive\n'), ((523, 536), 'riskGame.classes.evaluations.sigmoidEval.SigmoidEval', 'SigmoidEval', ([], {}), '()\n', (534, 536), False, 'from riskGame.classes.evaluations.sigmoidEval import SigmoidEval\n')]
|
from mylib.mymodule import get_quotes
from mymodule.ryonage_bot import RyonageBot
def get_lucky(bot, m):
pre = ""
suf = ""
name = m.author.name if m.author.nick is None else m.author.nick
    # when the bot is still healthy (HP above the dying threshold)
if bot.dying_hp < bot.get_hp():
pre = f"{name}さんのラッキーアイテムは・・・・・・『"
quotes = [
[100 , "アナルバイブ"],
[100 , "湯呑"],
[100 , "ビニール傘"],
[100 , "ギロチン台"],
[100 , "ローター"],
[100 , "ペンタブ"],
[100 , "プロテイン"],
[100 , "疑似精子"],
[100 , "マヨネーズ"],
[100 , "鶏むね肉"],
[100 , "ゆで卵"],
[100 , "銀のスプーン"],
[100 , "生首"],
[100 , "包丁"],
[100 , "チェーンソー"],
[100 , "Steamの積みゲー"],
[100 , "プラスチックのコップ"],
[100 , "バナナ"],
[100 , "ゴールデンキウイ"],
[100 , "爪楊枝"],
[100 , "アナルパール"],
[100 , "エロフィギュア"],
[100 , "javascript"],
[100 , "Unity"],
[100 , "RPGツクール"],
[100 , "アクションゲームツクール"],
[100 , "カピバラ"],
[100 , "手袋"],
[100 , "掃除機"],
[100 , "ホウキ"],
[100 , "ツヴァイヘンダー"],
[100 , "日本刀"],
[100 , "ハルバード"],
[100 , "メッサー(グロスメッサー)"],
[100 , "プレートアーマー"],
[100 , "クロスボウ"],
[100 , "ロングボウ"],
[100 , "牛刀"],
[100 , "肩ロース肉"],
[100 , "エロ漫画"],
[100 , "アナルもののAV"],
[100 , "手鏡"],
[100 , "イラスト参考書"],
[100 , "猫のぬいぐるみ"],
[100 , "耳掛け型イヤホン"],
[100 , "ブックスタンド"],
[100 , "レモン"],
[100 , "トマト"],
[100 , "スピーカー"],
[100 , "ミネラルウォーター"],
[100 , "アジャスタブルダンベル"],
[100 , "ゲーミングマウス"],
[100 , "液タブ"],
[100 , "コピー用紙"],
[100 , "プリン"],
[100 , "ハイカカオチョコレート"],
[100 , "アーモンド"],
[100 , "彫刻刀"],
[100 , "ハサミ"],
[100 , "手首"],
[100 , "足首"],
[100 , "スカート"],
[100 , "コスプレグッズ"],
[100 , "ラブドール"],
[100 , "カチューシャ"],
[100 , "ヘアピン"],
[100 , "お寿司"],
[100 , "冷凍マグロ"],
[100 , "しいたけ"],
[100 , "折りたたみ椅子"],
[100 , "シャーペン"],
[100 , "ボールペン"],
[100 , "ピンセット"],
[100 , "浣腸用のシリンジ"],
[100 , "サバイバルナイフ"],
[100 , "遮光カーテン"],
[100 , "大福"],
[100 , "練乳"],
[100 , "キッチンカー"],
[100 , "脚立"],
[100 , "歯ブラシ"],
[100 , "デンタルフロス"],
[100 , "デッサン人形"],
[100 , "30cm定規"],
[100 , "接着剤"],
[100 , "USBメモリ"],
[100 , "電卓"],
[100 , "カレンダー"],
[100 , "コーヒー"],
[100 , "おっぱい"],
[100 , "おまんこ"],
[100 , "Suica"],
[100 , "C++"],
[100 , "薙刀"],
[100 , "段ボール箱"],
[100 , "ティッシュ"],
[100 , "片手鍋"],
[100 , "乳首に刺す名札"],
[100 , "片手斧"],
[100 , "ショートソード"],
[100 , "アーミングソード"],
[100 , "ロングソード"],
[100 , "アルテマウェポン"],
[100 , "ロトの剣"],
[100 , "チェインメイル"],
[100 , "三色ボールペン"],
[100 , "焼き鳥の缶詰"],
[100 , "乾パン"],
[100 , "駆逐艦"],
[100 , "石"],
[100 , "コンクリートブロック"],
[100 , "レンガ"],
[100 , "豆腐"],
[100 , "スライム"],
[100 , "ローション"],
[100 , "うさみみバンド"],
[100 , "バニースーツ"],
[100 , "バイアグラ"],
[100 , "媚薬"],
[100 , "ぷっちょのケース"],
[100 , "たけのこの里"],
[100 , "きのこの山"],
[100 , "チョコモナカジャンボ"],
[100 , "バトルドーム"],
[100 , "砥石"],
[100 , "リオレウス"],
[100 , "超大型巨人"],
[100 , "ミギー"],
[100 , "バキSAGA"],
[100 , "雀牌"],
[100 , "足の爪"],
[100 , "ジャポニカ学習帳"],
[100 , "DXライブラリ"],
[100 , "Godot"],
[100 , "ドラえもん(のぶ代ボイス)"],
[100 , "ポニーテール"],
[100 , "ボンデージ"],
[100 , "新しいPC"],
[100 , "5円玉"],
[100 , "1万円札"],
[100 , "サングラス"],
[100 , "ブルーライトカットメガネ"],
[100 , "チョコパフェ"],
[100 , "堅揚げポテト"],
[100 , "お団子"],
[100 , "A4ファイル"],
[100 , "野太刀"],
[100 , "エアコン"],
[100 , "バランスボール"],
[100 , "算数ドリル"],
[100 , "殺虫スプレー"],
[100 , "ベープマット"],
[100 , "虫取り網"],
[100 , "ロープ"],
[100 , "Tシャツ"],
[100 , "エッチな下着"],
[100 , "魚雷"],
[100 , "かつおぶし"],
[100 , "パンツ"],
[100 , "心霊写真"],
[100 , "ハンガー"],
[100 , "爪切り"],
[100 , "お米"],
[100 , "唐揚げ"],
[100 , "漂白剤"],
[100 , "湯たんぽ"],
[100 , "シャンプーのボトル"],
[100 , "After Effects"],
[100 , "Photoshop"],
[100 , "クリップスタジオ"],
[100 , "触手"],
[100 , "消臭スプレー"],
[100 , "消毒用エタノール"],
[100 , "自転車"],
[100 , "ビー玉"],
[100 , "ハイパーヨーヨー"],
[100 , "ミニ四駆"],
[100 , "緑茶"],
[100 , "紅茶"],
[100 , "野菜ジュース"],
[100 , "トマト"],
[100 , "懐中時計"],
[100 , "懐中電灯"],
[100 , "防災リュック"],
[100 , "ハンドガン"],
[100 , "トミーガン"],
[100 , "ロケットランチャー"],
[100 , "四次元ポケット"],
[100 , "1.5Lのペットボトル"],
[100 , "方位磁針"],
[100 , "羅針盤"],
[100 , "漢字ドリル"],
[100 , "ファミコン"],
[100 , "カセットテープ"],
[100 , "呪いのビデオ"],
[100 , "ニプレス"],
[100 , "猫のヒゲ"],
[100 , "ゲームボーイ"],
[100 , "ガントレット"],
[100 , "サバトン"],
[100 , "アーメット"],
[100 , "バルビュート"],
[100 , "アナルフック"],
[100 , "ベーコン"],
[100 , "パンの耳"],
[100 , "高級食パン"],
[100 , "甘酒"],
[100 , "ガチャポンのカプセル"],
[100 , "木刀"],
[100 , "お土産の剣型キーホルダー"],
[100 , "幸運を呼ぶツボ"],
[100 , "硯"],
[100 , "筆"],
[100 , "電極"],
[100 , "スタンガン"],
[100 , "キャットナインテイル"],
[100 , "レイピア"],
[100 , "こんにゃく"],
[100 , "黒マテリア"],
[100 , "コメドプッシャー(ニキビ潰し)"],
[100 , "毛抜き"],
[100 , "山芋"],
[100 , "海老の天ぷら"],
[100 , "食塩"],
[100 , "ブランデー"],
[100 , "ビール"],
[100 , "バファリン"],
[100 , "モンエナ"],
[100 , "オロナミンC"],
[100 , "アクエリアス"],
[100 , "ポカリスエット"],
[100 , "パトランプ"],
[100 , "へぇボタン"],
[100 , "チャージマン研DVDBOX"],
[100 , "蹄鉄"],
[100 , "バスターソード"],
[100 , "バスタードソード"],
[100 , "蛇口"],
[100 , "ネジ"],
[100 , "六角ボルト"],
[100 , "餃子"],
[100 , "肉まん"],
[100 , "ピザマン"],
[100 , "伊達メガネ"],
[100 , "バンダナ"],
[100 , "ラブレター"],
[100 , "紐水着"],
[100 , "スクール水着"],
[100 , "アナル型オナホール(非貫通タイプ)"],
[100 , "妖精さん"],
[100 , "猫耳美少女"],
[100 , "マスカラ"],
[100 , "ランニングシューズ"],
[100 , "懸垂スタンド"],
[100 , "バスタオル"],
[100 , "塩麹"],
[100 , "ケチャップ"],
[100 , "クリピアス"],
[100 , "乳首ピアス"],
[100 , "手錠"],
[100 , "足枷"],
[100 , "珪藻土コースター"],
[100 , "ワカメ"],
[100 , "昆布"],
[100 , "だしパック"],
[100 , "ウニ"],
[100 , "ピッケル"],
[100 , "ツルハシ"],
[100 , "ギター"],
[100 , "リュート"],
[100 , "レオタード"],
[100 , "ドラム缶"],
[100 , "フライパン"],
[100 , "三角コーナー"],
[100 , "マニキュア"],
[100 , "洗濯バサミ"],
[100 , "ピカチュウ"],
[100 , "スーパーマリオ"],
[100 , "ドラえもん(CV:大山のぶ代)"],
[100 , "ハローキティ"],
[100 , "ラップの芯"],
[100 , "トイレットペーパー"],
[100 , "かまぼこの板"],
[100 , "ストロー"],
[100 , "針金"],
[100 , "豚骨ラーメン"],
[100 , "レバー"],
[100 , "変身ステッキ"],
[100 , "メイス"],
[100 , "お馬さんのおちんちん"],
[100 , "栗おこわ"],
[100 , "アナルプラグ"],
[100 , "セミの抜け殻"],
[100 , "マイクロファイバーの雑巾"],
[100 , "サランラップ"],
[100 , "お箸"],
[100 , "スタンド使い"],
[100 , "紙粘土"],
[100 , "つけまつげ"],
[100 , "おろし金"],
[100 , "グランドピアノ"],
[100 , "リコーダー"],
[100 , "月の石"],
[100 , "万華鏡"],
[100 , "畳"],
[100 , "虫眼鏡"],
[100 , "利尿剤"],
[100 , "大胸筋矯正サポーター"],
[100 , "おちんぽミルク"],
[100 , "ベニヤ板"],
[100 , "スレッジハンマー"],
[100 , "五寸釘"],
[100 , "そうめん"],
[100 , "カツオのたたき"],
[100 , "藁人形"],
[100 , "セーター"],
[100 , "金塊"],
[100 , "梅干し"],
[100 , "チェダーチーズ"],
[100 , "チャーシュー"],
[100 , "上履き"],
[100 , "ブルマ"],
[100 , "バファリン"],
[100 , "単2電池"],
[100 , "鎖鎌"],
[100 , "ひまわりの種"],
[100 , "母乳"],
[100 , "おしっこ"],
[100 , "リュックサック"]
]
        # appended afterwards so that "わ・た・し♥" comes up with roughly a 2% probability
num = len(quotes) * 0.02 * 100
quotes.append([num , "わ・た・し♥"])
suf = "』っ!ですっ!"
else:
        # when the bot is near death (dying state)
quotes = [
[100 , "知りま、せん・・・そんなの・・・"],
[100 , f"私、を・・・虐める{name}さんに・・・ラッキーアイテムなん、て・・・無いです・・・"],
[100 , "私が欲しい、ですよ・・・そんなの・・・"]
]
return pre + get_quotes(quotes) + suf
|
[
"mylib.mymodule.get_quotes"
] |
[((10282, 10300), 'mylib.mymodule.get_quotes', 'get_quotes', (['quotes'], {}), '(quotes)\n', (10292, 10300), False, 'from mylib.mymodule import get_quotes\n')]
|
from typing import Optional, List, Dict
from cle.address_translator import AddressTranslator
from sortedcontainers import SortedDict
from .plugin import KnowledgeBasePlugin
# TODO: Serializable
class Patch:
def __init__(self, addr, new_bytes, comment: Optional[str]=None):
self.addr = addr
self.new_bytes = new_bytes
self.comment = comment
def __len__(self):
return len(self.new_bytes)
class PatchManager(KnowledgeBasePlugin):
"""
A placeholder-style implementation for a binary patch manager. This class should be significantly changed in the
future when all data about loaded binary objects are loaded into angr knowledge base from CLE. As of now, it only
stores byte-level replacements. Other angr components may choose to use or not use information provided by this
manager. In other words, it is not transparent.
Patches should not overlap, but it's user's responsibility to check for and avoid overlapping patches.
"""
def __init__(self, kb):
super().__init__()
self._patches: Dict[int,Patch] = SortedDict()
self._kb = kb
def add_patch(self, addr, new_bytes, comment: Optional[str]=None):
self._patches[addr] = Patch(addr, new_bytes, comment=comment)
def add_patch_obj(self, patch: Patch):
self._patches[patch.addr] = patch
def remove_patch(self, addr):
if addr in self._patches:
del self._patches[addr]
def patch_addrs(self):
return self._patches.keys()
def get_patch(self, addr):
"""
Get patch at the given address.
:param int addr: The address of the patch.
:return: The patch if there is one starting at the address, or None if there isn't any.
:rtype: Patch or None
"""
return self._patches.get(addr, None)
def get_all_patches(self, addr, size):
"""
Retrieve all patches that cover a region specified by [addr, addr+size).
:param int addr: The address of the beginning of the region.
:param int size: Size of the region.
:return: A list of patches.
:rtype: list
"""
patches = [ ]
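        # Walk patch start addresses downwards from addr+size-1; patches are kept
        # sorted (and assumed non-overlapping), so the first patch that no longer
        # overlaps the region means no earlier patch can overlap it either.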
for patch_addr in self._patches.irange(maximum=addr+size-1, reverse=True):
p = self._patches[patch_addr]
if self.overlap(p.addr, p.addr + len(p), addr, addr+size):
patches.append(p)
else:
break
return patches[::-1]
def keys(self):
return self._patches.keys()
def items(self):
return self._patches.items()
def values(self):
return self._patches.values()
    def copy(self):
        o = PatchManager(self._kb)
        o._patches = self._patches.copy()
        return o
@staticmethod
def overlap(a0, a1, b0, b1):
return a0 <= b0 < a1 or a0 <= b1 < a1 or b0 <= a0 < b1
def apply_patches_to_binary(self, binary_bytes: Optional[bytes]=None, patches: Optional[List[Patch]]=None) -> bytes:
if patches is None:
patches = sorted(list(self._patches.values()), key=lambda x: x.addr)
if binary_bytes is None:
with open(self._kb._project.loader.main_object.binary, "rb") as f:
binary_bytes = f.read()
for patch in patches:
# convert addr to file offset
at = AddressTranslator.from_mva(patch.addr, self._kb._project.loader.main_object)
file_offset = at.to_raw()
if file_offset < len(binary_bytes) and file_offset + len(patch.new_bytes) < len(binary_bytes):
binary_bytes = binary_bytes[:file_offset] + \
patch.new_bytes + \
binary_bytes[file_offset + len(patch.new_bytes):]
return binary_bytes
KnowledgeBasePlugin.register_default('patches', PatchManager)
|
[
"sortedcontainers.SortedDict",
"cle.address_translator.AddressTranslator.from_mva"
] |
[((1099, 1111), 'sortedcontainers.SortedDict', 'SortedDict', ([], {}), '()\n', (1109, 1111), False, 'from sortedcontainers import SortedDict\n'), ((3410, 3486), 'cle.address_translator.AddressTranslator.from_mva', 'AddressTranslator.from_mva', (['patch.addr', 'self._kb._project.loader.main_object'], {}), '(patch.addr, self._kb._project.loader.main_object)\n', (3436, 3486), False, 'from cle.address_translator import AddressTranslator\n')]
|
"""manager.py"""
import logging
from googleapiclient import errors
import gtm_manager.account
import gtm_manager.base
class GTMManager(gtm_manager.base.GTMBase):
"""Authenticates a users base gtm access.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.accounts_service = self.service.accounts() # pylint: disable=E1101
def list_accounts(self):
"""Loads from the API and lists all GTM Accounts that a user has access to.
Returns:
A list of :class:`gtm_manager.account.GTMAccount` that the user has access to.
"""
try:
request = self.accounts_service.list()
response = request.execute()
return [
gtm_manager.account.GTMAccount(account=x, service=self.service)
for x in response.get("account") or []
]
except errors.HttpError as error:
logging.error(error)
return []
|
[
"logging.error"
] |
[((912, 932), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (925, 932), False, 'import logging\n')]
|
"""
Datacube interop functions are here
"""
import numpy as np
from itertools import chain
from types import SimpleNamespace
from datacube.storage.storage import measurement_paths
from datacube.utils import uri_to_local_path
from datacube.api import GridWorkflow
def flatmap(f, items):
return chain.from_iterable(map(f, items))
def first_val(x):
return next(iter(x.values()))
def list_native_cell(product, cell_index, dc, **query):
index = dc.index
p = index.products.get_by_name(product)
if p.grid_spec is None:
raise ValueError('Supplied product does not have a grid spec')
gw = GridWorkflow(index, grid_spec=p.grid_spec)
tile = gw.list_cells(cell_index=cell_index,
product=product,
**query)[cell_index]
return list(flatmap(lambda x: x, tile.sources.values))
def group_by_storage(dss, bands=None):
"""
returns [StorageResource]
StorageResource
.uri - string, URI of the resource
.local_path - PosixPath, path on a filesystem, could be None if not a file resource
.bands - Dictionary of bands (copied from Dataset)
.time - np.ndarray<datetime64[ns]> Timestamps to be read from this resource
.datasets - List<datacube.Dataset> referencing this resource
"""
su_all = {}
if bands is None:
def check_band(band):
return True
else:
bands = set(bands)
def check_band(band):
return band in bands
def local_path(uri):
try:
return uri_to_local_path(uri)
except ValueError:
return None
def update(su, ds, band=None):
if band is None:
bb = {k: ds.measurements[k]
for k in ds.measurements if check_band(k)}
else:
bb = {band: ds.measurements[band]}
if su not in su_all:
su_all[su] = SimpleNamespace(bands=bb,
uri=su,
local_path=local_path(su),
datasets=[ds])
else:
su_all[su].datasets.append(ds)
for ds in dss:
pp = measurement_paths(ds)
paths = set(pp.values())
if len(paths) == 1: # All bands in one file
update(paths.pop(), ds)
elif len(paths) == len(pp): # Each band in it's own file
for band, file in pp.items():
if check_band(band):
update(file, ds, band)
else:
raise ValueError('Not supporting multiple multi-band files')
for s in su_all.values():
s.time = np.array([ds.center_time for ds in s.datasets], dtype='datetime64[ns]')
return sorted(su_all.values(), key=lambda s: s.time[0])
def compute_time_slice(requested_time, file_time):
"""
Given requested time stamps and available timestamps (both assumed to be
sorted in ascending order), computes roi such that
requested_time in file_time[roi]
Returns (roi, contigous, complete)
Where:
roi: slice object
contigous: True|False if False not all file stamps in the range are needed
complete: True|False, if False some requested timestamps were not found
"""
assert requested_time.dtype == file_time.dtype
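    # indices of file timestamps lying inside [requested_time.min(), requested_time.max()]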
ii = np.where((file_time >= requested_time.min()) * (file_time <= requested_time.max()))[0]
if len(ii) == 0:
raise ValueError("No overlap")
roi = slice(ii[0], ii[-1]+1)
file_time = set(file_time[roi])
requested_time = set(requested_time)
contigous = (file_time == requested_time)
complete = requested_time.issubset(file_time)
return roi, contigous, complete
|
[
"datacube.storage.storage.measurement_paths",
"numpy.array",
"datacube.utils.uri_to_local_path",
"datacube.api.GridWorkflow"
] |
[((621, 663), 'datacube.api.GridWorkflow', 'GridWorkflow', (['index'], {'grid_spec': 'p.grid_spec'}), '(index, grid_spec=p.grid_spec)\n', (633, 663), False, 'from datacube.api import GridWorkflow\n'), ((2234, 2255), 'datacube.storage.storage.measurement_paths', 'measurement_paths', (['ds'], {}), '(ds)\n', (2251, 2255), False, 'from datacube.storage.storage import measurement_paths\n'), ((2702, 2773), 'numpy.array', 'np.array', (['[ds.center_time for ds in s.datasets]'], {'dtype': '"""datetime64[ns]"""'}), "([ds.center_time for ds in s.datasets], dtype='datetime64[ns]')\n", (2710, 2773), True, 'import numpy as np\n'), ((1593, 1615), 'datacube.utils.uri_to_local_path', 'uri_to_local_path', (['uri'], {}), '(uri)\n', (1610, 1615), False, 'from datacube.utils import uri_to_local_path\n')]
|
import os
import time
import torch
import torch.optim
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
from loss.ssd_loss import SSDLoss
from metrics.voc_eval import voc_eval
from modellibs.s3fd.box_coder import S3FDBoxCoder
from utils.average_meter import AverageMeter
class Trainer(object):
def __init__(self, opt, train_dataloader, valid_dataloader, model):
self.opt = opt
self.current_lr = opt.lr
self.start_epoch = opt.start_epochs
self.train_dataloader = train_dataloader
self.valid_dataloader = valid_dataloader
self.max_iter_train = len(self.train_dataloader)
self.max_iter_valid = len(self.valid_dataloader)
self.model = model
self.criterion_first = torch.nn.CrossEntropyLoss().cuda()
self.criterion_middle = torch.nn.CrossEntropyLoss().cuda()
self.criterion_last = torch.nn.CrossEntropyLoss().cuda()
self.criterion_config = torch.nn.CrossEntropyLoss().cuda()
self.optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
self.best_loss = float('inf')
if opt.resume:
self.optimizer.load_state_dict(torch.load(opt.resume_path)['optimizer'])
def train_model(self, max_epoch, learning_rate, layers=None):
self.max_epoch = max_epoch
for epoch in range(self.start_epoch, self.max_epoch):
self.adjust_learning_rate(self.optimizer, epoch)
self.train_epoch(epoch)
self.valid_epoch(epoch)
print('')
print('optimization done')
save_dir = 'experiments/%s_%s_%s' % (self.opt.dataset, self.opt.task, self.opt.model)
file_name = '%s_%s_%s_best_loss_%f' % (self.opt.dataset, self.opt.task, self.opt.model, self.best_loss)
os.rename(self.opt.expr_dir, os.path.join(save_dir,file_name))
def train_epoch(self, epoch):
""" training """
self.model.train()
self.optimizer.zero_grad()
train_loss = 0
for batch_idx, (inputs, label_first,label_middle, label_last, label_config) in enumerate(self.train_dataloader):
# label_first = labels[0]
# label_middle = labels[1]
# label_last = labels[2]
# label_config = labels[3]
inputs = inputs.to(self.opt.device)
label_first = label_first.to(self.opt.device)
label_middle = label_middle.to(self.opt.device)
label_last = label_last.to(self.opt.device)
label_config = label_config.to(self.opt.device)
output_first, output_middle, output_last, output_config = self.model(inputs)
loss_first = self.criterion_first(output_first, label_first)
loss_middle = self.criterion_middle(output_middle, label_middle)
loss_last = self.criterion_last(output_last, label_last)
loss_config = self.criterion_config(output_config, label_config)
loss = loss_first + loss_middle + loss_last + loss_config
loss.backward()
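            # gradient accumulation: only step the optimizer every `accum_grad`
            # mini-batches (and on the final batch of the epoch)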
if ((batch_idx + 1) % self.opt.accum_grad == 0) or ((batch_idx+1) == self.max_iter_train):
self.optimizer.step()
self.model.zero_grad()
self.optimizer.zero_grad()
train_loss += loss.item()
if batch_idx % self.opt.print_freq == 0:
print('Epoch[%d/%d] Iter[%d/%d] Learning Rate: %.6f Total Loss: %.4f, First Loss: %.4f, Middle Loss: %.4f, Last Loss: %.4f, Config Loss: %.4f' %
(epoch, self.max_epoch, batch_idx, self.max_iter_train, self.current_lr, loss.item(), loss_first.item(), loss_middle.item(), loss_last.item(), loss_config.item()))
def valid_epoch(self, epoch):
correct_f = 0
correct_m = 0
correct_l = 0
correct_c = 0
""" validate """
self.model.eval()
test_loss = 0
for batch_idx, (inputs, label_first,label_middle, label_last, label_config) in enumerate(self.valid_dataloader):
with torch.no_grad():
inputs = inputs.to(self.opt.device)
label_first = label_first.to(self.opt.device)
label_middle = label_middle.to(self.opt.device)
label_last = label_last.to(self.opt.device)
label_config = label_config.to(self.opt.device)
output_first, output_middle, output_last, output_config = self.model(inputs)
loss_first = self.criterion_first(output_first, label_first)
loss_middle = self.criterion_middle(output_middle, label_middle)
loss_last = self.criterion_last(output_last, label_last)
loss_config = self.criterion_config(output_config, label_config)
loss = loss_first + loss_middle + loss_last + loss_config
pred_f = output_first.data.max(1, keepdim=True)[1].cpu()
pred_m = output_middle.data.max(1, keepdim=True)[1].cpu()
pred_l = output_last.data.max(1, keepdim=True)[1].cpu()
pred_c = output_config.data.max(1, keepdim=True)[1].cpu()
correct_f += pred_f.eq(label_first.cpu().view_as(pred_f)).sum()
correct_m += pred_m.eq(label_middle.cpu().view_as(pred_m)).sum()
correct_l += pred_l.eq(label_last.cpu().view_as(pred_l)).sum()
correct_c += pred_c.eq(label_config.cpu().view_as(pred_c)).sum()
test_loss += loss.item()
if batch_idx % self.opt.print_freq_eval == 0:
print('Validation[%d/%d] Total Loss: %.4f, First Loss: %.4f, Middle Loss: %.4f, Last Loss: %.4f, Conf Loss: %.4f' %
(batch_idx, len(self.valid_dataloader), loss.item(), loss_first.item(), loss_middle.item(), loss_last.item(), loss_config.item()))
num_test_data = len(self.valid_dataloader.dataset)
accuracy_f = 100. * correct_f / num_test_data
accuracy_m = 100. * correct_m / num_test_data
accuracy_l = 100. * correct_l / num_test_data
accuracy_c = 100. * correct_c / num_test_data
test_loss /= len(self.valid_dataloader)
if test_loss < self.best_loss:
print('Saving..')
state = {
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_loss': test_loss,
'epoch': epoch,
}
torch.save(state, os.path.join(self.opt.expr_dir, 'model_best.pth'))
self.best_loss = test_loss
print('[*] Model %s,\tCurrent Loss: %f\tBest Loss: %f' % (self.opt.model, test_loss, self.best_loss))
print('Val Accuracy_F: {}/{} ({:.0f}%) | Val Accuracy_M: {}/{} ({:.0f}%) | Val Accuracy_L: {}/{} ({:.0f}%) | Val Accuracy_C: {}/{} ({:.0f}%)\n'.format(
correct_f, num_test_data, accuracy_f,
correct_m, num_test_data, accuracy_m,
correct_l, num_test_data, accuracy_l,
correct_c, num_test_data, accuracy_c))
def adjust_learning_rate(self, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
self.current_lr = self.opt.lr * (0.1 ** (epoch // 50))
for param_group in optimizer.param_groups:
param_group['lr'] = self.current_lr
def make_dir(self, dir_path):
if not os.path.exists(os.path.join(self.opt.expr_dir, dir_path)):
os.mkdir(os.path.join(self.opt.expr_dir, dir_path))
|
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.no_grad",
"os.path.join"
] |
[((1875, 1908), 'os.path.join', 'os.path.join', (['save_dir', 'file_name'], {}), '(save_dir, file_name)\n', (1887, 1908), False, 'import os\n'), ((789, 816), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (814, 816), False, 'import torch\n'), ((856, 883), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (881, 883), False, 'import torch\n'), ((921, 948), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (946, 948), False, 'import torch\n'), ((988, 1015), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1013, 1015), False, 'import torch\n'), ((4095, 4110), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4108, 4110), False, 'import torch\n'), ((6556, 6605), 'os.path.join', 'os.path.join', (['self.opt.expr_dir', '"""model_best.pth"""'], {}), "(self.opt.expr_dir, 'model_best.pth')\n", (6568, 6605), False, 'import os\n'), ((7484, 7525), 'os.path.join', 'os.path.join', (['self.opt.expr_dir', 'dir_path'], {}), '(self.opt.expr_dir, dir_path)\n', (7496, 7525), False, 'import os\n'), ((7549, 7590), 'os.path.join', 'os.path.join', (['self.opt.expr_dir', 'dir_path'], {}), '(self.opt.expr_dir, dir_path)\n', (7561, 7590), False, 'import os\n'), ((1234, 1261), 'torch.load', 'torch.load', (['opt.resume_path'], {}), '(opt.resume_path)\n', (1244, 1261), False, 'import torch\n')]
|
# Generated by Django 2.2.4 on 2019-11-03 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recupero', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='prestacion',
name='nomenclador',
),
migrations.AddField(
model_name='tipoprestacion',
name='arancel',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=11),
),
migrations.AddField(
model_name='tipoprestacion',
name='codigo',
field=models.CharField(blank=True, help_text='Código del servicio (de nomenclador si corresponde)', max_length=30, null=True),
),
migrations.AddField(
model_name='tipoprestacion',
name='descripcion',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='tipoprestacion',
name='observaciones',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='tipoprestacion',
name='tipo',
field=models.PositiveIntegerField(choices=[(0, 'Desconocido'), (100, 'Consulta'), (200, 'Práctica'), (300, 'Internación'), (400, 'Laboratorio')], default=100),
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.DecimalField"
] |
[((225, 292), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""prestacion"""', 'name': '"""nomenclador"""'}), "(model_name='prestacion', name='nomenclador')\n", (247, 292), False, 'from django.db import migrations, models\n'), ((445, 510), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(0.0)', 'max_digits': '(11)'}), '(decimal_places=2, default=0.0, max_digits=11)\n', (464, 510), False, 'from django.db import migrations, models\n'), ((638, 766), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Código del servicio (de nomenclador si corresponde)"""', 'max_length': '(30)', 'null': '(True)'}), "(blank=True, help_text=\n 'Código del servicio (de nomenclador si corresponde)', max_length=30,\n null=True)\n", (654, 766), False, 'from django.db import migrations, models\n'), ((890, 929), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (906, 929), False, 'from django.db import migrations, models\n'), ((1064, 1103), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1080, 1103), False, 'from django.db import migrations, models\n'), ((1231, 1392), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'choices': "[(0, 'Desconocido'), (100, 'Consulta'), (200, 'Práctica'), (300,\n 'Internación'), (400, 'Laboratorio')]", 'default': '(100)'}), "(choices=[(0, 'Desconocido'), (100, 'Consulta'),\n (200, 'Práctica'), (300, 'Internación'), (400, 'Laboratorio')], default=100\n )\n", (1258, 1392), False, 'from django.db import migrations, models\n')]
|
from opensearch import osfeedparser
import logging
logger = logging.getLogger(__name__)
class Results(object):
def __init__(self, query, agent=None):
self.agent = agent
self._fetch(query)
self._iter = 0
def __iter__(self):
self._iter = 0
return self
def __len__(self):
return self.totalResults
def next(self):
# just keep going like the energizer bunny
while True:
# return any item we haven't returned
if self._iter < len(self.items):
self._iter += 1
return self.items[self._iter-1]
# if there appears to be more to fetch
if \
self.totalResults != 0 \
and self.totalResults > self.startIndex + self.itemsPerPage - 1:
# get the next query
next_query = self._get_next_query()
# if we got one executed it and go back to the beginning
if next_query:
self._fetch(next_query)
# very important to reset this counter
# or else the return will fail
self._iter = 0
else:
# deal with malformed templates
# stop if there isn't anything
raise StopIteration
else:
raise StopIteration
def _fetch(self, query):
url = query.url()
logger.debug("fetching %s" % url)
feed = osfeedparser.opensearch_parse(url, agent=self.agent)
self.feed = feed
# general channel stuff
channel = feed['feed']
self.title = _pick(channel,'title')
self.link = _pick(channel,'link')
self.description = _pick(channel,'description')
self.language = _pick(channel,'language')
self.copyright = _pick(channel,'copyright')
# get back opensearch specific values
self.totalResults = _pick(channel,'opensearch_totalresults',0)
self.startIndex = _pick(channel,'opensearch_startindex',1)
self.itemsPerPage = _pick(channel,'opensearch_itemsperpage',0)
# alias items from the feed to our results object
self.items = feed['items']
# set default values if necessary
if self.startIndex == 0:
self.startIndex = 1
if self.itemsPerPage == 0 and len(self.items) > 0:
self.itemsPerPage = len(self.items)
# store away query for calculating next results
# if necessary
self.last_query = query
def _get_next_query(self):
# update our query to get the next set of records
query = self.last_query
# use start page if the query supports it
if query.has_macro('startPage'):
# if the query already defined the startPage
# we just need to increment it
if hasattr(query, 'startPage'):
query.startPage += 1
# to issue the first query startPage might not have
# been specified, so set it to 2
else:
query.startPage = 2
return query
# otherwise the query should support startIndex
elif query.has_macro('startIndex'):
# if startIndex was used before we just add the
# items per page to it to get the next set
if hasattr(query, 'startIndex'):
query.startIndex += self.itemsPerPage
# to issue the first query the startIndex may have
# been left blank in that case we assume it to be
# the item just after the last one on this page
else:
query.startIndex = self.itemsPerPage + 1
return query
# doesn't look like there is another stage to this query
return None
# helper for pulling values out of a dictionary if they're there
# and returning a default value if they're not
def _pick(d,key,default=None):
# get the value out
value = d.get(key)
# if it wasn't there return the default
if value == None:
return default
# if they want an int try to convert to an int
# and return default if it fails
if type(default) == int:
try:
return int(d[key])
except:
return default
# otherwise we're good to return the value
return value
|
[
"opensearch.osfeedparser.opensearch_parse",
"logging.getLogger"
] |
[((62, 89), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (79, 89), False, 'import logging\n'), ((1548, 1600), 'opensearch.osfeedparser.opensearch_parse', 'osfeedparser.opensearch_parse', (['url'], {'agent': 'self.agent'}), '(url, agent=self.agent)\n', (1577, 1600), False, 'from opensearch import osfeedparser\n')]
|
#
# Tests for Overworld character inventory
#
import sys
sys.path.append('../components')
from items import NewInventory as Inventory
from items import NewItem as Item
from items import Material
class Test_Inventory:
def setup_class(cls):
cls.inv = Inventory()
def test_construction(self):
assert self.inv is not None
def test_properties(self):
assert type(self.inv.items) is tuple
assert len(self.inv.items) == 0
def test_inventory_add_items(self):
item = Item()
item.add_material(Material())
self.inv.add_item(item)
|
[
"sys.path.append",
"items.NewItem",
"items.NewInventory",
"items.Material"
] |
[((58, 90), 'sys.path.append', 'sys.path.append', (['"""../components"""'], {}), "('../components')\n", (73, 90), False, 'import sys\n'), ((269, 280), 'items.NewInventory', 'Inventory', ([], {}), '()\n', (278, 280), True, 'from items import NewInventory as Inventory\n'), ((545, 551), 'items.NewItem', 'Item', ([], {}), '()\n', (549, 551), True, 'from items import NewItem as Item\n'), ((578, 588), 'items.Material', 'Material', ([], {}), '()\n', (586, 588), False, 'from items import Material\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas_mobile_robot_reloc.utils import _ensure_rhino
from pytest import raises
def test__ensure_rhino():
with raises(ImportError):
_ensure_rhino()
|
[
"compas_mobile_robot_reloc.utils._ensure_rhino",
"pytest.raises"
] |
[((231, 250), 'pytest.raises', 'raises', (['ImportError'], {}), '(ImportError)\n', (237, 250), False, 'from pytest import raises\n'), ((260, 275), 'compas_mobile_robot_reloc.utils._ensure_rhino', '_ensure_rhino', ([], {}), '()\n', (273, 275), False, 'from compas_mobile_robot_reloc.utils import _ensure_rhino\n')]
|
from os.path import (
expanduser,
join as join_path
)
from IPython.display import HTML
from tqdm.notebook import tqdm as log_progress
from naeval.const import (
NEWS, WIKI, FICTION, SOCIAL, POETRY,
DATASET, JL, GZ
)
from naeval.io import (
format_jl,
parse_jl,
load_gz_lines,
dump_gz_lines,
)
from naeval.record import (
as_jsons,
from_jsons
)
from naeval.dataset import sample
from naeval.readme import patch_readme
from naeval.lemma.datasets import load_dataset
from naeval.lemma.markup import (
Markup,
show_markup
)
CORUS_DATA_DIR = expanduser('~/proj/corus-data/gramru')
CORUS_FILES = {
NEWS: [
'dev/GramEval2020-RuEval2017-Lenta-news-dev.conllu',
'train/MorphoRuEval2017-Lenta-train.conllu',
],
WIKI: [
'dev/GramEval2020-GSD-wiki-dev.conllu',
'train/GramEval2020-GSD-train.conllu'
],
FICTION: [
'dev/GramEval2020-SynTagRus-dev.conllu',
'train/GramEval2020-SynTagRus-train-v2.conllu',
'train/MorphoRuEval2017-JZ-gold.conllu'
],
SOCIAL: [
'dev/GramEval2020-RuEval2017-social-dev.conllu',
'train/GramEval2020-Taiga-social-train.conllu',
'train/MorphoRuEval2017-VK-gold.conllu'
],
POETRY: [
'dev/GramEval2020-Taiga-poetry-dev.conllu',
'train/GramEval2020-Taiga-poetry-train.conllu'
],
}
DATASETS = [NEWS, WIKI, FICTION, SOCIAL, POETRY]
DATA_DIR = expanduser('~/proj/naeval/data/lemma')
LEMMA = 'lemma'
README = expanduser('~/proj/naeval/README.md')
|
[
"os.path.expanduser"
] |
[((591, 629), 'os.path.expanduser', 'expanduser', (['"""~/proj/corus-data/gramru"""'], {}), "('~/proj/corus-data/gramru')\n", (601, 629), False, 'from os.path import expanduser, join as join_path\n'), ((1441, 1479), 'os.path.expanduser', 'expanduser', (['"""~/proj/naeval/data/lemma"""'], {}), "('~/proj/naeval/data/lemma')\n", (1451, 1479), False, 'from os.path import expanduser, join as join_path\n'), ((1505, 1542), 'os.path.expanduser', 'expanduser', (['"""~/proj/naeval/README.md"""'], {}), "('~/proj/naeval/README.md')\n", (1515, 1542), False, 'from os.path import expanduser, join as join_path\n')]
|
import requests
from bs4 import BeautifulSoup
import re
import webbrowser
import time
from qbittorrent import Client
movie = input("Enter What You Want To Download : ")
movie_name = movie
if len(movie.split()) > 1:
    movie = '%20'.join(movie.split())
url = f'https://thepiratebay10.org/search/{movie}/1/99/100,200,300,400,600'
r = requests.get(url)
htmlcontent = r.content
soup = BeautifulSoup(htmlcontent, 'html.parser')
anchors = soup.find_all("a")
all_links = []
all_names = []
all_search_links = []
for link in anchors:
if(link.get('href') != '#'):
linkName = link.get('title')
linkText = link.get('href')
all_links.append(linkText)
if(linkName != None):
all_names.append(linkName)
all_links = set(all_links)
all_links = list(all_links)
subsName = "Details for"
nameFinder = [i for i in all_names if subsName in i]
namelist = []
for name in nameFinder:
if(name.startswith(subsName)):
names = name[len(subsName)+1:]
namelist.append(names)
for index, s in enumerate(namelist):
print(str(index+1)+". "+s)
number_for_download = int(input("Enter number you want to download : "))-1
movie_title = namelist[number_for_download]
print("you're downloading : "+movie_title)
movie_title = movie_title.split()
movie_title = '%20'.join(movie_title)
url_selected = f'https://thepiratebay10.org/search/{movie_title}/1/99/100,200,300,400,600'
req = requests.get(url_selected)
htmlcontents = req.content
soup_selected = BeautifulSoup(htmlcontents, 'html.parser')
anchors_selected = soup_selected.find_all("a")
all_links_selected = []
for link_selected in anchors_selected:
if(link_selected.get('href') != '#'):
linkText_selected = link_selected.get('href')
all_links_selected.append(linkText_selected)
all_links_selected = set(all_links_selected)
all_links_selected = list(all_links_selected)
subs2 = "magnet"
magnet_links2 = [i for i in all_links_selected if subs2 in i]
qb = Client("http://127.0.0.1:3500/")
qb.login("admin", "adminadmin")
magnet_url = magnet_links2[0]
qb.download_from_link(magnet_url)
|
[
"bs4.BeautifulSoup",
"qbittorrent.Client",
"requests.get"
] |
[((399, 416), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (411, 416), False, 'import requests\n'), ((452, 493), 'bs4.BeautifulSoup', 'BeautifulSoup', (['htmlcontent', '"""html.parser"""'], {}), "(htmlcontent, 'html.parser')\n", (465, 493), False, 'from bs4 import BeautifulSoup\n'), ((1528, 1554), 'requests.get', 'requests.get', (['url_selected'], {}), '(url_selected)\n', (1540, 1554), False, 'import requests\n'), ((1602, 1644), 'bs4.BeautifulSoup', 'BeautifulSoup', (['htmlcontents', '"""html.parser"""'], {}), "(htmlcontents, 'html.parser')\n", (1615, 1644), False, 'from bs4 import BeautifulSoup\n'), ((2098, 2130), 'qbittorrent.Client', 'Client', (['"""http://127.0.0.1:3500/"""'], {}), "('http://127.0.0.1:3500/')\n", (2104, 2130), False, 'from qbittorrent import Client\n')]
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from skimage.transform import estimate_transform
source = np.array([(129, 72),
(302, 76),
(90, 185),
(326, 193)])
target = np.array([[0, 0],
[400, 0],
[0, 400],
[400, 400]])
tf = estimate_transform('projective', source, target)
H = tf.params # in older versions of skimage, this should be
# H = tf._matrix
print(H)
# H = np.array([[ 3.04026872e+00, 1.04929628e+00, -4.67743998e+02],
# [ -1.44134582e-01, 6.23382067e+00, -4.30241727e+02],
# [ 2.63620673e-05, 4.17694527e-03, 1.00000000e+00]])
def rectify(xy):
x = xy[:, 0]
y = xy[:, 1]
# You must fill in your code here.
#
# Handy functions are:
#
# - np.dot (matrix multiplication)
# - np.ones_like (make an array of ones the same shape as another array)
# - np.column_stack
# - A.T -- type .T after a matrix to transpose it
# - x.reshape -- reshapes the array x
# We need to provide the backward mapping
HH = np.linalg.inv(H)
homogeneous_coordinates = np.column_stack([x, y, np.ones_like(x)])
xyz = np.dot(HH, homogeneous_coordinates.T)
# We want one coordinate per row
xyz = xyz.T
# Turn z into a column vector
z = xyz[:, 2]
z = z.reshape([len(z), 1])
xyz = xyz / z
return xyz[:, :2]
image = plt.imread('../../images/chapel_floor.png')
out = transform.warp(image, rectify, output_shape=(400, 400))
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4))
ax0.imshow(image)
ax1.imshow(out)
plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.array",
"numpy.linalg.inv",
"skimage.transform.warp",
"skimage.transform.estimate_transform",
"numpy.dot",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots"
] |
[((182, 237), 'numpy.array', 'np.array', (['[(129, 72), (302, 76), (90, 185), (326, 193)]'], {}), '([(129, 72), (302, 76), (90, 185), (326, 193)])\n', (190, 237), True, 'import numpy as np\n'), ((305, 355), 'numpy.array', 'np.array', (['[[0, 0], [400, 0], [0, 400], [400, 400]]'], {}), '([[0, 0], [400, 0], [0, 400], [400, 400]])\n', (313, 355), True, 'import numpy as np\n'), ((419, 467), 'skimage.transform.estimate_transform', 'estimate_transform', (['"""projective"""', 'source', 'target'], {}), "('projective', source, target)\n", (437, 467), False, 'from skimage.transform import estimate_transform\n'), ((1541, 1584), 'matplotlib.pyplot.imread', 'plt.imread', (['"""../../images/chapel_floor.png"""'], {}), "('../../images/chapel_floor.png')\n", (1551, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1646), 'skimage.transform.warp', 'transform.warp', (['image', 'rectify'], {'output_shape': '(400, 400)'}), '(image, rectify, output_shape=(400, 400))\n', (1605, 1646), False, 'from skimage import transform\n'), ((1664, 1698), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)'}), '(1, 2, figsize=(8, 4))\n', (1676, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1231), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1228, 1231), True, 'import numpy as np\n'), ((1314, 1351), 'numpy.dot', 'np.dot', (['HH', 'homogeneous_coordinates.T'], {}), '(HH, homogeneous_coordinates.T)\n', (1320, 1351), True, 'import numpy as np\n'), ((1286, 1301), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1298, 1301), True, 'import numpy as np\n')]
|
import StringIO
import unittest
import iq.combine_overlap_stats
class TestCombineOverlapStats(unittest.TestCase):
def test_simple(self):
exons = ['A1CF\t1\t2\t50.00\tALT1,ALT2', 'A2M\t3\t4\t75.00\t']
cds = ['A2M\t5\t6\t83.33\tALT3']
target = StringIO.StringIO()
log = StringIO.StringIO()
iq.combine_overlap_stats.combine(exons, cds, target, log)
lines = target.getvalue().split('\n')
assert len(lines) == 4
assert lines[1] == 'A1CF\t0\t1\t0\t2\t0\t50.00\tALT1,ALT2' # no cds data
assert lines[2] == 'A2M\t5\t3\t6\t4\t83.33\t75.00\tALT3' # data for both
assert lines[3] == ''
def test_case(self):
exons = ['a1CF\t1\t2\t50.00\tALT1,ALT2', 'A2m\t3\t4\t75.00\t']
cds = ['A2M\t5\t6\t83.33\tALT3']
target = StringIO.StringIO()
log = StringIO.StringIO()
iq.combine_overlap_stats.combine(exons, cds, target, log)
lines = target.getvalue().split('\n')
assert len(lines) == 4
assert lines[1] == 'a1CF\t0\t1\t0\t2\t0\t50.00\tALT1,ALT2' # no cds data
assert lines[2] == 'A2m\t5\t3\t6\t4\t83.33\t75.00\tALT3' # data for both
assert lines[3] == ''
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"StringIO.StringIO"
] |
[((1237, 1252), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1250, 1252), False, 'import unittest\n'), ((272, 291), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (289, 291), False, 'import StringIO\n'), ((306, 325), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (323, 325), False, 'import StringIO\n'), ((816, 835), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (833, 835), False, 'import StringIO\n'), ((850, 869), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (867, 869), False, 'import StringIO\n')]
|
"""
Test that computes the refined mean field approximation for the two-choice model
(with order 1 and 2 and a few parameter)
Compare the computed value with a value already stored in a pickle file
"""
import pickle
import numpy as np
from approximately_equal import approximately_equal
import os
PWD=os.getcwd()
if PWD[-5:] == 'tests':
CACHE_DIR = 'output_tests'
else:
CACHE_DIR = 'tests/output_tests'
import sys
sys.path.append('../')
sys.path.append('.')
import src.rmf_tool as rmf
def dChoiceModel(K, rho, d):
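    # Mean-field model of the power-of-d-choices (supermarket) system: x[i] tracks
    # the fraction of queues with more than i jobs; arrivals of rate rho join the
    # shortest of d randomly sampled queues.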
ddpp = rmf.DDPP()
# The vector 'e(i)' is a vector where the $i$th coordinate is equal to $1$ (the other being equal to $0$)
def e(i):
l = np.zeros(K)
l[i] = 1
return l
# We then add the transitions :
for i in range(K):
if i >= 1:
ddpp.add_transition(e(i),eval('lambda x: {}*(x[{}]**{} - x[{}]**{} )'.format(rho, i-1, d, i, d)))
if i < K-1:
ddpp.add_transition(-e(i),eval('lambda x: (x[{}] - x[{}])'.format(i,i+1) ))
ddpp.add_transition(e(0), lambda x : eval('{}*(1-x[0]**{})'.format(rho,d)))
ddpp.add_transition(-e(K-1), lambda x : x[K-1])
ddpp.set_initial_state(e(0))
return ddpp
def generate_data():
"""
Generate all data and store them in a pickle file
(to be used one times when the test is initialized)
"""
data = dict([])
for rho in [0.6, 0.7, 0.8, 0.9]:
for d in [2, 3]:
for K in [5, 9, 15, 20]:
for order in ([1, 2] if K <= 5 else [1]):
ddpp = dChoiceModel(K, rho, d)
data[(K, rho, d, order)] = ddpp.meanFieldExpansionSteadyState(order=order)
with open('{}/d_choice.pickle'.format(CACHE_DIR), 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def test_two_choice():
"""
Compare the new data with previously computed data.
"""
with open('{}/d_choice.pickle'.format(CACHE_DIR), 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
data = pickle.load(f)
for key in data:
(K,rho,d,order) = key
print(key)
ddpp = dChoiceModel(K, rho, d)
new_data = ddpp.meanFieldExpansionSteadyState(order=order)
test_data = data[key]
assert approximately_equal(new_data, test_data) <= 1e-8
#generate_data()
#test_two_choice()
|
[
"sys.path.append",
"src.rmf_tool.DDPP",
"pickle.dump",
"os.getcwd",
"numpy.zeros",
"pickle.load",
"approximately_equal.approximately_equal"
] |
[((303, 314), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (312, 314), False, 'import os\n'), ((425, 447), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (440, 447), False, 'import sys\n'), ((448, 468), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (463, 468), False, 'import sys\n'), ((537, 547), 'src.rmf_tool.DDPP', 'rmf.DDPP', ([], {}), '()\n', (545, 547), True, 'import src.rmf_tool as rmf\n'), ((685, 696), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (693, 696), True, 'import numpy as np\n'), ((1832, 1877), 'pickle.dump', 'pickle.dump', (['data', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(data, f, pickle.HIGHEST_PROTOCOL)\n', (1843, 1877), False, 'import pickle\n'), ((2161, 2175), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2172, 2175), False, 'import pickle\n'), ((2425, 2465), 'approximately_equal.approximately_equal', 'approximately_equal', (['new_data', 'test_data'], {}), '(new_data, test_data)\n', (2444, 2465), False, 'from approximately_equal import approximately_equal\n')]
|
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def build_chrome_driver(download_dir: str, headless=True,window_size=(1920,1080)):
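    # Configure an (optionally headless) Chrome that saves downloads, PDFs included,
    # straight into download_dir instead of opening them in the browser.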
os.makedirs(download_dir, exist_ok=True)
options = webdriver.ChromeOptions()
if headless:
options.add_argument("headless")
w,h = window_size
options.add_argument(f"--window-size={w},{h}")
# driver.execute_script("document.body.style.zoom='80 %'")
prefs = {
"download.default_directory": download_dir,
"plugins.always_open_pdf_externally": True, # don't open pdfs in browser but instead download them
}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
r"/usr/bin/chromedriver", chrome_options=options
) # provide the chromedriver execution path in case of error
driver.implicitly_wait(10) # seconds
return driver
def enter_keyboard_input(wd, xpath: str, value: str, clear_it=False,press_enter=False):
# wait = WebDriverWait(wd, 10)
# wait.until(EC.presence_of_element_located((By.xpath(value), "content")))
e = wd.find_element_by_xpath(xpath)
if clear_it:
e.clear()
e.send_keys(value)
if press_enter:
e.send_keys(Keys.ENTER)
def click_it(wd, xpath):
element = wd.find_element_by_xpath(xpath)
element.click()
|
[
"selenium.webdriver.ChromeOptions",
"os.makedirs",
"selenium.webdriver.Chrome"
] |
[((179, 219), 'os.makedirs', 'os.makedirs', (['download_dir'], {'exist_ok': '(True)'}), '(download_dir, exist_ok=True)\n', (190, 219), False, 'import os\n'), ((234, 259), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (257, 259), False, 'from selenium import webdriver\n'), ((700, 765), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""/usr/bin/chromedriver"""'], {'chrome_options': 'options'}), "('/usr/bin/chromedriver', chrome_options=options)\n", (716, 765), False, 'from selenium import webdriver\n')]
|
import time
import numpy as np
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
from keras.utils import plot_model
from CNNTripletModel import build_network, build_model
from BatchBuilder import get_batch_random_demo
input_shape = (28, 28, 1)
evaluate_every = 5
n_val = 5
batch_size = 20
data = np.load("/Users/niklastecklenburg/Desktop/Test/Data/images.npy")
labels = np.load("/Users/niklastecklenburg/Desktop/Test/Data/labels.npy")
data_train, data_test, labels_train, labels_test = train_test_split(
data, labels, test_size=0.2, random_state=42
)
network = build_network(input_shape, embeddingsize=10)
network_train = build_model(input_shape, network)
optimizer = Adam(lr=0.00006)
network_train.compile(loss=None, optimizer=optimizer)
network_train.summary()
plot_model(
network_train, show_shapes=True, show_layer_names=True, to_file="02 model.png"
)
print(network_train.metrics_names)
network_train.load_weights("mnist-160k_weights.h5")
t_start = time.time()
n_iteration = 0
for i in range(30):
# triplets = get_batch_hard(200,16,16,network)
triplets = get_batch_random_demo(data_train, labels_train, batch_size)
loss = network_train.train_on_batch(triplets, None)
print(loss)
# n_iteration += 1
# if i % evaluate_every == 0:
# print("\n ------------- \n")
# print("[{3}] Time for {0} iterations: {1:.1f} mins, Train Loss: {2}".format(i, (time.time()-t_start)/60.0,loss,n_iteration))
# probs,yprob = compute_probs(network,test_images[:n_val,:,:,:],y_test_origin[:n_val])
|
[
"numpy.load",
"CNNTripletModel.build_network",
"sklearn.model_selection.train_test_split",
"keras.optimizers.Adam",
"time.time",
"keras.utils.plot_model",
"BatchBuilder.get_batch_random_demo",
"CNNTripletModel.build_model"
] |
[((340, 404), 'numpy.load', 'np.load', (['"""/Users/niklastecklenburg/Desktop/Test/Data/images.npy"""'], {}), "('/Users/niklastecklenburg/Desktop/Test/Data/images.npy')\n", (347, 404), True, 'import numpy as np\n'), ((414, 478), 'numpy.load', 'np.load', (['"""/Users/niklastecklenburg/Desktop/Test/Data/labels.npy"""'], {}), "('/Users/niklastecklenburg/Desktop/Test/Data/labels.npy')\n", (421, 478), True, 'import numpy as np\n'), ((531, 593), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, labels, test_size=0.2, random_state=42)\n', (547, 593), False, 'from sklearn.model_selection import train_test_split\n'), ((611, 655), 'CNNTripletModel.build_network', 'build_network', (['input_shape'], {'embeddingsize': '(10)'}), '(input_shape, embeddingsize=10)\n', (624, 655), False, 'from CNNTripletModel import build_network, build_model\n'), ((672, 705), 'CNNTripletModel.build_model', 'build_model', (['input_shape', 'network'], {}), '(input_shape, network)\n', (683, 705), False, 'from CNNTripletModel import build_network, build_model\n'), ((718, 732), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(6e-05)'}), '(lr=6e-05)\n', (722, 732), False, 'from keras.optimizers import Adam\n'), ((813, 908), 'keras.utils.plot_model', 'plot_model', (['network_train'], {'show_shapes': '(True)', 'show_layer_names': '(True)', 'to_file': '"""02 model.png"""'}), "(network_train, show_shapes=True, show_layer_names=True, to_file=\n '02 model.png')\n", (823, 908), False, 'from keras.utils import plot_model\n'), ((1009, 1020), 'time.time', 'time.time', ([], {}), '()\n', (1018, 1020), False, 'import time\n'), ((1123, 1182), 'BatchBuilder.get_batch_random_demo', 'get_batch_random_demo', (['data_train', 'labels_train', 'batch_size'], {}), '(data_train, labels_train, batch_size)\n', (1144, 1182), False, 'from BatchBuilder import get_batch_random_demo\n')]
|
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data.dataset import Dataset
from math import cos, pi
import librosa
from scipy.io import wavfile
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def cycle(iterable):
"""
convert dataloader to iterator
:param iterable:
:return:
"""
while True:
for x in iterable:
yield x
class CosineLR(_LRScheduler):
"""cosine annealing.
"""
def __init__(self, optimizer, step_size_min=1e-5, t0=100, tmult=2, curr_epoch=-1, last_epoch=-1):
self.step_size_min = step_size_min
self.t0 = t0
self.tmult = tmult
self.epochs_since_restart = curr_epoch
super(CosineLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
self.epochs_since_restart += 1
if self.epochs_since_restart > self.t0:
self.t0 *= self.tmult
self.epochs_since_restart = 0
lrs = [self.step_size_min + (
0.5 * (base_lr - self.step_size_min) * (1 + cos(self.epochs_since_restart * pi / self.t0)))
for base_lr in self.base_lrs]
return lrs
class MelDataset(Dataset):
def __init__(self, X, y, crop=-1,
mixup=False, freqmask=False, gain=False,
crop_mode='original',crop_rate=0.25
):
self.X= X
self.y= y
self.crop = crop
self.mixup = mixup
self.freqmask = freqmask
self.gain = gain
self.crop_mode = crop_mode
self.crop_rate = crop_rate
def do_additional_crop(self, img):
len_img = img.shape[1]
img_new = np.zeros([img.shape[0], self.crop], np.float32)
rate = np.random.random() * (1 - self.crop_rate) + self.crop_rate
if np.random.random() < 0.5: rate = 1
if img.shape[1] <= self.crop:
len_crop = int(img.shape[1] * rate)
if img.shape[1] - len_crop == 0:
shift_crop = 0
else:
shift_crop = np.random.randint(0, img.shape[1] - len_crop)
img = img[:, shift_crop:shift_crop + len_crop]
if self.crop - len_crop == 0:
shift = 0
else:
shift = np.random.randint(0, self.crop - len_crop)
img_new[:, shift:shift + len_crop] = img
else:
shift = np.random.randint(0, img.shape[1] - self.crop)
img_new = img[:, shift:shift + self.crop]
len_crop = int(self.crop * rate)
if self.crop - len_crop == 0:
shift_crop = 0
else:
shift_crop = np.random.randint(0, self.crop - len_crop)
img_new[:shift_crop] = 0
img_new[shift_crop + len_crop:] = 0
return img_new
def do_random_crop(self, img):
img_new = np.zeros([img.shape[0], self.crop], np.float32)
if img.shape[1] < self.crop:
shift = np.random.randint(0, self.crop - img.shape[1])
img_new[:, shift:shift + img.shape[1]] = img
elif img.shape[1] == self.crop:
img_new = img
else:
shift = np.random.randint(0, img.shape[1] - self.crop)
img_new = img[:, shift:shift + self.crop]
return img_new
def do_crop(self, img):
if self.crop_mode == 'random':
return self.do_random_crop(img)
elif self.crop_mode == 'additional':
return self.do_additional_crop(img)
elif self.crop_mode == 'original':
return img
def do_mixup(self, img, label, alpha=1.):
idx = np.random.randint(0, len(self.X))
img2 = np.load("{}.npy".format(self.X[idx][:-4]))
img2 = self.do_crop(img2)
label2 = self.y[idx].astype(np.float32)
rate = np.random.beta(alpha, alpha)
img = img * rate + img2 * (1 - rate)
label = label * rate + label2 * (1 - rate)
return img, label
def do_freqmask(self, img, max=32):
coord = np.random.randint(0, img.shape[0])
width = np.random.randint(8, max)
cut = np.array([coord - width, coord + width])
cut = np.clip(cut, 0, img.shape[0])
img[cut[0]:cut[1]] = 0
return img
def do_gain(self, img, max=0.1):
rate = 1 - max + np.random.random() * max * 2
return img * rate
def __getitem__(self, index):
img = np.load("{}.npy".format(self.X[index][:-4]))
img = self.do_crop(img)
label = self.y[index].astype(np.float32)
if self.mixup and np.random.random() < 0.5:
img, label = self.do_mixup(img, label)
if self.gain and np.random.random() < 0.5:
img = self.do_gain(img)
if self.freqmask and np.random.random() < 0.5:
img = self.do_freqmask(img)
img = librosa.power_to_db(img)
img = (img - img.mean()) / (img.std() + 1e-7)
img = img.reshape([1, img.shape[0], img.shape[1]])
return img, label
def __len__(self):
return len(self.X)
def compute_gain(sound, fs, min_db=-80.0, mode='RMSE'):
if fs == 16000:
n_fft = 2048
elif fs == 44100:
n_fft = 4096
else:
raise Exception('Invalid fs {}'.format(fs))
stride = n_fft // 2
gain = []
for i in range(0, len(sound) - n_fft + 1, stride):
if mode == 'RMSE':
g = np.mean(sound[i: i + n_fft] ** 2)
elif mode == 'A_weighting':
spec = np.fft.rfft(np.hanning(n_fft + 1)[:-1] * sound[i: i + n_fft])
power_spec = np.abs(spec) ** 2
a_weighted_spec = power_spec * np.power(10, a_weight(fs, n_fft) / 10)
g = np.sum(a_weighted_spec)
else:
raise Exception('Invalid mode {}'.format(mode))
gain.append(g)
gain = np.array(gain)
gain = np.maximum(gain, np.power(10, min_db / 10))
gain_db = 10 * np.log10(gain)
return gain_db
def mix(sound1, sound2, r, fs):
gain1 = np.max(compute_gain(sound1, fs)) # Decibel
gain2 = np.max(compute_gain(sound2, fs))
t = 1.0 / (1 + np.power(10, (gain1 - gain2) / 20.) * (1 - r) / r)
sound = ((sound1 * t + sound2 * (1 - t)) / np.sqrt(t ** 2 + (1 - t) ** 2))
sound = sound.astype(np.float32)
return sound
class WaveDataset(Dataset):
def __init__(self, X, y,
crop=-1, crop_mode='original', padding=0,
mixup=False, scaling=-1, gain=-1,
fs=44100,
):
self.X = X
self.y = y
self.crop = crop
self.crop_mode = crop_mode
self.padding = padding
self.mixup = mixup
self.scaling = scaling
self.gain = gain
self.fs = fs
def preprocess(self, sound):
for f in self.preprocess_funcs:
sound = f(sound)
return sound
def do_padding(self, snd):
snd_new = np.pad(snd, self.padding, 'constant')
return snd_new
def do_crop(self, snd):
if self.crop_mode=='random':
shift = np.random.randint(0, snd.shape[0] - self.crop)
snd_new = snd[shift:shift + self.crop]
else:
snd_new = snd
return snd_new
def do_gain(self, snd):
snd_new = snd * np.power(10, random.uniform(-self.gain, self.gain) / 20.0)
return snd_new
def do_scaling(self, snd, interpolate='Nearest'):
scale = np.power(self.scaling, random.uniform(-1, 1))
output_size = int(len(snd) * scale)
ref = np.arange(output_size) / scale
if interpolate == 'Linear':
ref1 = ref.astype(np.int32)
ref2 = np.minimum(ref1+1, len(snd)-1)
r = ref - ref1
snd_new = snd[ref1] * (1-r) + snd[ref2] * r
elif interpolate == 'Nearest':
snd_new = snd[ref.astype(np.int32)]
else:
raise Exception('Invalid interpolation mode {}'.format(interpolate))
return snd_new
def do_mixup(self, snd, label, alpha=1):
idx2 = np.random.randint(0, len(self.X))
_, snd2 = wavfile.read("{}".format(self.X[idx2]))
label2 = self.y[idx2].astype(np.float32)
if self.scaling!=-1:
snd2 = self.do_scaling(snd2)
snd2 = self.do_padding(snd2)
snd2 = self.do_crop(snd2)
rate = np.random.beta(alpha, alpha)
        snd_new = mix(snd, snd2, rate, self.fs)
label_new = label * rate + label2 * (1 - rate)
return snd_new, label_new
def __getitem__(self, index):
_, snd = wavfile.read("{}".format(self.X[index]))
label = self.y[index].astype(np.float32)
if self.scaling!=-1:
snd = self.do_scaling(snd)
snd = self.do_padding(snd)
snd = self.do_crop(snd)
if self.mixup:
snd, label = self.do_mixup(snd, label)
if self.gain!=-1:
snd = self.do_gain(snd)
snd = snd.reshape([1, 1, -1]).astype(np.float32) / 32768.0
return snd, label
def __len__(self):
return len(self.X)
def _one_sample_positive_class_precisions(scores, truth):
"""Calculate precisions for each true class for a single sample.
Args:
scores: np.array of (num_classes,) giving the individual classifier scores.
truth: np.array of (num_classes,) bools indicating which classes are true.
Returns:
pos_class_indices: np.array of indices of the true classes for this sample.
pos_class_precisions: np.array of precisions corresponding to each of those
classes.
"""
num_classes = scores.shape[0]
pos_class_indices = np.flatnonzero(truth > 0)
# Only calculate precisions if there are some true classes.
if not len(pos_class_indices):
return pos_class_indices, np.zeros(0)
# Retrieval list of classes for this sample.
retrieved_classes = np.argsort(scores)[::-1]
# class_rankings[top_scoring_class_index] == 0 etc.
class_rankings = np.zeros(num_classes, dtype=np.int)
class_rankings[retrieved_classes] = range(num_classes)
# Which of these is a true label?
retrieved_class_true = np.zeros(num_classes, dtype=np.bool)
retrieved_class_true[class_rankings[pos_class_indices]] = True
# Num hits for every truncated retrieval list.
retrieved_cumulative_hits = np.cumsum(retrieved_class_true)
# Precision of retrieval list truncated at each hit, in order of pos_labels.
precision_at_hits = (
retrieved_cumulative_hits[class_rankings[pos_class_indices]] /
(1 + class_rankings[pos_class_indices].astype(np.float)))
return pos_class_indices, precision_at_hits
# All-in-one calculation of per-class lwlrap.
def calculate_per_class_lwlrap(truth, scores):
"""Calculate label-weighted label-ranking average precision.
Arguments:
truth: np.array of (num_samples, num_classes) giving boolean ground-truth
of presence of that class in that sample.
scores: np.array of (num_samples, num_classes) giving the classifier-under-
test's real-valued score for each class for each sample.
Returns:
per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each
class.
weight_per_class: np.array of (num_classes,) giving the prior of each
class within the truth labels. Then the overall unbalanced lwlrap is
simply np.sum(per_class_lwlrap * weight_per_class)
"""
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
# Space to store a distinct precision value for each class on each sample.
# Only the classes that are true for each sample will be filled in.
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = (
_one_sample_positive_class_precisions(scores[sample_num, :],
truth[sample_num, :]))
precisions_for_samples_by_classes[sample_num, pos_class_indices] = (
precision_at_hits)
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
# Form average of each column, i.e. all the precisions assigned to labels in
# a particular class.
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
# overall_lwlrap = simple average of all the actual per-class, per-sample precisions
# = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0)
# also = weighted mean of per-class lwlraps, weighted by class label prior across samples
# = np.sum(per_class_lwlrap * weight_per_class)
return per_class_lwlrap, weight_per_class
|
[
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.clip",
"numpy.argsort",
"numpy.random.randint",
"librosa.power_to_db",
"numpy.mean",
"numpy.arange",
"numpy.pad",
"numpy.power",
"numpy.cumsum",
"math.cos",
"numpy.hanning",
"numpy.log10",
"numpy.random.beta",
"random.uniform",
"numpy.flatnonzero",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.sqrt"
] |
[((6190, 6204), 'numpy.array', 'np.array', (['gain'], {}), '(gain)\n', (6198, 6204), True, 'import numpy as np\n'), ((9986, 10011), 'numpy.flatnonzero', 'np.flatnonzero', (['(truth > 0)'], {}), '(truth > 0)\n', (10000, 10011), True, 'import numpy as np\n'), ((10332, 10367), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'np.int'}), '(num_classes, dtype=np.int)\n', (10340, 10367), True, 'import numpy as np\n'), ((10492, 10528), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'np.bool'}), '(num_classes, dtype=np.bool)\n', (10500, 10528), True, 'import numpy as np\n'), ((10679, 10710), 'numpy.cumsum', 'np.cumsum', (['retrieved_class_true'], {}), '(retrieved_class_true)\n', (10688, 10710), True, 'import numpy as np\n'), ((12067, 12103), 'numpy.zeros', 'np.zeros', (['(num_samples, num_classes)'], {}), '((num_samples, num_classes))\n', (12075, 12103), True, 'import numpy as np\n'), ((12472, 12497), 'numpy.sum', 'np.sum', (['(truth > 0)'], {'axis': '(0)'}), '(truth > 0, axis=0)\n', (12478, 12497), True, 'import numpy as np\n'), ((2029, 2076), 'numpy.zeros', 'np.zeros', (['[img.shape[0], self.crop]', 'np.float32'], {}), '([img.shape[0], self.crop], np.float32)\n', (2037, 2076), True, 'import numpy as np\n'), ((3223, 3270), 'numpy.zeros', 'np.zeros', (['[img.shape[0], self.crop]', 'np.float32'], {}), '([img.shape[0], self.crop], np.float32)\n', (3231, 3270), True, 'import numpy as np\n'), ((4179, 4207), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (4193, 4207), True, 'import numpy as np\n'), ((4388, 4422), 'numpy.random.randint', 'np.random.randint', (['(0)', 'img.shape[0]'], {}), '(0, img.shape[0])\n', (4405, 4422), True, 'import numpy as np\n'), ((4439, 4464), 'numpy.random.randint', 'np.random.randint', (['(8)', 'max'], {}), '(8, max)\n', (4456, 4464), True, 'import numpy as np\n'), ((4479, 4519), 'numpy.array', 'np.array', (['[coord - width, coord + width]'], {}), '([coord - width, coord + width])\n', (4487, 4519), True, 'import numpy as np\n'), ((4534, 4563), 'numpy.clip', 'np.clip', (['cut', '(0)', 'img.shape[0]'], {}), '(cut, 0, img.shape[0])\n', (4541, 4563), True, 'import numpy as np\n'), ((5208, 5232), 'librosa.power_to_db', 'librosa.power_to_db', (['img'], {}), '(img)\n', (5227, 5232), False, 'import librosa\n'), ((6233, 6258), 'numpy.power', 'np.power', (['(10)', '(min_db / 10)'], {}), '(10, min_db / 10)\n', (6241, 6258), True, 'import numpy as np\n'), ((6279, 6293), 'numpy.log10', 'np.log10', (['gain'], {}), '(gain)\n', (6287, 6293), True, 'import numpy as np\n'), ((6566, 6596), 'numpy.sqrt', 'np.sqrt', (['(t ** 2 + (1 - t) ** 2)'], {}), '(t ** 2 + (1 - t) ** 2)\n', (6573, 6596), True, 'import numpy as np\n'), ((7277, 7314), 'numpy.pad', 'np.pad', (['snd', 'self.padding', '"""constant"""'], {}), "(snd, self.padding, 'constant')\n", (7283, 7314), True, 'import numpy as np\n'), ((8700, 8728), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (8714, 8728), True, 'import numpy as np\n'), ((10230, 10248), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (10240, 10248), True, 'import numpy as np\n'), ((12703, 12752), 'numpy.sum', 'np.sum', (['precisions_for_samples_by_classes'], {'axis': '(0)'}), '(precisions_for_samples_by_classes, axis=0)\n', (12709, 12752), True, 'import numpy as np\n'), ((12779, 12810), 'numpy.maximum', 'np.maximum', (['(1)', 'labels_per_class'], {}), '(1, labels_per_class)\n', (12789, 12810), True, 'import numpy as np\n'), ((2162, 2180), 'numpy.random.random', 
'np.random.random', ([], {}), '()\n', (2178, 2180), True, 'import numpy as np\n'), ((2752, 2798), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - self.crop)'], {}), '(0, img.shape[1] - self.crop)\n', (2769, 2798), True, 'import numpy as np\n'), ((3328, 3374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - img.shape[1])'], {}), '(0, self.crop - img.shape[1])\n', (3345, 3374), True, 'import numpy as np\n'), ((5765, 5797), 'numpy.mean', 'np.mean', (['(sound[i:i + n_fft] ** 2)'], {}), '(sound[i:i + n_fft] ** 2)\n', (5772, 5797), True, 'import numpy as np\n'), ((7424, 7470), 'numpy.random.randint', 'np.random.randint', (['(0)', '(snd.shape[0] - self.crop)'], {}), '(0, snd.shape[0] - self.crop)\n', (7441, 7470), True, 'import numpy as np\n'), ((7814, 7835), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (7828, 7835), False, 'import random\n'), ((7895, 7917), 'numpy.arange', 'np.arange', (['output_size'], {}), '(output_size)\n', (7904, 7917), True, 'import numpy as np\n'), ((10145, 10156), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10153, 10156), True, 'import numpy as np\n'), ((12546, 12570), 'numpy.sum', 'np.sum', (['labels_per_class'], {}), '(labels_per_class)\n', (12552, 12570), True, 'import numpy as np\n'), ((2092, 2110), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2108, 2110), True, 'import numpy as np\n'), ((2407, 2452), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - len_crop)'], {}), '(0, img.shape[1] - len_crop)\n', (2424, 2452), True, 'import numpy as np\n'), ((2622, 2664), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - len_crop)'], {}), '(0, self.crop - len_crop)\n', (2639, 2664), True, 'import numpy as np\n'), ((3018, 3060), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - len_crop)'], {}), '(0, self.crop - len_crop)\n', (3035, 3060), True, 'import numpy as np\n'), ((3532, 3578), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - self.crop)'], {}), '(0, img.shape[1] - self.crop)\n', (3549, 3578), True, 'import numpy as np\n'), ((4934, 4952), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4950, 4952), True, 'import numpy as np\n'), ((5036, 5054), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5052, 5054), True, 'import numpy as np\n'), ((5127, 5145), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5143, 5145), True, 'import numpy as np\n'), ((6057, 6080), 'numpy.sum', 'np.sum', (['a_weighted_spec'], {}), '(a_weighted_spec)\n', (6063, 6080), True, 'import numpy as np\n'), ((4677, 4695), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4693, 4695), True, 'import numpy as np\n'), ((5941, 5953), 'numpy.abs', 'np.abs', (['spec'], {}), '(spec)\n', (5947, 5953), True, 'import numpy as np\n'), ((6468, 6504), 'numpy.power', 'np.power', (['(10)', '((gain1 - gain2) / 20.0)'], {}), '(10, (gain1 - gain2) / 20.0)\n', (6476, 6504), True, 'import numpy as np\n'), ((7651, 7688), 'random.uniform', 'random.uniform', (['(-self.gain)', 'self.gain'], {}), '(-self.gain, self.gain)\n', (7665, 7688), False, 'import random\n'), ((1413, 1458), 'math.cos', 'cos', (['(self.epochs_since_restart * pi / self.t0)'], {}), '(self.epochs_since_restart * pi / self.t0)\n', (1416, 1458), False, 'from math import cos, pi\n'), ((5866, 5887), 'numpy.hanning', 'np.hanning', (['(n_fft + 1)'], {}), '(n_fft + 1)\n', (5876, 5887), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def make_pulses(data, T, pulse):
widen = np.zeros(len(data) * T, dtype=np.complex64)
for idx, val in enumerate(widen):
if idx % T == 0:
widen[idx] = data[ idx//T ]
return np.array(np.convolve(widen, pulse, 'full'), dtype=np.complex64)
def raised_cosine(size, T):
W = 1/T
pulse = np.zeros(size, dtype=np.complex64)
alpha = 0.5
for idx, t in enumerate(range(-size//T, size//T)):
val = np.sinc(2*W*t) * ( np.cos( 2*np.pi*alpha*W*t )/( 1 - 16 * (alpha**2) * (W**2) * (t**2)) )
        pulse[idx] = val
plt.plot(pulse)
plt.show()
exit()
return pulse
if __name__ == "__main__":
data_path = '../data/'
# Gen noise
np.random.seed(45)
noise_size = 10000
noise1 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
noise2 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
# Make noise into pulses
T = 10
pulse = np.ones(10)
noise1 = make_pulses(noise1, T, pulse)
noise2 = make_pulses(noise2, T, pulse)
# Save noise for cross correlation later
noise1.tofile(data_path + "noise_1.bin")
noise2.tofile(data_path + "noise_2.bin")
# Make filler so we can send everything at once
zeros_gap = np.zeros(10000)
zeros = np.zeros(len(noise1))
# Data for channel 1
channel1 = np.concatenate( [noise1, zeros_gap, zeros] )
channel2 = np.concatenate( [zeros, zeros_gap, noise2] )
channel1 = np.array( channel1, dtype=np.complex64 )
channel2 = np.array( channel2, dtype=np.complex64 )
# Save out data
channel1.tofile(data_path + "noise_1_transmit.bin")
channel2.tofile(data_path + "noise_2_transmit.bin")
# Plot for verification
plt.plot(channel1)
plt.plot(channel2)
plt.show()
|
[
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.ones",
"numpy.sinc",
"numpy.array",
"numpy.cos",
"numpy.random.choice",
"numpy.convolve",
"numpy.concatenate"
] |
[((374, 408), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.complex64'}), '(size, dtype=np.complex64)\n', (382, 408), True, 'import numpy as np\n'), ((617, 632), 'matplotlib.pyplot.plot', 'plt.plot', (['pulse'], {}), '(pulse)\n', (625, 632), True, 'import matplotlib.pyplot as plt\n'), ((637, 647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (645, 647), True, 'import matplotlib.pyplot as plt\n'), ((758, 776), 'numpy.random.seed', 'np.random.seed', (['(45)'], {}), '(45)\n', (772, 776), True, 'import numpy as np\n'), ((993, 1004), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1000, 1004), True, 'import numpy as np\n'), ((1296, 1311), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (1304, 1311), True, 'import numpy as np\n'), ((1387, 1429), 'numpy.concatenate', 'np.concatenate', (['[noise1, zeros_gap, zeros]'], {}), '([noise1, zeros_gap, zeros])\n', (1401, 1429), True, 'import numpy as np\n'), ((1447, 1489), 'numpy.concatenate', 'np.concatenate', (['[zeros, zeros_gap, noise2]'], {}), '([zeros, zeros_gap, noise2])\n', (1461, 1489), True, 'import numpy as np\n'), ((1508, 1546), 'numpy.array', 'np.array', (['channel1'], {'dtype': 'np.complex64'}), '(channel1, dtype=np.complex64)\n', (1516, 1546), True, 'import numpy as np\n'), ((1564, 1602), 'numpy.array', 'np.array', (['channel2'], {'dtype': 'np.complex64'}), '(channel2, dtype=np.complex64)\n', (1572, 1602), True, 'import numpy as np\n'), ((1771, 1789), 'matplotlib.pyplot.plot', 'plt.plot', (['channel1'], {}), '(channel1)\n', (1779, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1812), 'matplotlib.pyplot.plot', 'plt.plot', (['channel2'], {}), '(channel2)\n', (1802, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1825, 1827), True, 'import matplotlib.pyplot as plt\n'), ((266, 299), 'numpy.convolve', 'np.convolve', (['widen', 'pulse', '"""full"""'], {}), "(widen, pulse, 'full')\n", (277, 299), True, 'import numpy as np\n'), ((822, 868), 'numpy.random.choice', 'np.random.choice', (['[0.5, -0.5]'], {'size': 'noise_size'}), '([0.5, -0.5], size=noise_size)\n', (838, 868), True, 'import numpy as np\n'), ((892, 938), 'numpy.random.choice', 'np.random.choice', (['[0.5, -0.5]'], {'size': 'noise_size'}), '([0.5, -0.5], size=noise_size)\n', (908, 938), True, 'import numpy as np\n'), ((499, 517), 'numpy.sinc', 'np.sinc', (['(2 * W * t)'], {}), '(2 * W * t)\n', (506, 517), True, 'import numpy as np\n'), ((518, 551), 'numpy.cos', 'np.cos', (['(2 * np.pi * alpha * W * t)'], {}), '(2 * np.pi * alpha * W * t)\n', (524, 551), True, 'import numpy as np\n')]
|
from django.shortcuts import render
from ..model import SessionMaker, ManagementScenario
from ..model import (LITTLE_DELL_VOLUME,
LITTLE_DELL_RELEASE,
LITTLE_DELL_SPILL,
MOUNTAIN_DELL_VOLUME,
MOUNTAIN_DELL_RELEASE,
MOUNTAIN_DELL_SPILL,
DELL_CREEK_INFLOW,
LAMBS_CREEK_INFLOW,
RELIABILITY)
def view(request, scenario_id, plot_name):
"""
Default action for results controller
"""
session = SessionMaker()
scenario = session.query(ManagementScenario.name,
ManagementScenario.results_link,
ManagementScenario.reliability). \
filter(ManagementScenario.id == scenario_id). \
one()
# Access Plot data
data_source = ManagementScenario.get_results_dataset(scenario_id, plot_name)
# Other data for template
scenario_name = scenario.name
results_link = scenario.results_link
reliability = scenario.reliability
# Plot vars
plot_title = data_source['title']
plot_subtitle = data_source['subtitle']
y_axis_title = data_source['y_axis_title']
y_axis_units = data_source['y_axis_units']
series_data = data_source['series']
# Setup plot
highcharts_object = {
'chart': {
'type': 'line',
'zoomType': 'x'
},
'title': {
'text': plot_title
},
'subtitle': {
'text': plot_subtitle
},
'legend': {
'enabled': False,
'layout': 'vertical',
'align': 'right',
'verticalAlign': 'middle',
'borderWidth': 0
},
'xAxis': {
'title': {
'enabled': False
},
'type': 'datetime',
'maxZoom': 30 * 24 * 3600000, # 30 days in milliseconds
},
'yAxis': {
'title': {
'text': y_axis_title + ' (' + y_axis_units + ')'
}
},
'tooltip': {
'pointFormat': '{point.y} ' + y_axis_units
},
'series': [{'color': '#0066ff',
'marker': {'enabled': False},
'data': series_data}
]
}
timeseries = {'highcharts_object': highcharts_object,
'width': '100%',
'height': '500px'}
# Template context
type(scenario_id)
context = {'scenario_id': str(scenario_id),
'plot_name': plot_name,
'scenario_name': scenario_name,
'results_link': results_link,
'reliability': round(reliability, 2),
'LITTLE_DELL_VOLUME': LITTLE_DELL_VOLUME,
'LITTLE_DELL_RELEASE': LITTLE_DELL_RELEASE,
'LITTLE_DELL_SPILL': LITTLE_DELL_SPILL,
'MOUNTAIN_DELL_VOLUME': MOUNTAIN_DELL_VOLUME,
'MOUNTAIN_DELL_RELEASE': MOUNTAIN_DELL_RELEASE,
'MOUNTAIN_DELL_SPILL': MOUNTAIN_DELL_SPILL,
'DELL_CREEK_INFLOW': DELL_CREEK_INFLOW,
'LAMBS_CREEK_INFLOW': LAMBS_CREEK_INFLOW,
'RELIABILITY': RELIABILITY,
'timeseries': timeseries}
session.close()
return render(request, 'parleys_creek_management/results/results_viewer.html', context)
|
[
"django.shortcuts.render"
] |
[((3333, 3418), 'django.shortcuts.render', 'render', (['request', '"""parleys_creek_management/results/results_viewer.html"""', 'context'], {}), "(request, 'parleys_creek_management/results/results_viewer.html', context\n )\n", (3339, 3418), False, 'from django.shortcuts import render\n')]
|
#! /usr/bin/python3
# run this from the root of a git repository with the command-line arguments
# described in the usage statement below
import sys
import subprocess
import os
AUTHOR = "<NAME> <<EMAIL>>"
TIMEZONE = "-0700"
DESIRED_COMMIT_MESSAGE = "added self-referential commit hash using magic"
DESIRED_COMMIT_TIMESTAMP = "1591753853"
# timestamp (formatted as seconds since UNIX epoch)
# to get git to make a commit at the right time, run the following before
# git commit:
#
# export GIT_COMMITTER_DATE='<timestamp>'
# export GIT_AUTHOR_DATE='<timestamp>'
# values for SHA-1
DIGEST_LEN = 20
HEXDIGEST_LEN = 40
PREFIX_LEN = 6
if len(sys.argv) != 4:
print("usage: parse_target_repo.py (path to target file) (text to replace with hash) (output directory)")
sys.exit(1)
target_file = sys.argv[1]
to_replace = bytes(sys.argv[2], encoding="utf-8")
out_dir = sys.argv[3]
dir_layers = [None] + \
[ bytes(l, encoding="utf-8") for l in target_file.split("/") ]
print("reading relevant hashes from git...")
hashes = [ subprocess.check_output(["git", "rev-parse", "HEAD"])[:-1] ]
for i, layer in enumerate(dir_layers):
curr_tree = subprocess.check_output(["git", "cat-file", "-p", hashes[-1]])
if i == 0:
hash = curr_tree[5: 5 + HEXDIGEST_LEN]
else:
hash_end = curr_tree.find(b"\t%s\n" % layer)
hash_start = hash_end - HEXDIGEST_LEN
hash = curr_tree[hash_start:hash_end]
hashes.append(hash)
print("reading relevant objects from .git/objects...")
hashes = hashes[::-1]
# reverse order of hashes so the blob we are writing to is first in the output
merkle_layer_prefixes = []
merkle_layer_suffixes = []
# Git stores the file tree in a Merkle Tree (the root of a tree where each
# parent is the SHA-1 hash of its children's hashes in a certain format)
digest_types = [False for _ in range(len(hashes) - 2)] + [True, False]
# depending on the point, Git either feeds the bytes in direcly or
# (for the commit) feeds in a hexadecimal string
# True = hexdigest
# False = digest
def tree_unprettify(tree_pretty_printed):
out = b""
for line in tree_pretty_printed.splitlines():
toks = line.split()
out += toks[0] + b" " # mode
out += toks[3] + b"\0" # filename (no spaces in any fname assumed)
out += bytes.fromhex(toks[2].decode())
return out
for i in range(len(hashes) - 1):
hash = hashes[i].decode()
git_obj_type = subprocess.check_output(["git", "cat-file", "-t", hash])[:-1]
git_obj_size = subprocess.check_output(["git", "cat-file", "-s", hash])[:-1]
git_obj_body = subprocess.check_output(["git", "cat-file", "-p", hash])
if i > 0:
git_obj_body = tree_unprettify(git_obj_body)
git_obj_contents = b"%s %s\0%s" % (git_obj_type, git_obj_size, git_obj_body)
if i == 0:
prefix_end = git_obj_contents.find(to_replace)
suffix_begin = prefix_end + len(to_replace)
else:
if digest_types[i - 1]:
prev_hash = bytes(prev_hash, encoding='utf-8')
else:
prev_hash = bytes.fromhex(prev_hash)
prefix_end = git_obj_contents.find(prev_hash)
suffix_begin = prefix_end + \
(HEXDIGEST_LEN if digest_types[i - 1] else DIGEST_LEN)
merkle_layer_prefixes.append(git_obj_contents[:prefix_end])
merkle_layer_suffixes.append(git_obj_contents[suffix_begin:])
prev_hash = hash
commit_suffix = bytes("""
parent {parent_commit}
author {author_str} {timestamp} {timezone}
committer {author_str} {timestamp} {timezone}
{commit_message}
""".format(parent_commit=hashes[-1].decode(), author_str=AUTHOR,
timestamp=DESIRED_COMMIT_TIMESTAMP, timezone=TIMEZONE,
commit_message=DESIRED_COMMIT_MESSAGE), encoding="utf-8")
commit_prefix = bytes("commit {}\0tree ".format(
len(commit_suffix) + 5 + HEXDIGEST_LEN), encoding="utf-8")
# total size is suffix + tree hash + len("tree ")
merkle_layer_prefixes.append(commit_prefix)
merkle_layer_suffixes.append(commit_suffix)
# ensure blob header is accurate with prefix length
merkle_layer_prefixes[0] = merkle_layer_prefixes[0][ merkle_layer_prefixes[0].find(b"\0") + 1:]
actual_size = len(merkle_layer_prefixes[0]) + len(merkle_layer_suffixes[0]) + PREFIX_LEN
merkle_layer_prefixes[0] = (b"blob %d\0" % actual_size) + merkle_layer_prefixes[0]
print("saving bytes to directory...")
os.makedirs(out_dir + "/prefixes")
os.makedirs(out_dir + "/suffixes")
i = 0
for prefix, suffix, digest_type in zip(merkle_layer_prefixes, merkle_layer_suffixes, digest_types):
with open("{}/prefixes/{}.txt".format(out_dir, i), "wb") as f:
f.write(prefix)
with open("{}/suffixes/{}.txt".format(out_dir, i), "wb") as f:
f.write(suffix)
i += 1
with open(out_dir + "/digest_bits.txt", "a") as f:
f.write(" ".join(map(lambda b: str(int(b)), digest_types)))
|
[
"subprocess.check_output",
"os.makedirs",
"sys.exit"
] |
[((4359, 4393), 'os.makedirs', 'os.makedirs', (["(out_dir + '/prefixes')"], {}), "(out_dir + '/prefixes')\n", (4370, 4393), False, 'import os\n'), ((4394, 4428), 'os.makedirs', 'os.makedirs', (["(out_dir + '/suffixes')"], {}), "(out_dir + '/suffixes')\n", (4405, 4428), False, 'import os\n'), ((772, 783), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (780, 783), False, 'import sys\n'), ((1150, 1212), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'cat-file', '-p', hashes[-1]]"], {}), "(['git', 'cat-file', '-p', hashes[-1]])\n", (1173, 1212), False, 'import subprocess\n'), ((2598, 2654), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'cat-file', '-p', hash]"], {}), "(['git', 'cat-file', '-p', hash])\n", (2621, 2654), False, 'import subprocess\n'), ((1033, 1086), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', 'HEAD']"], {}), "(['git', 'rev-parse', 'HEAD'])\n", (1056, 1086), False, 'import subprocess\n'), ((2435, 2491), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'cat-file', '-t', hash]"], {}), "(['git', 'cat-file', '-t', hash])\n", (2458, 2491), False, 'import subprocess\n'), ((2516, 2572), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'cat-file', '-s', hash]"], {}), "(['git', 'cat-file', '-s', hash])\n", (2539, 2572), False, 'import subprocess\n')]
|
#!/usr/bin/env python
# coding: utf-8
import logging
import argparse
import importlib
from tropiac.utils import make_cloudformation_client, load_config, get_log_level
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, required=True,
help='the name of the stack to create.')
parser.add_argument('--config', type=str, required=True,
help='the name of the configuration section to use.')
parser.add_argument('--log', type=str, default="INFO", required=False,
help='which log level. DEBUG, INFO, WARNING, CRITICAL')
args = parser.parse_args()
# init LOGGER
stack = importlib.import_module('tropiac.stacks.{0}'.format(args.name))
cfg = stack.get_config()
template = stack.make_template(cfg[args.config])
print(template.to_json())
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"logging.getLogger"
] |
[((288, 315), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (305, 315), False, 'import logging\n'), ((342, 367), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (365, 367), False, 'import argparse\n')]
|
"""
This program is the interface and driver for tsrFinder
"""
import os
import sys
import argparse
import multiprocessing
from collections import defaultdict
from multiprocessing import Pool
from PolTools.utils.constants import tsr_finder_location
from PolTools.utils.tsr_finder_step_four_from_rocky import run_step_four
from PolTools.utils.remove_files import remove_files
def positive_int(num):
try:
val = int(num)
if val <= 0:
raise Exception("Go to the except")
except:
raise argparse.ArgumentTypeError(num + " must be positive")
return val
parser = argparse.ArgumentParser(prog='PolTools tsrFinder',
description='Find transcription start regions\n' +
"More information can be found at " +
"https://geoffscollins.github.io/PolTools/tsrFinder.html")
parser.add_argument('seq_file', metavar='seq_file', type=str, help='Bed formatted sequencing file to find the TSRs')
parser.add_argument('window_size', metavar='window_size', type=positive_int, help='Base pair size of the sliding window')
parser.add_argument('min_seq_depth', metavar='min_seq_depth', type=positive_int, help="Minimum number of 5' ends to be considered a TSR")
parser.add_argument('min_avg_transcript_length', metavar='min_avg_transcript_length', type=positive_int, help="Minimum average transcript length to be considered a TSR")
parser.add_argument('max_fragment_size', metavar='max_fragment_size', type=positive_int, help="Maximum fragment size for a read to be counted in tsrFinder")
parser.add_argument('chrom_size_file', metavar='chrom_size_file', type=str, help="Chromosome sizes file")
parser.add_argument('-t', '--threads', dest='threads', metavar='threads', type=positive_int, nargs='?', default=multiprocessing.cpu_count())
args = parser.parse_args(sys.argv[1:])
bed_file = args.seq_file
window_size = args.window_size
min_seq_depth = args.min_seq_depth
min_avg_transcript_length = args.min_avg_transcript_length
max_fragment_size = args.max_fragment_size
chrom_size_file = args.chrom_size_file
max_threads = args.threads
# Make sure bed_file and chrom_size_file exist
if not os.path.isfile(bed_file):
sys.stderr.write(bed_file + " was not found. Exiting ...\n")
sys.exit(1)
if not os.path.isfile(chrom_size_file):
sys.stderr.write(chrom_size_file + " was not found. Exiting ...\n")
sys.exit(1)
if not bed_file.endswith(".bed"):
sys.stderr.write("The sequencing file must end in .bed. Exiting ...\n")
sys.exit(1)
chromosome_sizes = defaultdict(int)
with open(chrom_size_file) as file:
for line in file:
chromosome, size = line.split()
chromosome_sizes[chromosome] = int(size)
# Step 1. Split the bed file into files by chromosome and strands
fw_filename = bed_file.replace(".bed", "-FW.bed")
rv_filename = bed_file.replace(".bed", "-RV.bed")
parameters_string = "_".join([str(window_size), str(min_seq_depth), str(min_avg_transcript_length), str(max_fragment_size)])
output_filename = bed_file.replace(".bed", "_" + parameters_string + "-TSR.tab")
chromosome_file_writers = defaultdict(lambda : {"+": None, "-": None})
chromosome_files = []
tsr_finder_step_files = []
output_files = []
with open(bed_file) as file:
for line in file:
chromosome, left, right, name, score, strand = line.split()
if chromosome in chromosome_sizes:
if chromosome not in chromosome_file_writers:
fw_filename = bed_file.replace(".bed", "-" + chromosome + "-FW.bed")
rv_filename = bed_file.replace(".bed", "-" + chromosome + "-RV.bed")
chromosome_file_writers[chromosome]["+"] = open(fw_filename, 'w')
chromosome_file_writers[chromosome]["-"] = open(rv_filename, 'w')
chromosome_files.extend([fw_filename, rv_filename])
for i in range(2, 5):
tsr_finder_step_files.append(fw_filename.replace(".bed", "-" + str(i) + "-output.txt"))
tsr_finder_step_files.append(rv_filename.replace(".bed", "-" + str(i) + "-output.txt"))
output_files.append(fw_filename.replace(".bed", "-4-output.txt"))
output_files.append(rv_filename.replace(".bed", "-4-output.txt"))
chromosome_file_writers[chromosome][strand].write(line)
# Need to close all the writers
for chromosome in chromosome_file_writers:
chromosome_file_writers[chromosome]["+"].close()
chromosome_file_writers[chromosome]["-"].close()
# Step 2: Run tsrFinder on both files concurrently
def run_tsrFinderGC(filename):
os.system(tsr_finder_location + " " + filename + " " +
" ".join([str(window_size), str(min_seq_depth), str(min_avg_transcript_length),
str(max_fragment_size), chrom_size_file]))
step_three_filename = filename.replace(".bed", "-3-output.txt")
step_four_filename = filename.replace(".bed", "-4-output.txt")
run_step_four(step_three_filename, window_size, chromosome_sizes, step_four_filename)
with Pool(max_threads) as pool:
pool.map(run_tsrFinderGC, (filename for filename in chromosome_files))
# # Step 3: Combine the output files and delete intermediate files
os.system("cat " + " ".join(output_files) + " > " + output_filename)
remove_files(tsr_finder_step_files)
remove_files(chromosome_files)
|
[
"argparse.ArgumentParser",
"PolTools.utils.tsr_finder_step_four_from_rocky.run_step_four",
"argparse.ArgumentTypeError",
"collections.defaultdict",
"os.path.isfile",
"sys.stderr.write",
"multiprocessing.Pool",
"PolTools.utils.remove_files.remove_files",
"sys.exit",
"multiprocessing.cpu_count"
] |
[((609, 823), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""PolTools tsrFinder"""', 'description': "('Find transcription start regions\\n' + 'More information can be found at ' +\n 'https://geoffscollins.github.io/PolTools/tsrFinder.html')"}), '(prog=\'PolTools tsrFinder\', description=\n """Find transcription start regions\n""" +\n \'More information can be found at \' +\n \'https://geoffscollins.github.io/PolTools/tsrFinder.html\')\n', (632, 823), False, 'import argparse\n'), ((2597, 2613), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2608, 2613), False, 'from collections import defaultdict\n'), ((3164, 3208), 'collections.defaultdict', 'defaultdict', (["(lambda : {'+': None, '-': None})"], {}), "(lambda : {'+': None, '-': None})\n", (3175, 3208), False, 'from collections import defaultdict\n'), ((5351, 5386), 'PolTools.utils.remove_files.remove_files', 'remove_files', (['tsr_finder_step_files'], {}), '(tsr_finder_step_files)\n', (5363, 5386), False, 'from PolTools.utils.remove_files import remove_files\n'), ((5387, 5417), 'PolTools.utils.remove_files.remove_files', 'remove_files', (['chromosome_files'], {}), '(chromosome_files)\n', (5399, 5417), False, 'from PolTools.utils.remove_files import remove_files\n'), ((2214, 2238), 'os.path.isfile', 'os.path.isfile', (['bed_file'], {}), '(bed_file)\n', (2228, 2238), False, 'import os\n'), ((2244, 2304), 'sys.stderr.write', 'sys.stderr.write', (["(bed_file + ' was not found. Exiting ...\\n')"], {}), "(bed_file + ' was not found. Exiting ...\\n')\n", (2260, 2304), False, 'import sys\n'), ((2309, 2320), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2317, 2320), False, 'import sys\n'), ((2329, 2360), 'os.path.isfile', 'os.path.isfile', (['chrom_size_file'], {}), '(chrom_size_file)\n', (2343, 2360), False, 'import os\n'), ((2366, 2433), 'sys.stderr.write', 'sys.stderr.write', (["(chrom_size_file + ' was not found. Exiting ...\\n')"], {}), "(chrom_size_file + ' was not found. Exiting ...\\n')\n", (2382, 2433), False, 'import sys\n'), ((2438, 2449), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2446, 2449), False, 'import sys\n'), ((2489, 2560), 'sys.stderr.write', 'sys.stderr.write', (['"""The sequencing file must end in .bed. Exiting ...\n"""'], {}), "('The sequencing file must end in .bed. Exiting ...\\n')\n", (2505, 2560), False, 'import sys\n'), ((2565, 2576), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2573, 2576), False, 'import sys\n'), ((5019, 5108), 'PolTools.utils.tsr_finder_step_four_from_rocky.run_step_four', 'run_step_four', (['step_three_filename', 'window_size', 'chromosome_sizes', 'step_four_filename'], {}), '(step_three_filename, window_size, chromosome_sizes,\n step_four_filename)\n', (5032, 5108), False, 'from PolTools.utils.tsr_finder_step_four_from_rocky import run_step_four\n'), ((5111, 5128), 'multiprocessing.Pool', 'Pool', (['max_threads'], {}), '(max_threads)\n', (5115, 5128), False, 'from multiprocessing import Pool\n'), ((1830, 1857), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1855, 1857), False, 'import multiprocessing\n'), ((529, 582), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["(num + ' must be positive')"], {}), "(num + ' must be positive')\n", (555, 582), False, 'import argparse\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge import units
from morphforge.simulation.base import CurrentClamp
from morphforge.simulation.neuron.objects.neuronobject import NEURONObject
from morphforge.constants.standardtags import StandardTags
from morphforge.simulation.neuron.simulationdatacontainers import MHocFileData
#from morphforge.units import qty
from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils
from morphforge.simulation.neuron.hocmodbuilders import HocBuilder
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordable
from morphforge.simulation.base.stimulation import CurrentClampStepChange
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
class CurrentClampCurrentRecord(NEURONRecordable):
def __init__(self, cclamp, **kwargs):
super(CurrentClampCurrentRecord, self).__init__(**kwargs)
self.cclamp = cclamp
def get_unit(self):
return units.nA
def get_std_tags(self):
return [StandardTags.Current]
def build_hoc(self, hocfile_obj):
name_hoc = hocfile_obj[MHocFileData.CurrentClamps][self.cclamp]['stimname']
HocModUtils.create_record_from_object(
hocfile_obj=hocfile_obj,
vecname='RecVec%s' % self.name,
objname=name_hoc,
objvar='i',
recordobj=self)
def build_mod(self, modfile_set):
pass
def get_description(self):
return 'Step CurrentClamp Injection: %s' % self.cclamp.name
class NEURONCurrentClampStepChange(CurrentClampStepChange, NEURONObject):
def __init__(self, **kwargs):
super(NEURONCurrentClampStepChange, self).__init__(**kwargs)
def build_hoc(self, hocfile_obj):
HocBuilder.CurrentClamp(hocfile_obj=hocfile_obj,
currentclamp=self)
def build_mod(self, modfile_set):
pass
def get_recordable(self, what, name=None, **kwargs):
recorders = {
CurrentClamp.Recordables.Current: CurrentClampCurrentRecord
}
return recorders[what](cclamp=self, name=name, **kwargs)
NEURONEnvironment.currentclamps.register_plugin(CurrentClampStepChange, NEURONCurrentClampStepChange)
|
[
"morphforge.simulation.neuron.core.neuronsimulationenvironment.NEURONEnvironment.currentclamps.register_plugin",
"morphforge.simulation.neuron.hocmodbuilders.hocmodutils.HocModUtils.create_record_from_object",
"morphforge.simulation.neuron.hocmodbuilders.HocBuilder.CurrentClamp"
] |
[((3720, 3825), 'morphforge.simulation.neuron.core.neuronsimulationenvironment.NEURONEnvironment.currentclamps.register_plugin', 'NEURONEnvironment.currentclamps.register_plugin', (['CurrentClampStepChange', 'NEURONCurrentClampStepChange'], {}), '(CurrentClampStepChange,\n NEURONCurrentClampStepChange)\n', (3767, 3825), False, 'from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment\n'), ((2695, 2840), 'morphforge.simulation.neuron.hocmodbuilders.hocmodutils.HocModUtils.create_record_from_object', 'HocModUtils.create_record_from_object', ([], {'hocfile_obj': 'hocfile_obj', 'vecname': "('RecVec%s' % self.name)", 'objname': 'name_hoc', 'objvar': '"""i"""', 'recordobj': 'self'}), "(hocfile_obj=hocfile_obj, vecname=\n 'RecVec%s' % self.name, objname=name_hoc, objvar='i', recordobj=self)\n", (2732, 2840), False, 'from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils\n'), ((3338, 3405), 'morphforge.simulation.neuron.hocmodbuilders.HocBuilder.CurrentClamp', 'HocBuilder.CurrentClamp', ([], {'hocfile_obj': 'hocfile_obj', 'currentclamp': 'self'}), '(hocfile_obj=hocfile_obj, currentclamp=self)\n', (3361, 3405), False, 'from morphforge.simulation.neuron.hocmodbuilders import HocBuilder\n')]
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic import CreateView, ListView
from django.views.generic.edit import FormMixin, FormView, ProcessFormView
from movierama.movies.forms import MovieForm
from movierama.movies.models import Movie
class MovieCreateView(CreateView):
form_class = MovieForm
template_name = "movie_create.html"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_success_url(self):
return reverse("homepage")
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = self.request.user
self.object.save()
messages.add_message(self.request, messages.SUCCESS,
'Movie "{}" successfully added.'.format(self.object),
fail_silently=True)
return HttpResponseRedirect(self.get_success_url())
create_movie = MovieCreateView.as_view()
class MovieListView(ListView):
model = Movie
template_name = "pages/home.html"
context_object_name = "movies"
def get_ordering(self):
return self.request.GET.get('order_by', None)
def get_queryset(self):
queryset = self.model.objects.all()
if self.request.user.is_authenticated:
queryset = self.model.as_user(self.request.user.id).all()
username = self.kwargs.get("username", None)
if username:
queryset = queryset.filter(user__username=username)
ordering = self.get_ordering()
if ordering:
if ordering == "date":
queryset = queryset.order_by("-date_created")
if ordering == "likes":
queryset = queryset.order_by_likes()
if ordering == "hates":
queryset = queryset.order_by_hates()
return queryset
movieslist = MovieListView.as_view()
class VoteMovieView(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
try:
movie = Movie.objects.get(id=kwargs.get('movie_id', None))
except ObjectDoesNotExist as e:
return HttpResponseRedirect(reverse("homepage"))
vote = request.POST.get('vote', None)
if vote is not None:
movie.vote(self.request.user, vote)
return HttpResponseRedirect(reverse("homepage"))
vote_movie = VoteMovieView.as_view()
class UnVoteMovieView(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
try:
movie = Movie.objects.get(id=kwargs.get('movie_id', None))
except ObjectDoesNotExist as e:
return HttpResponseRedirect(reverse("homepage"))
movie.remove_vote(self.request.user)
return HttpResponseRedirect(reverse("homepage"))
unvote_movie = UnVoteMovieView.as_view()
|
[
"django.urls.reverse",
"django.utils.decorators.method_decorator"
] |
[((635, 667), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (651, 667), False, 'from django.utils.decorators import method_decorator\n'), ((2245, 2277), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (2261, 2277), False, 'from django.utils.decorators import method_decorator\n'), ((2858, 2890), 'django.utils.decorators.method_decorator', 'method_decorator', (['login_required'], {}), '(login_required)\n', (2874, 2890), False, 'from django.utils.decorators import method_decorator\n'), ((805, 824), 'django.urls.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (812, 824), False, 'from django.urls import reverse\n'), ((2761, 2780), 'django.urls.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (2768, 2780), False, 'from django.urls import reverse\n'), ((3296, 3315), 'django.urls.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (3303, 3315), False, 'from django.urls import reverse\n'), ((2579, 2598), 'django.urls.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (2586, 2598), False, 'from django.urls import reverse\n'), ((3192, 3211), 'django.urls.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (3199, 3211), False, 'from django.urls import reverse\n')]
|