import os
import pyotp
import alpaca_trade_api as tradeapi
import robin_stocks.robinhood as rh
from dotenv import load_dotenv
from pathlib import Path
dotenv_path = Path('.') / '.env'
load_dotenv(dotenv_path=dotenv_path)
def initAlpaca():
ALPACA_ACCESS_KEY_ID = os.getenv("ALPACA_ACCESS_KEY_ID")
ALPACA_SECRET_ACCESS_KEY = os.getenv("ALPACA_SECRET_ACCESS_KEY")
if not (ALPACA_ACCESS_KEY_ID and ALPACA_SECRET_ACCESS_KEY):
print("No Alpaca credentials supplied, skipping")
return None
# Set up alpaca
alpaca = tradeapi.REST(
ALPACA_ACCESS_KEY_ID,
ALPACA_SECRET_ACCESS_KEY,
"https://paper-api.alpaca.markets"
)
return alpaca
def initRobinHood():
ROBINHOOD_USER = os.getenv("ROBINHOOD_USER")
ROBINHOOD_PASS = os.getenv("ROBINHOOD_PASS")
ROBINHOOD_MFA = os.getenv("ROBINHOOD_MFA")
if not (ROBINHOOD_USER and ROBINHOOD_PASS and ROBINHOOD_MFA):
print("No Robinhood credentials supplied, skipping")
return None
# set up robinhood
mfa = pyotp.TOTP(ROBINHOOD_MFA).now()
rh.login(ROBINHOOD_USER, ROBINHOOD_PASS, mfa_code=mfa)
return rh
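# Usage sketch (illustrative, not part of the original module): both helpers read their
# credentials from the .env file loaded above (ALPACA_ACCESS_KEY_ID, ALPACA_SECRET_ACCESS_KEY,
# ROBINHOOD_USER, ROBINHOOD_PASS, ROBINHOOD_MFA) and return None when credentials are missing,
# so callers can guard on that:
#
#   alpaca = initAlpaca()
#   if alpaca is not None:
#       print(alpaca.get_account().status)
#
#   robinhood = initRobinHood()
#   if robinhood is not None:
#       print(robinhood.profiles.load_account_profile())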
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import imath
import IECore
import Gaffer
import GafferDispatch
class Wedge( GafferDispatch.TaskContextProcessor ) :
Mode = IECore.Enum.create( "FloatRange", "IntRange", "ColorRange", "FloatList", "IntList", "StringList" )
def __init__( self, name = "Wedge" ) :
GafferDispatch.TaskContextProcessor.__init__( self, name )
self["variable"] = Gaffer.StringPlug( defaultValue = "wedge:value" )
self["indexVariable"] = Gaffer.StringPlug( defaultValue = "wedge:index" )
self["mode"] = Gaffer.IntPlug(
defaultValue = int( self.Mode.FloatRange ),
minValue = int( self.Mode.FloatRange ),
maxValue = int( self.Mode.StringList ),
)
# float range
self["floatMin"] = Gaffer.FloatPlug( defaultValue = 0 )
self["floatMax"] = Gaffer.FloatPlug( defaultValue = 1 )
self["floatSteps"] = Gaffer.IntPlug( minValue = 2, defaultValue = 11 )
# int range
self["intMin"] = Gaffer.IntPlug( defaultValue = 0 )
self["intMax"] = Gaffer.IntPlug( defaultValue = 5 )
self["intStep"] = Gaffer.IntPlug( minValue = 1, defaultValue = 1 )
# color range
self["ramp"] = Gaffer.SplinefColor3fPlug(
defaultValue = Gaffer.SplineDefinitionfColor3f(
(
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
)
self["colorSteps"] = Gaffer.IntPlug( defaultValue = 5, minValue = 2 )
# lists
self["floats"] = Gaffer.FloatVectorDataPlug( defaultValue = IECore.FloatVectorData() )
self["ints"] = Gaffer.IntVectorDataPlug( defaultValue = IECore.IntVectorData() )
self["strings"] = Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() )
def values( self ) :
mode = self.Mode( self["mode"].getValue() )
if mode == self.Mode.FloatRange :
min = self["floatMin"].getValue()
max = self["floatMax"].getValue()
steps = self["floatSteps"].getValue()
values = []
for i in range( 0, steps ) :
t = float( i ) / ( steps - 1 )
values.append( min + t * ( max - min ) )
elif mode == self.Mode.IntRange :
min = self["intMin"].getValue()
max = self["intMax"].getValue()
step = self["intStep"].getValue()
if max < min :
min, max = max, min
if step == 0 :
raise RuntimeError( "Invalid step - step must not be 0" )
elif step < 0 :
step = -step
values = []
while True :
value = min + len( values ) * step
if value > max :
break
values.append( value )
elif mode == self.Mode.ColorRange :
spline = self["ramp"].getValue().spline()
steps = self["colorSteps"].getValue()
values = [ spline( i / float( steps - 1 ) ) for i in range( 0, steps ) ]
elif mode == self.Mode.FloatList :
values = self["floats"].getValue()
elif mode == self.Mode.IntList :
values = self["ints"].getValue()
elif mode == self.Mode.StringList :
values = self["strings"].getValue()
return values
def _processedContexts( self, context ) :
# make a context for each of the wedge values
variable = self["variable"].getValue()
indexVariable = self["indexVariable"].getValue()
contexts = []
for index, value in enumerate( self.values() ) :
contexts.append( Gaffer.Context( context ) )
contexts[-1][variable] = value
contexts[-1][indexVariable] = index
return contexts
IECore.registerRunTimeTyped( Wedge, typeName = "GafferDispatch::Wedge" )
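# Usage sketch (illustrative; assumes a working Gaffer environment). With the IntRange mode
# defined above, values() enumerates intMin..intMax in increments of intStep:
#
#   wedge = Wedge()
#   wedge["mode"].setValue( int( Wedge.Mode.IntRange ) )
#   wedge["intMin"].setValue( 0 )
#   wedge["intMax"].setValue( 4 )
#   wedge["intStep"].setValue( 2 )
#   wedge.values()   # -> [ 0, 2, 4 ]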
|
###############################################################################
# densprofiles: a collection of parameterized density profiles
###############################################################################
from functools import wraps
import numpy
import healpy
from galpy.util import bovy_coords
_R0= 8.
_Zsun= 0.025
# Input decorators
def glonDecorator(func):
"""Decorator to convert input in (l/rad,b/rad,D/kpc) to (R,z,phi)"""
@wraps(func)
def glon_wrapper(*args,**kwargs):
if kwargs.pop('glon',False):
XYZ= bovy_coords.lbd_to_XYZ(args[0],args[1],args[2],degree=False)
R,phi,z= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=_R0,Zsun=_Zsun)
else:
R,phi,z= args[0],args[1],args[2]
return func(R,phi,z,*args[3:],**kwargs)
return glon_wrapper
def scalarDecorator(func):
"""Decorator to deal with scalar input"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[0]).shape == ():
scalarOut= True
newargs= ()
for ii in range(len(args)):
newargs= newargs+(numpy.array([args[ii]]),)
args= newargs
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
############################### LOGIT FOR AMPLITUDES ##########################
def logit(p):
"""The logit functions"""
return numpy.log(p/(1.-p))
def ilogit(x):
"""The reverse logit"""
return 1./(1.+numpy.exp(-x))
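# Quick sanity check (illustrative): logit and ilogit are inverses on (0,1), e.g.
# ilogit(logit(0.1)) recovers 0.1 (up to floating point) and logit(0.5) == 0.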
################################# HEALPIX MAPS ################################
def healpixelate(dist,densprofile,params=None,nside=512,nest=True):
"""
NAME:
healpixelate
PURPOSE:
Pixelate a density profile at a given distance from the Sun on a HEALPIX grid
INPUT:
dist - distance in kpc
densprofile - density profile function from this module
params= parameter array of the density profile
nside= (512) HEALPIX nside
nest= (True) True: nested pixelation; False: ring pixelation
OUTPUT:
density on grid (1D array)
HISTORY:
2015-03-04 - Written - Bovy (IAS)
"""
npix= healpy.pixelfunc.nside2npix(nside)
theta,phi= healpy.pixelfunc.pix2ang(nside,numpy.arange(npix),nest=nest)
return densprofile(phi,numpy.pi/2.-theta,dist*numpy.ones(npix),glon=True,
params=params)
def powspec(dist,densprofile,params=None,nside=512):
"""
NAME:
powspec
PURPOSE:
calculate the angular power spectrum of a density profile at a given distance
INPUT:
dist - distance in kpc
densprofile - density profile function from this module
params= parameter array of the density profile
nside= (512) HEALPIX nside
OUTPUT:
(l,C_l)
HISTORY:
2015-03-04 - Written - Bovy (IAS)
"""
dmap= healpixelate(dist,densprofile,params=params,nside=nside,nest=False)
cl= healpy.sphtfunc.anafast(dmap-numpy.mean(dmap),pol=False)
return (numpy.arange(len(cl)),cl)
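# Example (illustrative sketch; requires healpy and uses the expdisk profile defined below):
#
#   dmap = healpixelate(5., expdisk, params=[1./3., 1./0.3], nside=64)
#   ell, cl = powspec(5., expdisk, params=[1./3., 1./0.3], nside=64)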
############################### DENSITY PROFILES ##############################
@scalarDecorator
@glonDecorator
def expdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3],log=False):
"""
NAME:
expdisk
PURPOSE:
density of an exponential disk
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz]
log= (False) if True, return the log of the density
OUTPUT:
density or log density
HISTORY:
2015-03-04 - Written - Bovy (IAS)
"""
if log:
return -params[0]*(R-_R0)-params[1]*numpy.fabs(z)
else:
return numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))
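# Example (illustrative): thanks to the decorators above, expdisk accepts scalar or array
# positions, and glon=True switches the inputs to (l/rad, b/rad, D/kpc):
#
#   expdisk(8., 0., 0., params=[1./3., 1./0.3])   # -> 1.0 at the solar position
#   expdisk(numpy.array([6., 8., 10.]), numpy.zeros(3), numpy.zeros(3), params=[1./3., 1./0.3])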
@scalarDecorator
@glonDecorator
def expdiskplusconst(R,phi,z,glon=False,
params=[1./3.,1./0.3,0.1]):
"""
NAME:
expdiskplusconst
PURPOSE:
density of an exponential disk plus a constant
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,log(amp)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
return numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))/2.\
*numpy.fabs(params[1])\
+numpy.exp(params[2])/24.
@scalarDecorator
@glonDecorator
def twoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,1./0.5,logit(0.1)]):
"""
NAME:
twoexpdisk
PURPOSE:
density of a sum of two exponential disks
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
amp= ilogit(params[4])
return (1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))\
+amp/2.*params[3]*numpy.exp(-params[2]*(R-_R0)-params[3]*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def brokenexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.)]):
"""
NAME:
brokenexpdisk
PURPOSE:
density of a broken exponential disk (two scale lengths)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
sz= z[R <= Rb]
bz= z[R > Rb]
out[R <= Rb]= \
numpy.fabs(params[1])/2.*numpy.exp(-params[0]*(sR-_R0)-params[1]*numpy.fabs(sz))
out[R > Rb]=\
numpy.exp(-params[2]*(bR-_R0)-params[1]*numpy.fabs(bz))\
*numpy.fabs(params[1])/2.*numpy.exp(params[2]*(Rb-_R0)-params[0]*(Rb-_R0))
return out
@scalarDecorator
@glonDecorator
def tribrokenexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.)]):
"""
NAME:
tribrokenexpdisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
sz= z[R <= Rb]
bz= z[R > Rb]
out[R <= Rb]= \
numpy.fabs(params[1])/2.*numpy.exp(numpy.fabs(params[0])*(sR-_R0)-params[1]*numpy.fabs(sz))
out[R > Rb]=\
numpy.exp(-numpy.fabs(params[2])*(bR-_R0)-params[1]*numpy.fabs(bz))\
*numpy.fabs(params[1])/2.*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)+numpy.fabs(params[0])*(Rb-_R0))
return out
@scalarDecorator
@glonDecorator
def symbrokenexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,numpy.log(10.)]):
"""
NAME:
symbrokenexpdisk
PURPOSE:
density of a broken exponential disk, symmetric around a break
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return brokenexpdisk(R,phi,z,glon=False,
params=[-params[0],params[1],2.*params[0],params[2]])
@scalarDecorator
@glonDecorator
def brokenexpflaredisk(R,phi,z,glon=False,surfdens=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),1./5.]):
"""
NAME:
brokenexpflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths), with a flaring vertical profile
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
tinvhz= params[1]*numpy.exp((R-_R0)*params[4])
out[R <= Rb]= numpy.exp(-params[0]*(sR-_R0))
out[R > Rb]= numpy.exp(-params[2]*(bR-_R0))\
*numpy.exp(params[2]*(Rb-_R0)-params[0]*(Rb-_R0))
if surfdens == True:
return out
else:
return numpy.fabs(tinvhz)/2.*out*numpy.exp(-tinvhz*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def tribrokenexpflaredisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),-1./5.]):
"""
NAME:
tribrokenexpflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
tinvhz= params[1]*numpy.exp((R-_R0)*params[4])
out[R <= Rb]= numpy.exp(numpy.fabs(params[0])*(sR-_R0))
out[R > Rb]= numpy.exp(-numpy.fabs(params[2])*(bR-_R0))\
*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)\
+numpy.fabs(params[0])*(Rb-_R0))
return numpy.fabs(tinvhz)/2.*out*numpy.exp(-tinvhz*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def tribrokenexplinflaredisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),-1./5.]):
"""
NAME:
tribrokenexplinflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive) with a linearly flaring scale-length
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-09-0 - Written - Bovy (UofT)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
tinvhz= params[1]/(1.-(R-_R0)*params[4]*(numpy.exp(1.)-1.))
tinvhz[tinvhz > 10.]= 10.
out[R <= Rb]= numpy.exp(numpy.fabs(params[0])*(sR-_R0))
out[R > Rb]= numpy.exp(-numpy.fabs(params[2])*(bR-_R0))\
*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)\
+numpy.fabs(params[0])*(Rb-_R0))
return numpy.fabs(tinvhz)/2.*out*numpy.exp(-tinvhz*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def tribrokenexpinvlinflaredisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),-1./5.]):
"""
NAME:
tribrokenexpinvlinflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive) with a linearly flaring inverse scale-length
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-09-0 - Written - Bovy (UofT)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
tinvhz= params[1]*(1.+(R-_R0)*params[4]*(numpy.exp(1.)-1.))
tinvhz[tinvhz > 10.]= 10.
out[R <= Rb]= numpy.exp(numpy.fabs(params[0])*(sR-_R0))
out[R > Rb]= numpy.exp(-numpy.fabs(params[2])*(bR-_R0))\
*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)\
+numpy.fabs(params[0])*(Rb-_R0))
return numpy.fabs(tinvhz)/2.*out*numpy.exp(-tinvhz*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def tribrokenexpfixedflaredisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.)]):
"""
NAME:
tribrokenexpfixedflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-04-22 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
tinvhz= params[1]*numpy.exp((R-_R0)*-0.1)
out[R <= Rb]= numpy.exp(numpy.fabs(params[0])*(sR-_R0))
out[R > Rb]= numpy.exp(-numpy.fabs(params[2])*(bR-_R0))\
*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)\
+numpy.fabs(params[0])*(Rb-_R0))
return numpy.fabs(tinvhz)/2.*out*numpy.exp(-tinvhz*numpy.fabs(z))
@scalarDecorator
@glonDecorator
def brokentwoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),
logit(0.1),1./0.8]):
"""
NAME:
brokentwoexpdisk
PURPOSE:
density of a broken exponential disk (two scale lengths), with a
vertical density profile consisting of two scale heights
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak],1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
out[R <= Rb]= numpy.exp(-params[0]*(sR-_R0))
out[R > Rb]=\
numpy.exp(-params[2]*(bR-_R0))\
*numpy.exp(params[2]*(Rb-_R0)-params[0]*(Rb-_R0))
amp= ilogit(params[4])
return out*((1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[1]*numpy.fabs(z))\
+amp/2.*numpy.fabs(params[5])\
*numpy.exp(-params[5]*numpy.fabs(z)))
@scalarDecorator
@glonDecorator
def brokentwoexpflaredisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),
logit(0.1),1./0.8,-1./5.]):
"""
NAME:
brokentwoexpflaredisk
PURPOSE:
density of a broken exponential disk (two scale lengths), with a
vertical density profile consisting of two scale heights, flaring with the same scale length
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak],1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-09-12 - Written - Bovy (UofT)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
out[R <= Rb]= numpy.exp(-params[0]*(sR-_R0))
out[R > Rb]=\
numpy.exp(-params[2]*(bR-_R0))\
*numpy.exp(params[2]*(Rb-_R0)-params[0]*(Rb-_R0))
amp= ilogit(params[4])
tinvhz1= params[1]*numpy.exp((R-_R0)*params[6])
tinvhz2= params[5]*numpy.exp((R-_R0)*params[6])
return out*((1.-amp)/2.*numpy.fabs(tinvhz1)\
*numpy.exp(-tinvhz1*numpy.fabs(z))\
+amp/2.*numpy.fabs(tinvhz2)\
*numpy.exp(-tinvhz2*numpy.fabs(z)))
@scalarDecorator
@glonDecorator
def tribrokentwoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),logit(0.1),
1./0.8]):
"""
NAME:
tribrokentwoexpdisk
PURPOSE:
density of a broken exponential disk (two scale lengths, one negative, one positive)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
out[R <= Rb]= \
numpy.exp(numpy.fabs(params[0])*(sR-_R0))
out[R > Rb]= numpy.exp(-numpy.fabs(params[2])*(bR-_R0))\
*numpy.exp(numpy.fabs(params[2])*(Rb-_R0)+numpy.fabs(params[0])*(Rb-_R0))
amp= ilogit(params[4])
return out*((1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[1]*numpy.fabs(z))\
+amp/2.*numpy.fabs(params[5])\
*numpy.exp(-params[5]*numpy.fabs(z)))
@scalarDecorator
@glonDecorator
def gaussexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,numpy.log(10.)]):
"""
NAME:
gaussexpdisk
PURPOSE:
density as a Gaussian in radius and an exponential vertically
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,log[Rmax]]
OUTPUT:
density
HISTORY:
2015-03-28 - Written - Bovy (IAS)
"""
Rm= numpy.exp(params[2])
return numpy.fabs(params[1])/2.*numpy.exp(-params[1]*numpy.fabs(z))\
*numpy.exp(-params[0]**2./2.*((R-Rm)**2.-(_R0-Rm)**2.))
@scalarDecorator
@glonDecorator
def brokenquadexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.)]):
"""
NAME:
brokenquadexpdisk
PURPOSE:
density of a broken exponential squared disk (two scale lengths)
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
sz= z[R <= Rb]
bz= z[R > Rb]
out[R <= Rb]= \
numpy.fabs(params[1])/2.*numpy.exp(-params[0]**2./2.*(sR-_R0)**2.-params[1]*numpy.fabs(sz))
out[R > Rb]=\
numpy.exp(-params[2]**2./2.*(bR-_R0)**2.-params[1]*numpy.fabs(bz))\
*numpy.fabs(params[1])/2.*numpy.exp(params[2]**2./2.*(Rb-_R0)**2.-params[0]**2./2.*(Rb-_R0)**2.)
return out
@scalarDecorator
@glonDecorator
def symbrokenquadexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,numpy.log(10.)]):
"""
NAME:
symbrokenquadexpdisk
PURPOSE:
density of a broken exponential squared disk, symmetric around a break
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,log[Rbreak]]
OUTPUT:
density or log density
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return brokenquadexpdisk(R,phi,z,glon=False,
params=[params[0],params[1],
-2.*params[0],params[2]])
def logspiral(R,phi,tanp=numpy.tan(9.4/180.*numpy.pi),
Rref=9.9,phiref=14.2/180.*numpy.pi,
width=0.38,phimin=-21./180.*numpy.pi,phimax=88./180.*numpy.pi):
"""
NAME:
logspiral
PURPOSE:
return the overdensity due to a logarithmic spiral (default parameters: Perseus arm)
INPUT:
R - Galactocentric radius (/kpc)
phi - Galactocentric azimuth (/rad)
tanp= tan of the pitch angle
Rref= reference radius
phiref= reference phi
width= width in radius (/kpc)
phimin, phimax= extent of the arm in azimuth
OUTPUT:
overdensity (unit amplitude)
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
phi[phi > 180.]-= 360.
Rs= Rref*numpy.exp(-(phi-phiref)*tanp)
w= width*numpy.sqrt(1.+tanp**2.)
out= numpy.zeros_like(R)
gindx= (phi > phimin)*(phi < phimax)
out[gindx]= 1./numpy.sqrt(2.*numpy.pi)/w\
*numpy.exp(-0.5*(R-Rs)[gindx]**2./w**2.)
return out
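# Example (illustrative): with the Perseus-arm defaults, the overdensity peaks on the arm;
# at the reference point (R = 9.9, phi = 14.2 deg) it equals the Gaussian peak 1/(sqrt(2 pi) w):
#
#   logspiral(numpy.array([9.9]), numpy.array([14.2/180.*numpy.pi]))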
@scalarDecorator
@glonDecorator
def brokenexpdiskfixedspiral(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,numpy.log(10.),
numpy.log(0.1)]):
"""
NAME:
brokenexpdiskfixedspiral
PURPOSE:
density of a broken exponential disk (two scale lengths) + a fixed spiral
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,log[Rbreak],log[spamp]]
OUTPUT:
density
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
Rb= numpy.exp(params[3])
spamp= numpy.exp(params[4])
out= numpy.empty_like(R)
sR= R[R <= Rb]
bR= R[R > Rb]
out[R <= Rb]= \
numpy.exp(-params[0]*(sR-_R0))
out[R > Rb]=\
numpy.exp(-params[2]*(bR-_R0))\
*numpy.exp(params[2]*(Rb-_R0)-params[0]*(Rb-_R0))
return numpy.fabs(params[1])/2.*(out+spamp*logspiral(R,phi))\
*numpy.exp(-params[1]*numpy.fabs(z))
|
# -*- coding: utf-8; test-case-name: bridgedb.test.test_email_request; -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Nick Mathewson <[email protected]>
# Isis Lovecruft <[email protected]> 0xA3ADB67A2CDB8B35
# Matthew Finkel <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2007-2015, The Tor Project, Inc.
# (c) 2013-2015, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""
.. py:module:: bridgedb.email.request
:synopsis: Classes for parsing and storing information about requests for
bridges which are sent to the email distributor.
bridgedb.email.request
======================
Classes for parsing and storing information about requests for bridges
which are sent to the email distributor.
::
bridgedb.email.request
| |_ determineBridgeRequestOptions - Figure out which filters to apply, or
| offer help.
|_ EmailBridgeRequest - A request for bridges which was received through
the email distributor.
..
"""
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from bridgedb import bridgerequest
from bridgedb.Dist import EmailRequestedHelp
from bridgedb.Dist import EmailRequestedKey
#: A regular expression for matching the Pluggable Transport method TYPE in
#: emailed requests for Pluggable Transports.
TRANSPORT_REGEXP = ".*transport ([a-z][_a-z0-9]*)"
TRANSPORT_PATTERN = re.compile(TRANSPORT_REGEXP)
#: A regular expression that matches country codes in requests for unblocked
#: bridges.
UNBLOCKED_REGEXP = ".*unblocked ([a-z]{2,4})"
UNBLOCKED_PATTERN = re.compile(UNBLOCKED_REGEXP)
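# For example (illustrative), TRANSPORT_PATTERN.match("get transport obfs4").group(1)
# yields 'obfs4', and UNBLOCKED_PATTERN.match("get unblocked ir").group(1) yields 'ir'.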
def determineBridgeRequestOptions(lines):
"""Figure out which :class:`Bridges.BridgeFilter`s to apply, or offer help.
.. note:: If any ``'transport TYPE'`` was requested, or bridges not
blocked in a specific CC (``'unblocked CC'``), then the ``TYPE``
and/or ``CC`` will *always* be stored as a *lowercase* string.
:param list lines: A list of lines from an email, including the headers.
:raises EmailRequestedHelp: if the client requested help.
:raises EmailRequestedKey: if the client requested our GnuPG key.
:rtype: :class:`EmailBridgeRequest`
:returns: A :class:`~bridgerequest.BridgeRequest` with all of the requested
parameters set. The returned ``BridgeRequest`` will have already had
its filters generated via :meth:`~EmailBridgeRequest.generateFilters`.
"""
request = EmailBridgeRequest()
skippedHeaders = False
for line in lines:
line = line.strip().lower()
# Ignore all lines before the first empty line:
if not line: skippedHeaders = True
if not skippedHeaders: continue
if ("help" in line) or ("halp" in line):
raise EmailRequestedHelp("Client requested help.")
if "get" in line:
request.isValid(True)
logging.debug("Email request was valid.")
if "key" in line:
request.wantsKey(True)
raise EmailRequestedKey("Email requested a copy of our GnuPG key.")
if "ipv6" in line:
request.withIPv6()
if "transport" in line:
request.withPluggableTransportType(line)
if "unblocked" in line:
request.withoutBlockInCountry(line)
logging.debug("Generating hashring filters for request.")
request.generateFilters()
return request
class EmailBridgeRequest(bridgerequest.BridgeRequestBase):
"""We received a request for bridges through the email distributor."""
def __init__(self):
"""Process a new bridge request received through the
:class:`~bridgedb.Dist.EmailBasedDistributor`.
"""
super(EmailBridgeRequest, self).__init__()
self._isValid = False
self._wantsKey = False
def isValid(self, valid=None):
"""Get or set the validity of this bridge request.
If called without parameters, this method will return the current
state, otherwise (if called with the **valid** parameter), it will set
the current state of validity for this request.
:param bool valid: If given, set the validity state of this
request. Otherwise, get the current state.
"""
if valid is not None:
self._isValid = bool(valid)
return self._isValid
def wantsKey(self, wantsKey=None):
"""Get or set whether this bridge request wanted our GnuPG key.
If called without parameters, this method will return the current
state, otherwise (if called with the **wantsKey** parameter set), it
will set the current state for whether or not this request wanted our
key.
:param bool wantsKey: If given, set whether or not this request wanted
our GnuPG key. Otherwise, get the current state.
"""
if wantsKey is not None:
self._wantsKey = bool(wantsKey)
return self._wantsKey
def withoutBlockInCountry(self, line):
"""This request was for bridges not blocked in **country**.
Add any country code found in the **line** to the list of
``notBlockedIn``. Currently, a request for unblocked bridges is recognized
if the email line contains the ``'unblocked'`` command.
:param str line: The line from the email wherein the client
requested bridges which are not blocked in a particular country.
"""
unblocked = None
logging.debug("Parsing 'unblocked' line: %r" % line)
try:
unblocked = UNBLOCKED_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if unblocked:
self.notBlockedIn.append(unblocked)
logging.info("Email requested bridges not blocked in: %r"
% unblocked)
def withPluggableTransportType(self, line):
"""This request included a specific Pluggable Transport identifier.
Add any Pluggable Transport method TYPE found in the **line** to the
list of ``transports``. Currently, a request for a transport is
recognized if the email line contains the ``'transport'`` command.
:param str line: The line from the email wherein the client
requested some type of Pluggable Transport.
"""
transport = None
logging.debug("Parsing 'transport' line: %r" % line)
try:
transport = TRANSPORT_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if transport:
self.transports.append(transport)
logging.info("Email requested transport type: %r" % transport)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from lib.BaseAlg import BaseAlg, get_device, Network, Network_NL, obs_data_all
import numpy as np
from scipy.stats import invgamma
class NeuralLinearUserStruct:
def __init__(self, feature, featureDimension, mlp_dims,
epoch, batch_size, learning_rate,):
# create neural model
self.feature = feature
self.mlp_dims = [int(x) for x in mlp_dims.split(",") if x != ""]
self.device = get_device()
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.model = extend(Network_NL(feature_dim=featureDimension, mlp_dims=self.mlp_dims).to(self.device))
self.loss_func = nn.MSELoss()
self.data = obs_data_all() # training set
self.latent_data = obs_data_all()
self.time = 0
def updateParameters(self):
self.update_model()
def update_model(self):
num_data = len(self.data)
optimizer = torch.optim.Rprop(self.model.parameters(), lr=self.learning_rate,)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9999977)
# prepare the training data
loss_list = []
dataloader = torch.utils.data.DataLoader(self.data, batch_size=self.batch_size, shuffle=True, num_workers=0)
# calculate the number of batches
num_batch = int(num_data / self.batch_size) if num_data >= self.batch_size else 1
for i in range(self.epoch):
total_loss = 0
for j, batch in enumerate(dataloader):
if j == num_batch:
break
self.model.zero_grad()
optimizer.zero_grad()
context_feature = batch["context"].clone().detach().float().requires_grad_(True).to(self.device)
click = batch["click"].clone().detach().float().requires_grad_(True).to(self.device).view(-1)
pred = self.model(context_feature).view(-1)
loss = self.loss_func(pred, click)
total_loss += loss
loss.backward()
optimizer.step()
loss_list.append(total_loss / num_batch)
scheduler.step()
class NeuralLinearAlgorithm(BaseAlg):
def __init__(self, arg_dict,): # n is number of users
BaseAlg.__init__(self, arg_dict)
self.device = get_device()
# latent_dim
self.mlp_dims = [int(x) for x in arg_dict["mlp"].split(",") if x != ""]
self.latent_dim = self.mlp_dims[-1]
self.param_dim = self.latent_dim
# create parameters
# Gaussian prior for each beta_i
self._lambda_prior = float(arg_dict["lambda_"])
self.mu = np.zeros(self.param_dim)
self.f = np.zeros(self.param_dim)
self.yy = 0
self.cov = (1.0 / self._lambda_prior) * np.eye(self.param_dim)
self.precision = self._lambda_prior * np.eye(self.param_dim)
# Inverse Gamma prior for each sigma2_i
self._a0 = float(arg_dict["a0"])
self._b0 = float(arg_dict["b0"])
self.a = self._a0
self.b = self._b0
self.t = 0
self.update_freq_nn = arg_dict["training_freq_network"]
self.current_g = None
self.users = []
for i in range(arg_dict["n_users"]):
self.users.append(
NeuralLinearUserStruct([], arg_dict["dimension"], arg_dict["mlp"], arg_dict["epoch"],
arg_dict["batch_size"], arg_dict["learning_rate"],))
def decide(self, pool_articles, userID, k=1,) -> object:
"""Samples beta's from posterior, and chooses best action accordingly."""
# Sample sigma2, and beta conditional on sigma2
sigma2_s = self.b * invgamma.rvs(self.a)
try:
beta_s = np.random.multivariate_normal(self.mu, sigma2_s*self.cov)
except np.linalg.LinAlgError as e:
beta_s = np.random.multivariate_normal(np.zeros(self.param_dim), np.eye((self.param_dim)))
concat_feature = np.array([x.featureVector for x in pool_articles])
# #! need to double check the implementation for concatenate user and item features
tensor = torch.tensor(concat_feature, dtype=torch.float32).to(self.users[userID].device)
# forward
z_context = self.users[userID].model(tensor,path="last")
z_context_=z_context.clone().detach().numpy()
vals = np.dot(beta_s,z_context_.T)
pool_positions = np.argsort(vals)[(k * -1):]
articles = []
for i in range(k):
articles.append(pool_articles[pool_positions[i]])
return articles
def updateParameters(self, articlePicked, click, userID): # click: reward
self.t += 1
article_id = articlePicked.id
article_feature = articlePicked.contextFeatureVector[: self.dimension] # context_feature
concat_feature = np.array(articlePicked.featureVector)
# #! need to double check the implementation for concatenate user and item features
tensor = torch.tensor(concat_feature, dtype=torch.float32).to(self.users[userID].device)
z_context = self.users[userID].model(tensor,path="last")
# put pickedArticle data into training set to update model
self.users[userID].data.push(article_feature, click) # self.data_h.add(context, action, reward)
if self.t % self.update_freq_nn == 0:
self.users[userID].updateParameters() # update_model(Train NN for new features)
tensor = torch.tensor(concat_feature, dtype=torch.float32).to(self.users[userID].device)
new_z = self.users[userID].model(tensor, path="last")
new_z_ = new_z.clone().detach().numpy()
self.precision = (np.dot(new_z_.T, new_z_) + self._lambda_prior * np.eye(self.param_dim))
self.f = np.dot(new_z_.T, click)
else:
z_context_ = z_context.clone().detach().numpy()
self.precision += np.dot(z_context_.T, z_context_)
self.f += np.dot(z_context_.T, click)
self.yy += click**2
self.cov = np.linalg.inv(self.precision)
self.mu = np.dot(self.cov, self.f)
# Inverse Gamma posterior update
self.a += 0.5
b_ = 0.5 * (self.yy - np.dot(self.mu.T, np.dot(self.precision, self.mu)))
self.b += b_
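# Minimal sketch (illustrative only) of the posterior sampling step used in decide() above:
# draw sigma^2 from the Inverse-Gamma posterior, then beta conditional on sigma^2.
#
#   a, b = 3.0, 2.0                     # hypothetical posterior shape/scale
#   mu, cov = np.zeros(4), np.eye(4)    # hypothetical posterior mean/covariance
#   sigma2_s = b * invgamma.rvs(a)
#   beta_s = np.random.multivariate_normal(mu, sigma2_s * cov)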
|
aqiRanges = (0, 50, 100, 150, 200, 300, 500)
aqiDescriptions = ("Good", "Moderate", "Unhealthy for Sensitive Groups",
"Unhealthy", "Very Unhealthy", "Hazardous")
aqiDescription = ""
pm25ranges = (0, 12, 35.4, 55.4, 150.4, 250.4, 500.4)
pm10ranges = (0, 54, 154, 254, 354, 424, 604)
no2ranges = (0, 53, 100, 360, 649, 1249, 2049)
so2ranges = (0, 35, 75, 185, 304, 604, 1004)
coranges = (0, 4.4, 9.4, 12.4, 15.4, 30.4, 50.4)
iHigh, iLow, cHigh, cLow, cP = 0, 0, 0, 0, 0
location = input("Where is this measurement taken from? ")
# This code only takes acceptable inputs and asks again if an out-of-bounds input is entered
def takeInput(upperBound, message):
while True:
tempinput = float(input(message))
if (tempinput < 0) or (tempinput > upperBound):
print(
f"Entered value is out of range, please use a value between 0 and {upperBound}")
else:
break
return (tempinput)
def calculateAQI(name, ranges):
cP = takeInput(ranges[6], str(
f"Enter the value for the {name} concentration : "))
index = 0
for upper in ranges:
if cP <= upper:
cHigh = upper
# IMPORTANT NOTE:
# This code uses the upper bound of the previous index as the lower bound.
# I discussed this change with Sumona and we agreed that it was a good
# change, as it gives a more reasonable result when edge cases that fall
# between the specified ranges are entered. This means the program returns
# slightly different results from the original specification, but Sumona
# asked that a comment explaining the change be included so that the TA
# will know why.
cLow = ranges[index - 1]
iHigh = aqiRanges[index]
iLow = aqiRanges[index - 1]
break
index += 1
return(((iHigh-iLow)/(cHigh-cLow)*(cP-cLow))+iLow)
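# Worked example (illustrative): a PM2.5 concentration of cP = 20 falls in the 12-35.4 bin,
# so cLow = 12, cHigh = 35.4, iLow = 50, iHigh = 100, and
# AQI = (100 - 50) / (35.4 - 12) * (20 - 12) + 50 ~= 67.1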
results = []
endMessages = []
# map each pollutant name to its concentration breakpoint ranges
pollutantRanges = {"PM2.5": pm25ranges, "PM10": pm10ranges,
"NO2": no2ranges, "SO2": so2ranges, "CO": coranges}
keys = dict.keys(pollutantRanges)
# iterate over the pollutants to collect the measurements and process them
for key in keys:
result = calculateAQI(key, pollutantRanges[key])
endMessages.append(f"The Air Quality Index of {key} is {result}")
results.append(result)
maxAqi = max(results)
index = 0
for upper in aqiRanges:
if maxAqi <= upper:
print(
f"The Air Quality Index in {location} is {maxAqi}, this is {aqiDescriptions[index - 1]}")
break
index += 1
for endMessage in endMessages:
print(endMessage)
|
from Cryptodome.PublicKey import RSA
def gen_AB_key():
print('start generate public key and private key')
print()
gen_key_and_save('A')
gen_key_and_save('B')
print()
print('end generate public key and private key')
def gen_key_and_save(name):
publickey, privatekey = RSA_gen_key()
save_key(name, publickey, privatekey)
def RSA_gen_key():
# Generate an RSA private key
prikey = RSA.generate(2048)
# Derive the matching RSA public key
pubkey = prikey.public_key()
# Export both keys in PEM format
pubkeyPEM = pubkey.export_key('PEM')
prikeyPEM = prikey.export_key('PEM')
return pubkeyPEM, prikeyPEM
def save_key(name, pubkey, prikey):
with open(name + '_public_key.pem', 'wb') as f:
f.write(pubkey)
with open(name + '_private_key.pem', 'wb') as f:
f.write(prikey)
if __name__ == '__main__':
gen_AB_key()
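# Reading the keys back in later (illustrative sketch; RSA.import_key is PyCryptodome's
# standard loader):
#
#   with open('A_public_key.pem', 'rb') as f:
#       pubkey = RSA.import_key(f.read())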
|
import uuid, yaml, json, logging, pathlib
from ignition.service.framework import Service, Capability
from ignition.service.infrastructure import InfrastructureDriverCapability
from ignition.model.infrastructure import CreateInfrastructureResponse, DeleteInfrastructureResponse, FindInfrastructureResponse, InfrastructureTask, STATUS_IN_PROGRESS, STATUS_COMPLETE, STATUS_FAILED, STATUS_UNKNOWN
from ignition.model.failure import FAILURE_CODE_RESOURCE_ALREADY_EXISTS, FAILURE_CODE_UNKNOWN, FAILURE_CODE_INTERNAL_ERROR
from kubernetes.client.rest import ApiException
logger = logging.getLogger(__name__)
class K8sInfrastructureService(Service, InfrastructureDriverCapability):
def __init__(self, tosca_translator, **kwargs):
self.tosca_translator = tosca_translator
if 'location_translator' not in kwargs:
raise ValueError('location_translator argument not provided')
self.location_translator = kwargs.get('location_translator')
def create_infrastructure(self, template, template_type, system_properties, properties, deployment_location):
"""
Initiates a request to create infrastructure based on a VNFC package
:param str template: tosca template of infrastructure to be created
:param ignition.utils.propvaluemap.PropValueMap system_properties: properties generated by LM for this Resource: resourceId, resourceName, requestId, metricKey, resourceManagerId, deploymentLocation, resourceType
:param ignition.utils.propvaluemap.PropValueMap properties: property values of the Resource
:param dict deployment_location: the valid Kubernetes (K8s) location to deploy to
:return: the id of Infrastructure to be created
"""
infrastructure_id = uuid.uuid4().hex
k8s = self.tosca_translator.generate_k8s(infrastructure_id, template, properties)
logger.debug('k8s = {0}'.format(k8s))
k8s_location = self.location_translator.from_deployment_location(deployment_location)
return k8s_location.create_infrastructure(infrastructure_id, k8s)
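# Illustrative usage (names are hypothetical): a driver built with a tosca_translator and a
# location_translator handles a request roughly as
#   driver.create_infrastructure(tosca_template, 'TOSCA', system_properties, properties, deployment_location)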
def get_infrastructure_task(self, infrastructure_id, request_id, deployment_location):
"""
Get information about the infrastructure (created or deleted)
:param str infrastructure_id: identifier of the infrastructure to check
:param str request_id: identifier of the request to check
:param dict deployment_location: the K8s location the infrastructure was deployed to
:return: a Infrastructure instance describing the status
"""
# noop - the driver does not use the Ignition job queue, but sends the response directly on the infrastructure responses Kafka topic
return None
def delete_infrastructure(self, infrastructure_id, deployment_location):
"""
Remove infrastructure previously created
:param str infrastructure_id: identifier of the stack to be removed
:param dict deployment_location: the K8s location the infrastructure was deployed to
"""
k8s_location = self.location_translator.from_deployment_location(deployment_location)
if k8s_location is None:
raise ValueError("Invalid deployment location")
k8s_location.delete_infrastructure(infrastructure_id)
return DeleteInfrastructureResponse(infrastructure_id, infrastructure_id)
def find_infrastructure(self, template, instance_name, deployment_location):
# TODO
return FindInfrastructureResponse("1", {})
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import math
import argparse
import itertools
import csv
from scipy.stats import linregress
from scipy.optimize import minimize
read_num_seq_lineage_global = None
read_num_min_seq_lineage_global = None
read_depth_seq_global = None
t_seq_global = None
kappa_global = None
x_mean_global = None
def fun_estimate_parameters(x, read_num_seq, t_seq, kappa=2.5, fitness_type='m'):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE LOG LIKELIHOOD VALUE OF EACH GENOTYPE GIVEN ITS
# FITNESS, THE ESTIMATED READ NUMBER PER GENOTYPE PER SEQUENCING TIME-POINT, AND THE ESTIMATED MEAN FITNESS PER
# SEQUENCING TIME-POINT
#
# INPUTS
# --x: fitness of each genotype, [x1, x2, ...]
# --read_num_seq: read number per genotype at each sequencing time-point
# --t_seq: sequenced time-points in number of generations, [0, t1, t2, ...]
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer, DNA extraction,
# PCR, and sequencing (To measure kappa empirically, see the reference: [S. F. Levy, et al. Quantitative
# Evolutionary Dynamics Using High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)
# (default: 2.5)
# --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m) (default: m)
#
# OUTPUTS
# --estimate_parameters_output: log likelihood value of each genotype,
# estimated reads number per genotype per sequencing time-point,
# estimated mean fitness per sequencing time-point, [x_mean(t0),x_mean(t1),...]
# ------------------------------------------------------------------------------------------------------------------
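# Example (illustrative), for a read-count matrix read_num_seq with one row per genotype and
# x0 an initial per-genotype fitness guess:
#   out = fun_estimate_parameters(x0, read_num_seq, np.array([0., 8., 16.]), kappa=2.5, fitness_type='m')
#   out['Estimated_Mean_Fitness']  # one mean-fitness estimate per sequencing time-point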
read_num_seq = read_num_seq.astype(float)
read_num_seq[read_num_seq == 0] = 1e-1
read_depth_seq = np.sum(read_num_seq, axis=0)
lineages_num, seq_num = read_num_seq.shape
read_num_min_seq = np.zeros((lineages_num, seq_num))
read_num_min_seq[:, 0] = read_num_seq[:, 0]
for i in range(1, seq_num):
read_num_min_seq[:, i] = read_num_min_seq[:, i - 1] / 2 ** (t_seq[i] - t_seq[i - 1])
x[x <= -1] = -1 + 1e-7
x_mean = np.zeros(seq_num)
read_num_seq_est = np.zeros((lineages_num, seq_num))
read_num_seq_est[:, 0] = read_num_seq[:, 0]
likelihood_log_seq = np.zeros((lineages_num, seq_num))
if fitness_type == 'w':
for i in range(1, seq_num):
x_mean[i] = np.maximum(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * (np.log(1 + x) + 1)
- (t_seq[i] - t_seq[i - 1]) / (x_mean[i] - x_mean[i - 1])
* ((x_mean[i] + 1) * np.log(x_mean[i] + 1)
- (x_mean[i - 1] + 1) * np.log(x_mean[i - 1] + 1)))
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
elif fitness_type == 'm':
for i in range(1, seq_num):
x_mean[i] = np.maximum(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * x
- (t_seq[i] - t_seq[i - 1]) * (x_mean[i] + x_mean[i - 1]) / 2)
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
pos1_r, pos1_c = np.where(read_num_seq[:, :-1] >= 20)
likelihood_log_seq[pos1_r, pos1_c + 1] = (0.25 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- 0.5 * np.log(4 * np.pi * kappa)
- 0.75 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- (np.sqrt(read_num_seq[pos1_r, pos1_c + 1])
- np.sqrt(read_num_seq_est[pos1_r, pos1_c + 1])) ** 2 / kappa)
pos_r, pos_c = np.where(read_num_seq[:, :-1] < 20)
pos_p1 = np.where(read_num_seq[pos_r, pos_c + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq[pos_r, pos_c + 1] < 10)[0]
pos2_r = pos_r[pos_p1]
pos2_c = pos_c[pos_p1]
pos3_r = pos_r[pos_p2]
pos3_c = pos_c[pos_p2]
likelihood_log_seq[pos2_r, pos2_c + 1] = (np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq_est[pos2_r, pos2_c + 1]))
- read_num_seq_est[pos2_r, pos2_c + 1]
- np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq[pos2_r, pos2_c + 1]))
+ read_num_seq[pos2_r, pos2_c + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq[pos2_r, pos2_c + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq[pos3_r, pos3_c + 1].astype(int)]
likelihood_log_seq[pos3_r, pos3_c + 1] = (np.multiply(read_num_seq[pos3_r, pos3_c + 1],
np.log(read_num_seq_est[pos3_r, pos3_c + 1]))
- read_num_seq_est[pos3_r, pos3_c + 1]
- np.log(factorial_tempt))
likelihood_log = np.sum(likelihood_log_seq, axis=1)
estimate_parameters_output = {'Likelihood_Log': likelihood_log,
'Estimated_Read_Number': read_num_seq_est,
'Estimated_Mean_Fitness': x_mean}
return estimate_parameters_output
def fun_likelihood_lineage_w(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE WRIGHTIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * (np.log(1 + x) + 1)
- (t_seq_global[i] - t_seq_global[i - 1]) / (
x_mean_global[i] - x_mean_global[i - 1])
* ((x_mean_global[i] + 1) * np.log(x_mean_global[i] + 1)
- (x_mean_global[i - 1] + 1) * np.log(x_mean_global[i - 1] + 1)))
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def fun_likelihood_lineage_m(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE MALTHUSIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * x
- (t_seq_global[i] - t_seq_global[i - 1]) *
(x_mean_global[i] + x_mean_global[i - 1]) / 2)
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def main():
# ------------------------------------------------------------------------------------------------------------------
# ESTIMATE FITNESS OF EACH GENOTYPE IN A COMPETITIVE POOLED GROWTH EXPERIMENT
#
# OPTIONS
# --input: a .csv file, with each column being the read number per genotype at each sequenced time-point
# --t_seq: sequenced time-points in number of generations (format: 0 t1 t2 ...)
# --max_iter_num: maximum number of iterations in the optimization (Small numbers can reduce running time
# and decrease accuracy.) (default: 10)
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer,
# DNA extraction, PCR, and sequencing (To measure kappa empirically, see the reference:
# [S. F. Levy, et al. Quantitative Evolutionary Dynamics Using High-resolution Lineage Tracking.
# Nature, 519: 181–186 (2015)].) (default: 2.5)
# --regression_num: number of points used in the initial linear-regression-based fitness estimate (default: 2)
# --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m) (default: m)
# --output_filename: prefix of output .csv files (default: output)
#
# OUTPUTS
# output_filename_FitSeq_Result.csv: 1st column: estimated fitness of each genotype, [x1, x2, ...],
# 2nd column: log likelihood value of each genotype, [f1, f2, ...],
# 3rd column: estimated mean fitness per sequenced time-point
# [x_mean(0), x_mean(t1), ...],
# 4th column+: estimated read number per genotype per sequencing time-point,
# with each time-point being a column
# ------------------------------------------------------------------------------------------------------------------
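# Example invocation (file names are hypothetical):
#   python fitseq.py -i read_counts.csv -t 0 8 16 24 32 -k 2.5 -f m -o my_run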
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
parser = argparse.ArgumentParser(description='Estimate fitness of each genotype in a competitive pooled growth '
'experiment', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', type=str, help='a .csv file: with each column being the read number per '
'genotype at each sequenced time-point')
parser.add_argument('-t', '--t_seq', nargs='*', type=float, help='sequenced time-points in number of generations')
parser.add_argument('-m', '--max_iter_num', type=int, default=10,
help='maximum number of iterations in the optimization')
parser.add_argument('-k', '--kappa', type=float, default=2.5,
help='a noise parameter that characterizes the total noise introduced by growth, '
'cell transfer, DNA extraction, PCR, and sequencing (To measure kappa empirically, '
'see the reference: [S. F. Levy, et al. Quantitative Evolutionary Dynamics Using '
'High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)')
parser.add_argument('-g', '--regression_num', type=int, default=2,
help='number of points used in the initial linear-regression-based fitness estimate')
parser.add_argument('-f', '--fitness_type', type=str, default='m',
help='type of fitness: Wrightian fitness (w), or Malthusian fitness (m)')
parser.add_argument('-o', '--output_filename', type=str, default='output', help='prefix of output .csv files')
args = parser.parse_args()
read_num_seq = np.array(pd.read_csv(args.input, header=None), dtype=float)
t_seq = np.array(args.t_seq, dtype=float)
max_iter_num = args.max_iter_num
kappa = args.kappa
regression_num = args.regression_num
fitness_type = args.fitness_type
output_filename = args.output_filename
for i in range(regression_num):
pos_zero = np.where(read_num_seq[:, i] < 1)
read_num_seq[pos_zero, i] = 1
# pos_zero = np.where(read_num_seq[:, 0] < 1)
# read_num_seq[pos_zero, 0] = 1
read_num_seq[read_num_seq == 0] = 1e-1
read_depth_seq = np.sum(read_num_seq, axis=0)
lineages_num, seq_num = read_num_seq.shape
read_num_min_seq = np.zeros((lineages_num, seq_num))
read_num_min_seq[:, 0] = read_num_seq[:, 0]
for i in range(1, seq_num):
read_num_min_seq[:, i] = read_num_min_seq[:, i - 1] / 2 ** (t_seq[i] - t_seq[i - 1])
read_freq_seq = read_num_seq / read_depth_seq
if fitness_type == 'w':
if regression_num == 2:
x0_tempt = np.power(np.true_divide(read_freq_seq[:, 1], read_freq_seq[:, 0]), 1 / (t_seq[1] - t_seq[0])) - 1
else:
x0_tempt = [regression_output.slope for i in range(lineages_num) for regression_output in
[linregress(t_seq[0:regression_num], np.log(read_freq_seq[i, 0:regression_num]))]]
x0_tempt = np.exp(x0_tempt) - 1
x0 = (1 + x0_tempt) / (1 + np.dot(read_freq_seq[:, 0], x0_tempt)) - 1 # Normalization
elif fitness_type == 'm':
if regression_num == 2:
x0_tempt = np.true_divide(read_freq_seq[:, 1] - read_freq_seq[:, 0], t_seq[1] - t_seq[0])
else:
x0_tempt = [regression_output.slope for i in range(lineages_num) for regression_output in
[linregress(t_seq[0:regression_num], np.log(read_freq_seq[i, 0:regression_num]))]]
x0 = x0_tempt - np.dot(read_freq_seq[:, 0], x0_tempt) # Normalization
x_opt = np.copy(x0)
read_depth_seq_global = read_depth_seq
t_seq_global = t_seq
kappa_global = kappa
parameter_output = fun_estimate_parameters(x0, read_num_seq, t_seq, kappa, fitness_type)
x_mean_global = parameter_output['Estimated_Mean_Fitness']
likelihood_log_sum_iter = [-1e50 * lineages_num, np.sum(parameter_output['Likelihood_Log'])]
step_size = 1 / lineages_num
iter_num = 0
while (likelihood_log_sum_iter[-1] - likelihood_log_sum_iter[-2] >= step_size) and (iter_num <= max_iter_num):
#while (iter_num <= 6):
if fitness_type == 'w':
for i in range(lineages_num):
x0_lineage = x_opt[i]
read_num_seq_lineage_global = read_num_seq[i, :]
read_num_min_seq_lineage_global = read_num_min_seq[i, :]
opt_output_lineage = minimize(fun_likelihood_lineage_w, x0_lineage, method='BFGS',
options={'disp': False, 'maxiter': 50})
x_opt[i] = opt_output_lineage['x'][0]
pos = (np.true_divide(read_num_seq[:, 0] / np.sum(read_num_seq[:, 0], axis=0),
np.sum(read_num_seq[:, 1], axis=0)) > 2 ** (t_seq[1] - t_seq[0])) | (x_opt <= -1)
x_opt[pos] = x0[pos]
elif fitness_type == 'm':
for i in range(lineages_num):
x0_lineage = x_opt[i]
read_num_seq_lineage_global = read_num_seq[i, :]
read_num_min_seq_lineage_global = read_num_min_seq[i, :]
opt_output_lineage = minimize(fun_likelihood_lineage_m, x0_lineage, method='BFGS',
options={'disp': False, 'maxiter': 50})
x_opt[i] = opt_output_lineage['x'][0]
pos = (np.true_divide(read_num_seq[:, 0] / np.sum(read_num_seq[:, 0], axis=0),
np.sum(read_num_seq[:, 1], axis=0)) > 2 ** (t_seq[1] - t_seq[0]))
x_opt[pos] = x0[pos]
parameter_output = fun_estimate_parameters(x_opt, read_num_seq, t_seq, kappa, fitness_type)
likelihood_log_sum_iter.append(np.sum(parameter_output['Likelihood_Log']))
x_mean_global = parameter_output['Estimated_Mean_Fitness']
iter_num += 1
print('Iteration ' + str(iter_num) + ': ' + str(likelihood_log_sum_iter[-1]))
read_num_seq_est = parameter_output['Estimated_Read_Number']
x_opt = x_opt - np.dot(read_num_seq_est[:, 0], x_opt) / np.sum(read_num_seq_est[:, 0])
parameter_output_final = fun_estimate_parameters(x_opt, read_num_seq, t_seq, kappa, fitness_type)
x_mean_est = parameter_output_final['Estimated_Mean_Fitness']
likelihood_log = parameter_output_final['Likelihood_Log']
#fitseq_output = {'Estimated_Fitness': x_opt,
# 'Likelihood_Log': likelihood_log,
# 'Estimated_Mean_Fitness': x_mean_est,
# 'Estimated_Read_Number': read_num_seq_est.astype(int)}
fitseq_output = {'Estimated_Fitness': x_opt,
'Likelihood_Log': likelihood_log,
'Estimated_Mean_Fitness': x_mean_est}
for i in range(read_num_seq_est.shape[1]):
fitseq_output['Estimated_Read_Number_t%d' % i] = read_num_seq_est[:, i].astype(int)
tempt = list(itertools.zip_longest(*list(fitseq_output.values())))
with open(output_filename + '_FitSeq.csv', 'w') as f:
w = csv.writer(f)
w.writerow(fitseq_output.keys())
w.writerows(tempt)
if __name__ == "__main__":
main()
|
'''Scrape info on series'''
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import requests
import pandas as pd
import time
import os
import json
from datetime import datetime
BASE_URL = "https://fmovies.to"
start = ""
retryCount = 0
def get_last_epi(series_name, season):
query = '{0}+{1}'.format(
'+'.join(series_name.split()), season)
result = ""
    try:
        response = requests.get(
            '{0}/search?keyword={1}'.format(
                BASE_URL, query)
        )
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        result = soup.find("a", class_="poster")
        if result is None:
            # no poster link in the search results, most likely a captcha page
            return "Captcha content received", 0
        result = result.get('href')
    except Exception as err:
        return "Connection error: {0}".format(err), 0
# delay before next request
time.sleep(1)
# Scrape last episode
session = HTMLSession()
try:
req = session.get('{0}{1}'.format(BASE_URL, result))
except:
print("Couldn't connect to internet")
raise SystemExit()
req.html.render(timeout=5000)
try:
server = req.html.find('.episodes.range.active')[1]
episodes = BeautifulSoup(server.html, 'html.parser')
e = episodes.find_all("a")
total_episodes = len(e)
last_episode_link = e[-1].get('href')
last_episode_num = int(e[-1].text)
except:
print(req.html)
return None, 0
req.close()
session.close()
return {
"name": series_name, "latest_episode_num": last_episode_num,
"latest_episode_link": '{0}{1}'.format(BASE_URL, last_episode_link),
"season": season
}, 1
def check_for_new_epi(series_name, season, watchedTill):
global retryCount
status = 0
print("Looking up '{0} s{1}'...".format(series_name, season))
retryCount = 0
while not status:
retryCount += 1
info, status = get_last_epi(
series_name.capitalize(), str(season)
)
if not status:
print("Problem with connection. Retrying..")
time.sleep(1)
if retryCount >= 2:
break
    try:
        diff = info['latest_episode_num'] - watchedTill
    except Exception:
        print("Couldn't obtain data. Skipping...")
        return None
    print("DONE!")
    if diff > 0:
info["new_epi_count"] = diff
return info
return None
def seriesScraper():
global start
start = time.time()
data = pd.read_csv('../data.csv', delimiter=',')
series_name = data['series']
season = data['season']
watchedTill = data['epi']
new_epi_of_series = []
print('\nScraping metadata on the list of series ...\n')
for i in range(len(series_name)):
epi = check_for_new_epi(series_name[i], int(
season[i]), int(watchedTill[i]))
if epi:
new_epi_of_series.append(epi)
time.sleep(5)
end = time.time()
print("Time lapsed:{0}s".format(int(end-start)), end='\n')
return new_epi_of_series
def main():
new_epi_of_series = seriesScraper()
pwd = os.getcwd()
path_to_file = os.path.join(pwd, 'series.temp.json')
    # make sure the directory for the temp file exists before writing
    target_dir = os.path.dirname(path_to_file)
    if target_dir and not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    try:
        with open(path_to_file, 'w') as f:
            data = {
                'data': new_epi_of_series,
                'date': datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
            }
            f.write(json.dumps(data))
if os.path.exists(os.path.join(pwd, 'series.json')):
os.remove('series.json')
os.rename('series.temp.json', 'series.json')
print('Records updated.')
except:
print('Records were not updated.')
if __name__ == '__main__':
main()
|
from selenium import webdriver
from requests import *
from functools import *
import re
import time
import json
headers = \
{
'Host':'tools.7881.com',
'Referer':'https://tools.7881.com/publish/b/batch',
'Sec-Fetch-Mode': 'no-cors',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'}
hde = \
{
'Sec-Fetch-Mode': 'no-cors',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'}
cook_dd373=''
# launch Chrome with the desired ChromeOptions
def chromedriver():
driver_path=r'.\chromedriver.exe'
chromeOptions = webdriver.ChromeOptions()
chromeOptions.add_argument('disable-infobars')
    chromeOptions.add_experimental_option("excludeSwitches", ['enable-automation'])
wd=webdriver.Chrome(executable_path=driver_path,options=chromeOptions)
return wd
# send a GET request to an API endpoint and return the parsed JSON
def get_api(url,cook):
res=get(headers=hde,url=url,cookies=cook)
ress=res.content.decode()
return json.loads(ress)
# update listing prices on dd373
def set_price(url,data,cook,price,Last):
cookie=''
for k,v in cook.items():
cookie=cookie+k+"="+v+"; "
headers['cookie']=cookie
res=post(url=url,data=data,headers=hde)
rt = json.loads(res.text)
if rt['StatusData']['ResultCode'] == '4001':
cook_dd373=get_login_cookies_dd373()
cookie = ''
for k, v in cook_dd373.items():
cookie = cookie + k + "=" + v + "; "
headers['cookie'] = cookie
res = post(url=url, data=data, headers=hde)
rt = json.loads(res.text)
if rt['StatusData']['ResultMsg']=='操作成功':
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'------'+rt['StatusMsg']+'------'+rt['StatusData']['ResultMsg']+'---'+str(Last)+'---- 1元='+str(price)+"金")
if rt['StatusData']['ResultMsg']=='全部失败!':
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'------'+rt['StatusMsg']+'------'+rt['StatusData']['ResultMsg']+'---'+str(Last)+'---'+rt['StatusData']['ResultData']['ErrorMsges'][0]['MsgInfo'])
# update listing prices on 7881
def set_price_7881(url,data,cook,price,Last):
cookie=''
hed=hde
for k,v in cook.items():
cookie=cookie+k+"="+v+"; "
hed['cookie']=cookie
hed['Content-Type']='application/json'
res=post(url=url,data=json.dumps(data),headers=hed)
rt=json.loads(res.text)
if rt['data']['errCount']==0:
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'---'+'修改成功'+'---'+Last+'--- 库存:'+price+"金")
if rt['data']['errCount']!=0:
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'---出错!数量:'+str(rt['data']['errCount'])+'---'+Last+'---'+rt['data']['errMsg'])
# log in via the browser and collect the login cookies
def get_login_cookies():
wd=chromedriver()
wd.get('https://goods.dd373.com/usercenter/merchant/mall_management.html')
user_input=wd.find_element_by_xpath("//div[@class='user-name border-ed']/input")
pwd_input=wd.find_element_by_xpath("//div[@class='user-pwd border-ed']/input")
user_input.send_keys("")
pwd_input.send_keys("")
print("等待登录")
time.sleep(6)
while wd.current_url!='https://goods.dd373.com/usercenter/merchant/mall_management.html':
time.sleep(1)
ck=wd.get_cookies()
ck=get_ck(ck)
print("登录成功")
return ck,wd
# fetch current prices from 7881
def get_price(url,cook):
try:
getprice=[]
res=get(headers=headers,url=url,cookies=cook)
restxt=res.content.decode()
rtcook=res.cookies
if res.url==url:
text=re.findall('<table class="resulut_table">(.*?)<div class="table_foot clearfix">',restxt,re.S)
text=str(text[0]).replace("\n","").replace("\r","").replace(" ","")
getprice=re.findall(r'<trclass="tr"gameId="(.*?)"gtId="(.*?)"groupId="(.*?)"serverId="(.*?)"tradePlace="(.*?)"campName="(.*?)"><td>',text,re.S)
restxt=re.findall(r'.*?<tdclass="goodsId">(.*?)</td><tdclass="goodsTradeType".*?"value="(.*?)"maxlength.*?"value="(.*?)"></td>',text,re.S)
return restxt,getprice
else:
return '',''
except:
print("爬取数据出错了!")
return '',''
# convert the selenium cookie list into a dict usable by requests
def get_ck(ck):
cook={}
for oneCK in ck:
cook[oneCK['name']]=oneCK['value']
return cook
# log in to 7881 and collect the login cookies
def get_login_cookies_7881():
wd=chromedriver()
wd.get('https://passport.7881.com/login.html')
user_input=wd.find_element_by_xpath("//input[@class='iptAct']")
pwd_input=wd.find_element_by_xpath("//input[@class='iptPwd eyeclose']")
user_input.send_keys("")
pwd_input.send_keys("")
print("等待登录!")
while wd.current_url=='https://passport.7881.com/login.html':
time.sleep(1)
print(wd.current_url)
ck=wd.get_cookies()
print('登录成功!')
wd.quit()
return ck
# log in to dd373 and collect the login cookies
def get_login_cookies_dd373():
wd=chromedriver()
wd.get('https://goods.dd373.com/usercenter/merchant/mall_management.html')
user_input=wd.find_element_by_xpath("//div[@class='user-name border-ed']/input")
pwd_input=wd.find_element_by_xpath("//div[@class='user-pwd border-ed']/input")
user_input.send_keys("")
pwd_input.send_keys("")
print("等待登录")
while wd.current_url!='https://goods.dd373.com/usercenter/merchant/mall_management.html':
time.sleep(1)
ck=wd.get_cookies()
wd.quit()
ck=get_ck(ck)
return ck
# check whether the cookies are still valid
def examine_ck(cook):
    thiss = get(url='https://tools.7881.com/helper/mallbill/1',cookies=cook)
    # a redirect to the login page means the cookies have expired
    if thiss.url.split('?',1)[0]=='https://passport.7881.com/login.html':
        return False
    return True
def get_price_7881(url,cook):
try:
cookie = ''
hed = headers
#for k, v in cook.items():
#cookie = cookie + k + "=" + v + "; "
getprice=[]
#hed['Cookie'] = cookie
res=get(url=url,headers=hed,cookies=cook)
restxt=res.content.decode()
rtcook=res.cookies
if res.url==url:
text=re.findall('<table class="resulut_table">(.*?)<div class="table_foot clearfix">',restxt,re.S)
text=str(text[0]).replace("\n","").replace("\r","").replace(" ","")
getprice=re.findall(r'<trclass="tr"gameId="(.*?)"gtId="(.*?)"groupId="(.*?)"serverId="(.*?)"tradePlace="(.*?)"campName="(.*?)"><td>',text,re.S)
restxt=re.findall(r'.*?<tdclass="goodsId">(.*?)</td><tdclass="goodsTradeType".*?"value="(.*?)"maxlength.*?"value="(.*?)"></td>',text,re.S)
return restxt,getprice
else:
return '',''
except:
print('检查cookies,若无误请联系开发人员维护')
return '', ''
|
"""
This file contains the functional tests for the routes.
These tests use GETs and POSTs to different URLs to check for the proper behavior.
Resources:
https://flask.palletsprojects.com/en/1.1.x/testing/
https://www.patricksoftwareblog.com/testing-a-flask-application-using-pytest/
"""
import os
import pytest
from app import create_app, db
from app.Model.models import User, Post, Field
from config import Config
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite://'
SECRET_KEY = 'bad-bad-key'
WTF_CSRF_ENABLED = False
DEBUG = True
TESTING = True
@pytest.fixture(scope='module')
def test_client():
# create the flask application ; configure the app for tests
flask_app = create_app(config_class=TestConfig)
db.init_app(flask_app)
# Flask provides a way to test your application by exposing the Werkzeug test Client
# and handling the context locals for you.
testing_client = flask_app.test_client()
# Establish an application context before running the tests.
ctx = flask_app.app_context()
ctx.push()
yield testing_client
# this is where the testing happens!
ctx.pop()
def new_user(uname, uemail,passwd):
user = User(username=uname, email=uemail)
user.set_password(passwd)
return user
def init_fields():
# initialize the fields
if Field.query.count() == 0:
fields = ['TestField1', 'TestField2','TestField3','TestField4','TestField5']
for t in fields:
db.session.add(Field(name=t))
db.session.commit()
print(fields)
return None
@pytest.fixture
def init_database():
# Create the database and the database table
db.create_all()
    # initialize the fields
init_fields()
#add a user
user1 = new_user(uname='sakire', uemail='[email protected]',passwd='1234')
# Insert user data
db.session.add(user1)
# Commit the changes for the users
db.session.commit()
yield # this is where the testing happens!
db.drop_all()
def test_register_page(test_client):
"""
GIVEN a Flask application configured for testing
WHEN the '/register' page is requested (GET)
THEN check that the response is valid
"""
# Create a test client using the Flask application configured for testing
response = test_client.get('/register')
assert response.status_code == 308
# assert b"Register" in response.data
def test_register(test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/register' form is submitted (POST)
THEN check that the response is valid and the database is updated correctly
"""
# Create a test client using the Flask application configured for testing
response = test_client.post('/register/',
data=dict(username='john', firstname='john', lastname='doe', email='[email protected]', phone=1234567890, wsuid = 123456789, password="bad-bad-password",password2="bad-bad-password"),
follow_redirects = True)
assert response.status_code == 200
s = db.session.query(User).filter(User.username=='john')
assert s.first().email == '[email protected]'
assert s.count() == 1
assert b"Sign In" in response.data
assert b"Please log in to access this page." in response.data
def test_invalidlogin(test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/login' form is submitted (POST) with wrong credentials
THEN check that the response is valid and login is refused
"""
response = test_client.post('/login/',
data=dict(username='sakire', password='12345',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"Invalid username or password" in response.data
def test_login_logout(request,test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/login' form is submitted (POST) with correct credentials
    THEN check that the response is valid and login is successful
"""
response = test_client.post('/login/',
data=dict(username='sakire', password='1234',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"WSU Undergraduate Research Portal" in response.data
response = test_client.get('/logout',
follow_redirects = True)
assert response.status_code == 200
assert b"Sign In" in response.data
def test_postResearch(test_client,init_database):
"""
    GIVEN a Flask application configured for testing, after the user logs in,
    WHEN the '/post/' page is requested (GET)
    THEN check that the response is valid
"""
#login
response = test_client.post('/login/',
data=dict(username='sakire', password='1234',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"WSU Undergraduate Research Portal" in response.data
    # request the post form page
response = test_client.get('/post/')
assert response.status_code == 302
|
#!/usr/bin/env python2
#$ -S /usr/bin/python
#$ -l mem_free=1G
#$ -l arch=linux-x64
#$ -l netapp=1G
#$ -cwd
from pull_into_place import big_jobs
workspace, job_info = big_jobs.initiate()
big_jobs.run_rosetta(
workspace, job_info,
use_resfile=True,
use_restraints=True,
use_fragments=True,
)
big_jobs.debrief()
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Load settings from .ini file and create an ApiSettings object
with the settings as attributes
"""
import configparser as cp
import os
from typing import cast, Dict, Optional
import attr
import cattr
from typing_extensions import Protocol
from looker_sdk import error
from looker_sdk.rtl import transport
from looker_sdk.rtl import constants
def _convert_bool(val: str, _: bool) -> bool:
converted: bool
if val.lower() in ("yes", "y", "true", "t", "1"):
converted = True
elif val.lower() in ("", "no", "n", "false", "f", "0"):
converted = False
else:
raise TypeError
return converted
class PApiSettings(transport.PTransportSettings, Protocol):
def get_client_id(self) -> Optional[str]:
...
def get_client_secret(self) -> Optional[str]:
...
@attr.s(auto_attribs=True, kw_only=True)
class ApiSettings(transport.TransportSettings):
filename: str
section: Optional[str] = None
@classmethod
def configure(
cls, filename: str = "looker.ini", section: Optional[str] = None
) -> PApiSettings:
"""Configure using a config file and/or environment variables.
Environment variables will override config file settings. Neither
is necessary but some combination must supply the minimum to
instantiate ApiSettings.
ENV variables map like this:
<package-prefix>_API_VERSION -> api_version
<package-prefix>_BASE_URL -> base_url
<package-prefix>_VERIFY_SSL -> verify_ssl
"""
api_settings = cls(filename=filename, section=section)
config_data = api_settings.read_config()
converter = cattr.Converter()
converter.register_structure_hook(bool, _convert_bool)
settings = converter.structure(config_data, ApiSettings)
return settings
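    # Example usage (a minimal sketch; the "Looker" section name and the
    # looker.ini path are assumptions, not requirements of this module):
    #
    #     settings = ApiSettings.configure("looker.ini", section="Looker")
    #     client_id = settings.get_client_id()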
def read_config(self) -> Dict[str, Optional[str]]:
cfg_parser = cp.ConfigParser()
try:
cfg_parser.read_file(open(self.filename))
except FileNotFoundError:
config_data: Dict[str, Optional[str]] = {}
else:
# If section is not specified, use first section in file
section = self.section or cfg_parser.sections()[0]
config_data = self._clean_input(dict(cfg_parser[section]))
env_api_version = cast(
str, os.getenv(f"{constants.environment_prefix}_API_VERSION")
)
if env_api_version:
config_data["api_version"] = env_api_version
env_base_url = cast(str, os.getenv(f"{constants.environment_prefix}_BASE_URL"))
if env_base_url:
config_data["base_url"] = env_base_url
env_verify_ssl = cast(
str, os.getenv(f"{constants.environment_prefix}_VERIFY_SSL")
)
if env_verify_ssl:
config_data["verify_ssl"] = env_verify_ssl
config_data["filename"] = self.filename
config_data["section"] = self.section
config_data = self._clean_input(config_data)
return config_data
def _clean_input(
self, config_data: Dict[str, Optional[str]]
) -> Dict[str, Optional[str]]:
for setting, value in list(config_data.items()):
# Remove empty setting values
if not isinstance(value, str):
continue
if value in ['""', "''", ""]:
config_data.pop(setting)
# Strip quotes from setting values
elif value.startswith(('"', "'")) or value.endswith(('"', "'")):
config_data[setting] = value.strip("\"'")
return config_data
def get_client_id(self) -> Optional[str]:
return os.getenv(
f"{constants.environment_prefix}_CLIENT_ID"
) or self.read_config().get("client_id")
def get_client_secret(self) -> Optional[str]:
return os.getenv(
f"{constants.environment_prefix}_CLIENT_SECRET"
) or self.read_config().get("client_secret")
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import collections
from punch import version_part as vpart
from punch.helpers import import_file
class Version():
def __init__(self):
self.parts = collections.OrderedDict()
@property
def keys(self):
return list(self.parts.keys())
@property
def values(self):
return list(self.parts.values())
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def add_part(self, part):
self.keys.append(part.name)
self.parts[part.name] = part
def create_part(self, name, value,
cls=vpart.IntegerVersionPart, *args, **kwds):
self.keys.append(name)
self.parts[name] = cls(name, value, *args, **kwds)
def add_part_from_dict(self, dic):
vp = vpart.VersionPart.from_dict(dic)
self.keys.append(vp.name)
self.parts[vp.name] = vp
def get_part(self, name):
return self.parts[name]
def _reset_following_parts(self, name):
idx = self.keys.index(name)
reset_keys = self.keys[idx + 1:]
for key in reset_keys:
self.parts[key].reset()
def inc(self, name):
self.parts[name].inc()
self._reset_following_parts(name)
def set(self, adict):
for key, value in adict.items():
self.parts[key].set(value)
def set_and_reset(self, name, value):
self.parts[name].set(value)
self._reset_following_parts(name)
def copy(self):
new = Version()
for value in self.parts.values():
new.add_part(value.copy())
return new
def as_dict(self):
return dict((key, part.value) for key, part in self.parts.items())
def as_list(self):
return list((key, part.value) for key, part in self.parts.items())
def to_file(self, version_filepath):
with open(version_filepath, 'w') as f:
for key, part in self.parts.items():
f.write("{0} = {1}\n".format(key, repr(part.value)))
@classmethod
def from_file(cls, version_filepath, version_description):
version_module = import_file(version_filepath)
version = Version()
for version_part in version_description:
if isinstance(version_part, collections.Mapping):
version_part_name = version_part['name']
version_part['value'] = cls._get_version_part(
version_module, version_part, version_part_name)
version.add_part_from_dict(version_part)
else:
version_part_name = version_part
version_part_value = cls._get_version_part(
version_module, version_part, version_part_name)
version.create_part(version_part_name, version_part_value)
return version
@classmethod
def _get_version_part(cls, version_module,
version_part, version_part_name):
try:
return getattr(version_module, version_part_name)
except AttributeError:
raise ValueError(
"Given version file is invalid:" +
" missing '{}' variable".format(version_part_name)
)
|
from maru.grammeme.abstract import Grammeme
class Degree(Grammeme):
POSITIVE = 'Pos'
COMPARATIVE = 'Cmp'
|
import numpy as np
import pickle
import logging
import os
import xarray as xr
# tensorflow is used throughout (tf.io, tf.data); import it explicitly rather
# than relying on it being re-exported by .utils
import tensorflow as tf
from .utils import *
def gen_feature_description(metadata_instance, sample_dim):
""" Generates the feature description dictionary given a metadata_intance and the name of the sample dimension
Args:
metadata_instance (metadata): Metadata instance
sample_dim (str): Name of the sample_dimension
Returns:
dict: feature description used to parse tfrecord
"""
feature_variables = metadata_instance.feature_metadata["data_vars"]
feature_description = {}
for feature_variable in feature_variables:
feature_variable_dimensions = metadata_instance.feature_metadata["data_vars"][feature_variable]["dims"]
feature_variable_dimensions = list(feature_variable_dimensions)
feature_variable_dimensions.remove(sample_dim)
feature_variable_dimension_shape = []
for feature_variable_dimension in feature_variable_dimensions:
dimension_size = metadata_instance.metadata["dims"][feature_variable_dimension]
feature_variable_dimension_shape.append(dimension_size)
dtype = np.dtype(metadata_instance.feature_metadata["data_vars"][feature_variable]["dtype"])
tf_type = get_tf_type(dtype = dtype)
feature_description[feature_variable] = tf.io.FixedLenFeature(feature_variable_dimension_shape, tf_type)
return feature_description
def parser_function(example_proto, feature_description):
"""Parses a single example prototype given a the feature_description
Args:
example_proto (tensorflow.python.framework.ops.EagerTensor): Prototype to be parsed
feature_description (dict): feature description used to parse tfrecord
Returns:
dict: Dictionary of tensorflow Tensors
"""
return tf.io.parse_single_example(example_proto, feature_description)
def get_parser_function(feature_description):
return lambda example_proto : parser_function(example_proto, feature_description)
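# Example (a sketch; "data.tfrecord" and the metadata instance `meta` are
# placeholders): build the feature description from the metadata, then parse
# the record file with it:
#     feature_description = gen_feature_description(meta, sample_dim="time")
#     dataset = tf.data.TFRecordDataset("data.tfrecord")
#     parsed = dataset.map(get_parser_function(feature_description))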
def load_variable_from_parsed_dataset(dataset, variable):
"""Loads the values for a given variable in a given dataset
Args:
dataset (dict): Dictionary of parsed Dataset
variable (str): Name of variable
"""
return np.stack([sample[variable].numpy() for sample in dataset])
def tfrecord_to_xarray(data_path):
dataset = tf.data.TFRecordDataset(data_path)
data_path = os.path.abspath(data_path)
metadata_filename = data_path.split(".")[0]+".meta"
with open(metadata_filename,"rb") as handle:
metadata_instance = pickle.load(handle)
feature_description = gen_feature_description(metadata_instance, sample_dim="time")
dataset_parsed = dataset.map( get_parser_function(feature_description))
variable_arr = []
metadata_coords = metadata_instance.metadata["coords"]
for variable in metadata_instance.feature_metadata["data_vars"]:
logging.info("{} Variable loading started".format(variable))
data = load_variable_from_parsed_dataset(dataset_parsed, variable=variable)
sample_arr = []
metadata_variable = metadata_instance.feature_metadata["data_vars"]
for sample in dataset_parsed:
sample_arr.append(sample[variable].numpy())
variable_dimensions = metadata_variable[variable]["dims"]
variable_attributes = metadata_variable[variable]["attrs"]
dimension_dict = {}
dimension_attr = {}
for dimension in variable_dimensions:
dimension_dict[dimension] = metadata_coords[dimension]["data"]
dimension_attr[dimension] = metadata_coords[dimension]["attrs"]
data_xr = xr.DataArray(data, name = variable, dims = variable_dimensions, coords = dimension_dict, attrs = metadata_instance.metadata["attrs"] )
# Add Attributes
for dimension in variable_dimensions:
data_xr[dimension].attrs = dimension_attr[dimension]
data_xr = data_xr.to_dataset()[variable]
data_xr.attrs = variable_attributes
variable_arr.append(data_xr)
return xr.merge(variable_arr) |
import pytest
import qcelemental as qcel
import qcengine as qcng
from qcelemental.testing import compare_values
from qcengine import testing
@pytest.fixture
def h2o():
smol = """
# R=0.958 A=104.5
H 0.000000000000 1.431430901356 0.984293362719
O 0.000000000000 0.000000000000 -0.124038860300
H 0.000000000000 -1.431430901356 0.984293362719
units au
"""
return qcel.models.Molecule.from_data(smol)
@pytest.fixture
def nh2():
smol = """
# R=1.008 #A=105.0
0 2
N 0.000000000000000 0.000000000000000 -0.145912918634892
H 0.000000000000000 -1.511214298139000 1.013682596946108
H 0.000000000000000 1.511214298139000 1.013682596946108
units au
"""
return qcel.models.Molecule.from_data(smol)
@pytest.mark.parametrize(
'program,basis,keywords',
[
pytest.param('cfour', 'aug-pvdz', {'scf_conv': 12}, marks=testing.using_cfour),
pytest.param('cfour', 'aug-pvdz', {}, marks=testing.using_cfour),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True}, marks=testing.using_nwchem),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'qc_module': 'tce'}, marks=testing.using_nwchem),
pytest.param('psi4', 'aug-cc-pvdz', {'scf_type': 'direct'}, marks=testing.using_psi4),
pytest.param('gamess', 'accd', {'contrl__ispher': 1}, marks=testing.using_gamess),
]) # yapf: disable
def test_sp_hf_rhf(program, basis, keywords, h2o):
"""cfour/sp-rhf-hf/input.dat
#! single point HF/adz on water
"""
resi = {
"molecule": h2o,
"driver": "energy",
"model": {
"method": "hf",
"basis": basis
},
"keywords": keywords,
}
res = qcng.compute(resi, program, raise_error=True, return_dict=True)
assert res["driver"] == "energy"
assert "provenance" in res
assert res["success"] is True
# aug-cc-pvdz
scf_tot = -76.0413815332
atol = 1.e-6
assert compare_values(scf_tot, res["return_result"], atol=atol)
@pytest.mark.parametrize(
'program,basis,keywords',
[
pytest.param('cfour', 'aug-pvdz', {'reference': 'uhf', 'occupation': [[3,1,1,0],[3,0,1,0]], 'scf_conv': 12}, marks=testing.using_cfour),
pytest.param('cfour', 'aug-pvdz', {'reference': 'uhf'}, marks=testing.using_cfour),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'scf__uhf': True}, marks=testing.using_nwchem),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'qc_module': 'tce', 'scf__uhf': True}, marks=testing.using_nwchem),
pytest.param('psi4', 'aug-cc-pvdz', {'reference': 'uhf', 'scf_type': 'direct'}, marks=testing.using_psi4),
pytest.param('gamess', 'accd', {'contrl__ispher': 1, 'contrl__scftyp': 'uhf'}, marks=testing.using_gamess),
]) # yapf: disable
def test_sp_hf_uhf(program, basis, keywords, nh2):
resi = {
"molecule": nh2,
"driver": "energy",
"model": {
"method": "hf",
"basis": basis
},
"keywords": keywords,
}
res = qcng.compute(resi, program, raise_error=True, return_dict=True)
assert res["driver"] == "energy"
assert "provenance" in res
assert res["success"] is True
# aug-cc-pvdz
scf_tot = -55.57513805253009
atol = 1.e-6
assert compare_values(scf_tot, res["return_result"], atol=atol)
@pytest.mark.parametrize(
'program,basis,keywords',
[
pytest.param('cfour', 'aug-pvdz', {'reference': 'rohf', 'occupation': [[3,1,1,0],[3,0,1,0]], 'scf_conv': 12}, marks=testing.using_cfour),
pytest.param('cfour', 'aug-pvdz', {'reference': 'rohf'}, marks=testing.using_cfour),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'scf__rohf': True}, marks=testing.using_nwchem),
pytest.param('nwchem', 'aug-cc-pvdz', {'basis__spherical': True, 'qc_module': 'tce', 'scf__rohf': True}, marks=testing.using_nwchem),
pytest.param('psi4', 'aug-cc-pvdz', {'reference': 'rohf', 'scf_type': 'direct'}, marks=testing.using_psi4),
pytest.param('gamess', 'accd', {'contrl__ispher': 1, 'contrl__scftyp': 'rohf'}, marks=testing.using_gamess),
]) # yapf: disable
def test_sp_hf_rohf(program, basis, keywords, nh2):
resi = {
"molecule": nh2,
"driver": "energy",
"model": {
"method": "hf",
"basis": basis
},
"keywords": keywords,
}
res = qcng.compute(resi, program, raise_error=True, return_dict=True)
assert res["driver"] == "energy"
assert "provenance" in res
assert res["success"] is True
# aug-cc-pvdz
scf_tot = -55.570724348574
atol = 1.e-6
assert compare_values(scf_tot, res["return_result"], atol=atol)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window.ui'
#
# Created: Fri Oct 12 15:31:31 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtWidgets
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(825, 651)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.catlist = QtWidgets.QTreeWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.catlist.sizePolicy().hasHeightForWidth())
self.catlist.setSizePolicy(sizePolicy)
self.catlist.setBaseSize(QtCore.QSize(200, 500))
self.catlist.setAlternatingRowColors(True)
self.catlist.setRootIsDecorated(False)
self.catlist.setAllColumnsShowFocus(True)
self.catlist.setObjectName(_fromUtf8("catlist"))
self.catlist.header().setDefaultSectionSize(100)
self.revlist = QtWidgets.QTreeWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.revlist.sizePolicy().hasHeightForWidth())
self.revlist.setSizePolicy(sizePolicy)
self.revlist.setBaseSize(QtCore.QSize(500, 500))
self.revlist.setToolTip(_fromUtf8(""))
self.revlist.setAlternatingRowColors(False)
self.revlist.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.revlist.setRootIsDecorated(False)
self.revlist.setObjectName(_fromUtf8("revlist"))
self.revlist.header().setCascadingSectionResizes(False)
self.revlist.header().setDefaultSectionSize(100)
self.horizontalLayout.addWidget(self.splitter)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 825, 20))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuSorting = QtWidgets.QMenu(self.menubar)
self.menuSorting.setObjectName(_fromUtf8("menuSorting"))
self.menuAssign = QtWidgets.QMenu(self.menubar)
self.menuAssign.setObjectName(_fromUtf8("menuAssign"))
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionOpenSpreadsheet = QtWidgets.QAction(MainWindow)
self.actionOpenSpreadsheet.setObjectName(_fromUtf8("actionOpenSpreadsheet"))
self.actionOpenSession = QtWidgets.QAction(MainWindow)
self.actionOpenSession.setObjectName(_fromUtf8("actionOpenSession"))
self.actionSaveSession = QtWidgets.QAction(MainWindow)
self.actionSaveSession.setObjectName(_fromUtf8("actionSaveSession"))
self.actionExport = QtWidgets.QAction(MainWindow)
self.actionExport.setObjectName(_fromUtf8("actionExport"))
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionAssign = QtWidgets.QAction(MainWindow)
self.actionAssign.setObjectName(_fromUtf8("actionAssign"))
self.actionAssignAllReviewers = QtWidgets.QAction(MainWindow)
self.actionAssignAllReviewers.setObjectName(_fromUtf8("actionAssignAllReviewers"))
self.actionAssignReserves = QtWidgets.QAction(MainWindow)
self.actionAssignReserves.setObjectName(_fromUtf8("actionAssignReserves"))
self.actionClearAssignments = QtWidgets.QAction(MainWindow)
self.actionClearAssignments.setObjectName(_fromUtf8("actionClearAssignments"))
self.actionAssignAllLabels = QtWidgets.QAction(MainWindow)
self.actionAssignAllLabels.setObjectName(_fromUtf8("actionAssignAllLabels"))
self.actionAssignAllReviewCategories = QtWidgets.QAction(MainWindow)
self.actionAssignAllReviewCategories.setObjectName(_fromUtf8("actionAssignAllReviewCategories"))
self.actionAssignAllSubmissionCategories = QtWidgets.QAction(MainWindow)
self.actionAssignAllSubmissionCategories.setObjectName(_fromUtf8("actionAssignAllSubmissionCategories"))
self.actionAssignFirst = QtWidgets.QAction(MainWindow)
self.actionAssignFirst.setObjectName(_fromUtf8("actionAssignFirst"))
self.actionAssignSecond = QtWidgets.QAction(MainWindow)
self.actionAssignSecond.setObjectName(_fromUtf8("actionAssignSecond"))
self.actionAssignThird = QtWidgets.QAction(MainWindow)
self.actionAssignThird.setObjectName(_fromUtf8("actionAssignThird"))
self.actionAssignFourth = QtWidgets.QAction(MainWindow)
self.actionAssignFourth.setObjectName(_fromUtf8("actionAssignFourth"))
self.actionAssignFifth = QtWidgets.QAction(MainWindow)
self.actionAssignFifth.setObjectName(_fromUtf8("actionAssignFifth"))
self.actionReviewerChoice = QtWidgets.QAction(MainWindow)
self.actionReviewerChoice.setCheckable(True)
self.actionReviewerChoice.setChecked(True)
self.actionReviewerChoice.setWhatsThis(_fromUtf8(""))
self.actionReviewerChoice.setObjectName(_fromUtf8("actionReviewerChoice"))
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionWorkflow = QtWidgets.QAction(MainWindow)
self.actionWorkflow.setObjectName(_fromUtf8("actionWorkflow"))
self.actionColorize = QtWidgets.QAction(MainWindow)
self.actionColorize.setCheckable(True)
self.actionColorize.setChecked(True)
self.actionColorize.setObjectName(_fromUtf8("actionColorize"))
self.actionAbstractLimit = QtWidgets.QAction(MainWindow)
self.actionAbstractLimit.setObjectName(_fromUtf8("actionAbstractLimit"))
self.actionAbstractLowerLimit = QtWidgets.QAction(MainWindow)
self.actionAbstractLowerLimit.setObjectName(_fromUtf8("actionAbstractLowerLimit"))
self.actionReviewerLimit = QtWidgets.QAction(MainWindow)
self.actionReviewerLimit.setObjectName(_fromUtf8("actionReviewerLimit"))
self.actionReviewerUpperLimit = QtWidgets.QAction(MainWindow)
self.actionReviewerUpperLimit.setObjectName(_fromUtf8("actionReviewerUpperLimit"))
self.menuFile.addAction(self.actionOpenSpreadsheet)
self.menuFile.addAction(self.actionOpenSession)
self.menuFile.addAction(self.actionSaveSession)
self.menuFile.addAction(self.actionExport)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuAssign.addAction(self.actionAssign)
self.menuAssign.addAction(self.actionClearAssignments)
self.menuAssign.addSeparator()
self.menuAssign.addAction(self.actionAssignAllReviewers)
self.menuAssign.addAction(self.actionAssignReserves)
self.menuAssign.addSeparator()
self.menuAssign.addAction(self.actionAssignAllLabels)
self.menuAssign.addAction(self.actionAssignAllReviewCategories)
self.menuAssign.addAction(self.actionAssignAllSubmissionCategories)
self.menuAssign.addSeparator()
self.menuAssign.addAction(self.actionAssignFirst)
self.menuAssign.addAction(self.actionAssignSecond)
self.menuAssign.addAction(self.actionAssignThird)
self.menuAssign.addAction(self.actionAssignFourth)
self.menuAssign.addAction(self.actionAssignFifth)
self.menuSorting.addAction(self.actionReviewerLimit)
self.menuSorting.addAction(self.actionReviewerUpperLimit)
self.menuSorting.addAction(self.actionAbstractLowerLimit)
self.menuSorting.addAction(self.actionAbstractLimit)
self.menuSorting.addSeparator()
self.menuSorting.addAction(self.actionReviewerChoice)
self.menuSorting.addAction(self.actionColorize)
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionWorkflow)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuSorting.menuAction())
self.menubar.addAction(self.menuAssign.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow", None ))
self.catlist.setStatusTip(QtWidgets.QApplication.translate("MainWindow", "select a category to assign reviewers", None ))
self.catlist.setSortingEnabled(True)
self.catlist.headerItem().setText(0, QtWidgets.QApplication.translate("MainWindow", "Category #", None ))
self.catlist.headerItem().setText(1, QtWidgets.QApplication.translate("MainWindow", "Category Title", None ))
self.catlist.headerItem().setText(2, QtWidgets.QApplication.translate("MainWindow", "# of Abstracts", None ))
self.catlist.headerItem().setText(3, QtWidgets.QApplication.translate("MainWindow", "# of Assigned Reviewers", None ))
self.catlist.headerItem().setText(4, QtWidgets.QApplication.translate("MainWindow", "Pool Size", None ))
self.catlist.headerItem().setText(5, QtWidgets.QApplication.translate("MainWindow", "Assigned Reviewers", None ))
self.revlist.setStatusTip(QtWidgets.QApplication.translate("MainWindow", "check reviewers to assign them to the selected category", None ))
self.revlist.setSortingEnabled(True)
self.revlist.headerItem().setText(0, QtWidgets.QApplication.translate("MainWindow", "Member #", None ))
self.revlist.headerItem().setText(1, QtWidgets.QApplication.translate("MainWindow", "Type", None ))
self.revlist.headerItem().setText(2, QtWidgets.QApplication.translate("MainWindow", "First", None ))
self.revlist.headerItem().setText(3, QtWidgets.QApplication.translate("MainWindow", "Last", None ))
self.revlist.headerItem().setText(4, QtWidgets.QApplication.translate("MainWindow", "Designation", None ))
self.revlist.headerItem().setText(5, QtWidgets.QApplication.translate("MainWindow", "Institution", None ))
self.revlist.headerItem().setText(6, QtWidgets.QApplication.translate("MainWindow", "Email", None ))
self.revlist.headerItem().setText(7, QtWidgets.QApplication.translate("MainWindow", "Primary Training", None ))
self.revlist.headerItem().setText(8, QtWidgets.QApplication.translate("MainWindow", "Pubmed", None ))
self.revlist.headerItem().setText(9, QtWidgets.QApplication.translate("MainWindow", "Pubmed #", None ))
self.revlist.headerItem().setText(10, QtWidgets.QApplication.translate("MainWindow", "Journal Articles", None ))
self.revlist.headerItem().setText(11, QtWidgets.QApplication.translate("MainWindow", "Reviewed Previously", None ))
self.revlist.headerItem().setText(12, QtWidgets.QApplication.translate("MainWindow", "Choice 1", None ))
self.revlist.headerItem().setText(13, QtWidgets.QApplication.translate("MainWindow", "Choice 2", None ))
self.revlist.headerItem().setText(14, QtWidgets.QApplication.translate("MainWindow", "Choice 3", None ))
self.revlist.headerItem().setText(15, QtWidgets.QApplication.translate("MainWindow", "Choice 4", None ))
self.revlist.headerItem().setText(16, QtWidgets.QApplication.translate("MainWindow", "Choice 5", None ))
self.revlist.headerItem().setText(17, QtWidgets.QApplication.translate("MainWindow", "# of Assigned Abstracts", None ))
self.revlist.headerItem().setText(18, QtWidgets.QApplication.translate("MainWindow", "Assigned Categories", None ))
self.menuFile.setTitle(QtWidgets.QApplication.translate("MainWindow", "File", None ))
self.menuAssign.setTitle(QtWidgets.QApplication.translate("MainWindow", "Assign", None ))
self.menuSorting.setTitle(QtWidgets.QApplication.translate("MainWindow", "Settings", None ))
self.menuHelp.setTitle(QtWidgets.QApplication.translate("MainWindow", "Help", None ))
self.actionOpenSpreadsheet.setText(QtWidgets.QApplication.translate("MainWindow", "Open Spreadsheet (.xls)", None ))
self.actionOpenSession.setText(QtWidgets.QApplication.translate("MainWindow", "Open Session (.mpc)", None ))
self.actionSaveSession.setText(QtWidgets.QApplication.translate("MainWindow", "Save Session (.mpc)", None ))
self.actionExport.setText(QtWidgets.QApplication.translate("MainWindow", "Export (.xls)", None ))
self.actionQuit.setText(QtWidgets.QApplication.translate("MainWindow", "Quit", None ))
self.actionAssign.setText(QtWidgets.QApplication.translate("MainWindow", "Assign", None ))
self.actionClearAssignments.setText(QtWidgets.QApplication.translate("MainWindow", "Clear assignments", None ))
self.actionAssignAllReviewers.setText(QtWidgets.QApplication.translate("MainWindow", "Assign all reviewers", None ))
self.actionAssignReserves.setText(QtWidgets.QApplication.translate("MainWindow", "Assign reserves", None ))
self.actionAssignAllLabels.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by label", None ))
self.actionAssignAllReviewCategories.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by review category", None ))
self.actionAssignAllSubmissionCategories.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by submission category", None ))
self.actionAssignFirst.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by first label", None ))
self.actionAssignSecond.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by second label", None ))
self.actionAssignThird.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by third label", None ))
self.actionAssignFourth.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by fourth label", None ))
self.actionAssignFifth.setText(QtWidgets.QApplication.translate("MainWindow", "Assign by fifth label", None ))
self.actionReviewerChoice.setText(QtWidgets.QApplication.translate("MainWindow", "Reviewer\'s Choice", None ))
self.actionReviewerChoice.setToolTip(QtWidgets.QApplication.translate("MainWindow", "Reviewer\'s Choice", None ))
self.actionReviewerChoice.setStatusTip(QtWidgets.QApplication.translate("MainWindow", "sort based off of the reviwer\'s top 5 category choices", None ))
self.actionAbout.setText(QtWidgets.QApplication.translate("MainWindow", "About", None ))
self.actionWorkflow.setText(QtWidgets.QApplication.translate("MainWindow", "Typical Workflow", None ))
self.actionColorize.setText(QtWidgets.QApplication.translate("MainWindow", "Colorize Reviewer Choices", None ))
self.actionAbstractLimit.setText(QtWidgets.QApplication.translate("MainWindow", "Maximum abstracts per reviewer", None ))
self.actionAbstractLowerLimit.setText(QtWidgets.QApplication.translate("MainWindow", "Minimum abstracts per reviewer", None ))
self.actionReviewerLimit.setText(QtWidgets.QApplication.translate("MainWindow", "Minimum reviewers per abstract", None ))
self.actionReviewerUpperLimit.setText(QtWidgets.QApplication.translate("MainWindow", "Maximum reviewers per abstract", None ))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from flask import request, jsonify, abort, Blueprint
from app import app, db, redis_pool
from app.api import common
from app.controller import MovieRecController
from app.models import user_schema, movie_schema, rating_schema
app_controller = MovieRecController(db,
redis_pool=redis_pool,
default_rating=app.config.get("DEFAULT_RATING"),
top_n=app.config.get("TOP_N"))
api = Blueprint(name="v1", import_name="api")
@api.route('/', methods=['GET'])
def hello():
return common.hello()
@api.route('/user/<int:user_id>', methods=['GET'])
def get_user(user_id):
user_info = app_controller.get_user_info(user_id)
return abort(404) if user_info is None else jsonify(user_schema.dump(user_info).data)
@api.route('/user', methods=['PUT'])
def add_user():
content = request.json
gender = None if content is None else content.get('gender')
year_of_birth = None if content is None else content.get('year_of_birth')
resulting_user = app_controller.add_user(gender, year_of_birth)
return jsonify(user_schema.dump(resulting_user).data)
@api.route('/user/<int:user_id>', methods=['DELETE'])
def del_user(user_id):
result = app_controller.delete_user(user_id)
return abort(404) if result is None else jsonify({'user_id': user_id, 'msg': 'deleted'})
@api.route('/user/<int:user_id>/ratings/latest', methods=['GET'])
def get_user_ratings(user_id):
limit = request.args.get('limit', 20)
result = app_controller.get_user_ratings(user_id, limit)
return abort(404) if result is None else jsonify({'user_id': user_id, 'limit': limit, 'ratings': result})
@api.route('/user/<int:user_id>/ratings/top', methods=['GET'])
def get_user_top_ratings(user_id):
limit = request.args.get('limit', 20)
result = app_controller.get_user_top_ratings(user_id, limit)
return abort(404) if result is None else jsonify({'user_id': user_id, 'limit': limit, 'ratings': result})
@api.route('/movie/<int:movie_id>', methods=['GET'])
def get_movie_info(movie_id):
result = app_controller.get_movie_info(movie_id)
return abort(404) if result is None else jsonify(movie_schema.dump(result).data)
@api.route('/movies/top', methods=['GET'])
def get_top_movies():
limit = request.args.get('limit', 100)
rating_limit = request.args.get('rating_limit', None)
result = app_controller.get_top_movies(limit, rating_limit)
return abort(404) if result is None else jsonify(top_movies=result)
@api.route('/user/<int:user_id>/rating', methods=['PUT'])
def set_user_rating(user_id):
content = request.json
movie_id = int(content['movie_id'])
rating = float(content['rating'])
result = app_controller.set_movie_rating(user_id, movie_id, rating)
return abort(404) if result is None else jsonify(rating_schema.dump(result).data)
@api.route('/user/<int:user_id>/rating', methods=['DELETE'])
def del_user_rating(user_id):
content = request.json
movie_id = int(content['movie_id'])
deletion_result = app_controller.delete_movie_rating(user_id, movie_id)
if deletion_result is None:
return abort(404)
else:
return jsonify({'user_id': user_id, 'movie_id': movie_id, 'msg': 'deleted'})
@api.route('/user/<int:user_id>/watched', methods=['PUT', 'DELETE'])
def handle_user_watched(user_id):
content = request.json
movie_id = int(content['movie_id'])
if request.method != 'PUT' and request.method != 'DELETE':
# return with http error code 405 --- Method Not Allowed
abort(405)
    set_watched = request.method != 'DELETE'
result = app_controller.set_movie_watched(user_id, movie_id, set_watched=set_watched)
    return abort(404) if result is None else jsonify({'user_id': user_id, 'movie_id': movie_id, 'watched': result})
@api.route("/user/<int:user_id>/recommendations", methods=['GET'])
def recommendations(user_id):
result = app_controller.get_recommendations(user_id)
if result is None:
return abort(404)
else:
resulting_movies = [movie_schema.dump(m).data for m in result]
return jsonify({'user_id': user_id, 'recommendations': resulting_movies})
|
dictionary = {
'À': r'\`A', 'Á': r"\'A", 'Â': r'\^A', 'Ã': r'\~A', 'Ä': r'\"A',
'Å': r'\r{A}', 'Æ': r'\AE', 'Ç': r'\c{C}', 'È': r'\`E', 'É': r"\'E",
'Ê': r'\^E', 'Ë': r'\"E', 'Ì': r'\`I', 'Í': r"\'I", 'Î': r'\^I',
'Ï': r'\"I', 'Ð': r'\DH', 'Ñ': r'\~N', 'Ò': r'\`O', 'Ó': r"\'O",
'Ô': r'\^O', 'Õ': r'\~O', 'Ö': r'\"O', 'Ø': r'\O', 'Ù': r'\`U',
'Ú': r"\'U", 'Û': r'\^U', 'Ü': r'\"U', 'Ý': r"\'Y", 'ß': r'\ss',
'à': r'\`a', 'á': r"\'a", 'â': r'\^a', 'ã': r'\~a', 'ä': r'\"a',
'å': r'\r{a}', 'æ': r'\ae', 'ç': r'\c{c}', 'è': r'\`e', 'é': r"\'e",
'ê': r'\^e', 'ë': r'\"e', 'ì': r'\`i', 'í': r"\'i", 'î': r'\^i',
'ï': r'\"i', 'ð': r'\dh', 'ñ': r'\~n', 'ò': r'\`o', 'ó': r"\'o",
'ô': r'\^o', 'õ': r'\~o', 'ö': r'\"o', 'ø': r'\o', 'ù': r'\`u',
'ú': r"\'u", 'û': r'\^u', 'ü': r'\"u', 'ý': r"\'y", 'ÿ': r'\"y',
'Ā': r'\=A', 'ā': r'\=a', 'Ă': r'\u{A}', 'ă': r'\u{a}', 'Ą': r'\k{A}',
'ą': r'\k{a}', 'Ć': r"\'C", 'ć': r"\'c", 'Ĉ': r'\^C', 'ĉ': r'\^c',
'Ċ': r'\.C', 'ċ': r'\.c', 'Č': r'\v{C}', 'č': r'\v{c}', 'Ď': r'\v{D}',
'ď': r'\v{d}', 'Đ': r'\DJ', 'đ': r'\dj', 'Ē': r'\=E', 'ē': r'\=e',
'Ĕ': r'\u{E}', 'ĕ': r'\u{e}', 'Ė': r'\.E', 'ė': r'\.e', 'Ę': r'\k{E}',
'ę': r'\k{e}', 'Ě': r'\v{E}', 'ě': r'\v{e}', 'Ĝ': r'\^G', 'ĝ': r'\^g',
'Ğ': r'\u{G}', 'ğ': r'\u{g}', 'Ġ': r'\.G', 'ġ': r'\.g', 'Ģ': r'\c{G}',
'ģ': r'\c{g}', 'Ĥ': r'\^H', 'ĥ': r'\^h', 'Ĩ': r'\~I', 'ĩ': r'\~i',
'Ī': r'\=I', 'ī': r'\=i', 'Ĭ': r'\u{I}', 'ĭ': r'\u{i}', 'Į': r'\k{I}',
'į': r'\k{i}', 'İ': r'\.I', 'Ĵ': r'\^J', 'ĵ': r'\^j', 'Ķ': r'\c{K}',
'ķ': r'\c{k}', 'Ĺ': r"\'L", 'ĺ': r"\'l", 'Ļ': r'\c{L}', 'ļ': r'\c{l}',
'Ľ': r'\v{L}', 'ľ': r'\v{l}', 'Ł': r'\L', 'ł': r'\l', 'Ń': r"\'N",
'ń': r"\'n", 'Ņ': r'\c{N}', 'ņ': r'\c{n}', 'Ň': r'\v{N}', 'ň': r'\v{n}',
'Ŋ': r'\NG', 'ŋ': r'\ng', 'Ō': r'\=O', 'ō': r'\=o', 'Ŏ': r'\u{O}',
'ŏ': r'\u{o}', 'Ő': r'\H{O}', 'ő': r'\H{o}', 'Ŕ': r"\'R", 'ŕ': r"\'r",
'Ŗ': r'\c{R}', 'ŗ': r'\c{r}', 'Ř': r'\v{R}', 'ř': r'\v{r}', 'Ś': r"\'S",
'ś': r"\'s", 'Ŝ': r'\^S', 'ŝ': r'\^s', 'Ş': r'\c{S}', 'ş': r'\c{s}',
'Š': r'\v{S}', 'š': r'\v{s}', 'Ţ': r'\c{T}', 'ţ': r'\c{t}', 'Ť': r'\v{T}',
'ť': r'\v{t}', 'Ũ': r'\~U', 'ũ': r'\~u', 'Ū': r'\=U', 'ū': r'\=u',
'Ŭ': r'\u{U}', 'ŭ': r'\u{u}', 'Ů': r'\r{U}', 'ů': r'\r{u}', 'Ű': r'\H{U}',
'ű': r'\H{u}', 'Ų': r'\k{U}', 'ų': r'\k{u}', 'Ŵ': r'\^W', 'ŵ': r'\^w',
'Ŷ': r'\^Y', 'ŷ': r'\^y', 'Ÿ': r'\"Y', 'Ź': r"\'Z", 'ź': r"\'z",
'Ż': r'\.Z', 'ż': r'\.z', 'Ž': r'\v{Z}', 'ž': r'\v{z}'}
def u2tex(string):
return ''.join(dictionary.get(char, char) for char in string)
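# Illustrative example of the mapping above:
#     >>> print(u2tex("Café Müller"))
#     Caf\'e M\"uller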
|
from setuptools import setup
import os
from os import path
from gekkopy import version
name = "gekkopy"
description = "Python API for Gekko trading bot"
long_description = "See https://github.com/askmike/gekko for the trading bot."
this_directory = path.abspath(path.dirname(__file__))
def read(filename):
with open(os.path.join(this_directory, filename), "rb") as f:
return f.read().decode("utf-8")
if os.path.exists(os.path.join(this_directory, "README.md")):
long_description = read("README.md")
packages = ["gekkopy"]
url = "https://github.com/mariushelf/gekkopy"
author = "Marius Helf"
author_email = "[email protected]"
classifiers = [
"Development Status :: 3",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Topic :: Office/Business :: Financial :: Investment",
]
setup(
name=name,
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author=author,
url=url,
author_email=author_email,
classifiers=classifiers,
install_requires=["requests", "matplotlib", "pandas", "flask", "numpy"],
version=version.__version__,
packages=packages,
license="MIT",
)
|
"""add company details confirmed column
Revision ID: 1120
Revises: 1110
Create Date: 2018-03-06 09:05:13.221057
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1120'
down_revision = '1110'
def upgrade():
op.add_column('suppliers',
sa.Column(
'company_details_confirmed',
sa.Boolean(),
nullable=False,
default=False,
server_default=sa.sql.expression.literal(False)
)
)
def downgrade():
op.drop_column('suppliers', 'company_details_confirmed')
|
#
# PySNMP MIB module DKSF-253-6-X-A-X (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DKSF-253-6-X-A-X
# Produced by pysmi-0.3.4 at Mon Apr 29 18:32:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
snmpTraps, = mibBuilder.importSymbols("SNMPv2-MIB", "snmpTraps")
Counter32, mib_2, iso, ObjectIdentity, TimeTicks, IpAddress, ModuleIdentity, Unsigned32, Gauge32, Counter64, NotificationType, enterprises, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "mib-2", "iso", "ObjectIdentity", "TimeTicks", "IpAddress", "ModuleIdentity", "Unsigned32", "Gauge32", "Counter64", "NotificationType", "enterprises", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32")
TimeStamp, TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TruthValue", "DisplayString", "TextualConvention")
netPing = ModuleIdentity((1, 3, 6, 1, 4, 1, 25728, 253))
netPing.setRevisions(('2015-09-29 00:00', '2014-11-19 00:00', '2014-06-12 00:00', '2011-02-04 00:00', '2010-08-30 00:00', '2010-08-20 00:00', '2010-08-13 00:00', '2010-08-11 00:00', '2010-07-08 00:00', '2010-04-14 00:00',))
if mibBuilder.loadTexts: netPing.setLastUpdated('201509290000Z')
if mibBuilder.loadTexts: netPing.setOrganization('Alentis Electronics')
lightcom = MibIdentifier((1, 3, 6, 1, 4, 1, 25728))
npIo = MibIdentifier((1, 3, 6, 1, 4, 1, 25728, 8900))
npIoTable = MibTable((1, 3, 6, 1, 4, 1, 25728, 8900, 1), )
if mibBuilder.loadTexts: npIoTable.setStatus('current')
npIoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1), ).setIndexNames((0, "DKSF-253-6-X-A-X", "npIoLineN"))
if mibBuilder.loadTexts: npIoEntry.setStatus('current')
npIoLineN = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoLineN.setStatus('current')
npIoLevelIn = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoLevelIn.setStatus('current')
npIoLevelOut = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(-1, 0, 1))).clone(namedValues=NamedValues(("flip", -1), ("low", 0), ("high", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npIoLevelOut.setStatus('current')
npIoMemo = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoMemo.setStatus('current')
npIoPulseCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 9), Counter32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npIoPulseCounter.setStatus('current')
npIoSinglePulseDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 25500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npIoSinglePulseDuration.setStatus('current')
npIoSinglePulseStart = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 8900, 1, 1, 13), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npIoSinglePulseStart.setStatus('current')
npIoTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25728, 8900, 2))
npIoTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 0))
npIoTrapLineN = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLineN.setStatus('current')
npIoTrapLevelIn = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLevelIn.setStatus('current')
npIoTrapMemo = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapMemo.setStatus('current')
npIoTrapLevelIn1 = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLevelIn1.setStatus('current')
npIoTrapLevelIn2 = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLevelIn2.setStatus('current')
npIoTrapLevelIn3 = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLevelIn3.setStatus('current')
npIoTrapLevelIn4 = MibScalar((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npIoTrapLevelIn4.setStatus('current')
npIoTrap = NotificationType((1, 3, 6, 1, 4, 1, 25728, 8900, 2, 0, 1)).setObjects(("DKSF-253-6-X-A-X", "npIoTrapLineN"), ("DKSF-253-6-X-A-X", "npIoTrapLevelIn"), ("DKSF-253-6-X-A-X", "npIoTrapMemo"), ("DKSF-253-6-X-A-X", "npIoTrapLevelIn1"), ("DKSF-253-6-X-A-X", "npIoTrapLevelIn2"), ("DKSF-253-6-X-A-X", "npIoTrapLevelIn3"), ("DKSF-253-6-X-A-X", "npIoTrapLevelIn4"))
if mibBuilder.loadTexts: npIoTrap.setStatus('current')
npElecMeter = MibIdentifier((1, 3, 6, 1, 4, 1, 25728, 9700))
npElecTable = MibTable((1, 3, 6, 1, 4, 1, 25728, 9700, 1), )
if mibBuilder.loadTexts: npElecTable.setStatus('current')
npElecEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1), ).setIndexNames((0, "DKSF-253-6-X-A-X", "npElecIndex"))
if mibBuilder.loadTexts: npElecEntry.setStatus('current')
npElecIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: npElecIndex.setStatus('current')
npElecPulsesPerKwh = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npElecPulsesPerKwh.setStatus('current')
npElecPower = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: npElecPower.setStatus('current')
npElecEnergy = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1, 4), Counter32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npElecEnergy.setStatus('current')
npElecEnergy100 = MibTableColumn((1, 3, 6, 1, 4, 1, 25728, 9700, 1, 1, 5), Counter32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npElecEnergy100.setStatus('current')
npReboot = MibIdentifier((1, 3, 6, 1, 4, 1, 25728, 911))
npSoftReboot = MibScalar((1, 3, 6, 1, 4, 1, 25728, 911, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npSoftReboot.setStatus('current')
npResetStack = MibScalar((1, 3, 6, 1, 4, 1, 25728, 911, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npResetStack.setStatus('current')
npForcedReboot = MibScalar((1, 3, 6, 1, 4, 1, 25728, 911, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: npForcedReboot.setStatus('current')
mibBuilder.exportSymbols("DKSF-253-6-X-A-X", npIoEntry=npIoEntry, npIoLevelIn=npIoLevelIn, npIoTrapLevelIn4=npIoTrapLevelIn4, PYSNMP_MODULE_ID=netPing, npResetStack=npResetStack, npElecIndex=npElecIndex, npElecEnergy100=npElecEnergy100, npIoTrapMemo=npIoTrapMemo, npIoTrapLineN=npIoTrapLineN, npIoPulseCounter=npIoPulseCounter, npIoLevelOut=npIoLevelOut, npIoMemo=npIoMemo, npElecPower=npElecPower, npIoTrapLevelIn2=npIoTrapLevelIn2, npElecEntry=npElecEntry, npIo=npIo, npIoLineN=npIoLineN, npIoTraps=npIoTraps, npIoTrapLevelIn1=npIoTrapLevelIn1, netPing=netPing, npIoTrapLevelIn3=npIoTrapLevelIn3, npElecPulsesPerKwh=npElecPulsesPerKwh, npIoTable=npIoTable, npIoTrapLevelIn=npIoTrapLevelIn, npElecMeter=npElecMeter, npIoTrapPrefix=npIoTrapPrefix, npIoSinglePulseDuration=npIoSinglePulseDuration, npIoSinglePulseStart=npIoSinglePulseStart, npForcedReboot=npForcedReboot, npElecTable=npElecTable, npIoTrap=npIoTrap, lightcom=lightcom, npSoftReboot=npSoftReboot, npReboot=npReboot, npElecEnergy=npElecEnergy)
|
class STree:
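    """Segment tree over `l` supporting range-minimum queries (returning the index of
    the minimum value, ties broken toward the left) and range-assignment updates via
    lazy propagation.
    """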
def __init__(self, l):
self.l = l
self.n = len(l)
self.st = [0] * (4 * self.n)
self.islazy = [False] * (4 * self.n)
self.lazy = [0] * (4 * self.n)
self.build(1, 0, self.n - 1)
def left(self, p):
return p << 1
def right(self, p):
return (p << 1) + 1
def build(self, p, l, r):
if (l == r):
self.st[p] = l
else:
self.build(self.left(p), l, (l + r) // 2)
self.build(self.right(p), (l + r) // 2 + 1, r)
p1 = self.st[self.left(p)]
p2 = self.st[self.right(p)]
if self.l[p1] <= self.l[p2]:
self.st[p] = p1
else:
self.st[p] = p2
def _q(self, p, pl, pr, fr, to):
if fr > pr or to < pl:
return -1, -1
if self.islazy[p]:
return fr, self.lazy[p]
if fr <= pl and to >= pr:
return self.st[p], self.l[self.st[p]]
res1 = self._q(self.left(p), pl, (pl + pr) // 2, fr, to)
res2 = self._q(self.right(p), (pl + pr) // 2 + 1, pr, fr, to)
if (res1[0] == -1):
return res2
elif res2[0] == -1:
return res1
elif res1[1] <= res2[1]:
return res1
else:
return res2
def _u(self, p, pl, pr, fr, to, newval):
if fr > pr or to < pl:
return self.st[p]
if fr == pl and to == pr:
if fr == to:
self.l[pl] = newval
self.islazy[p] = False
else:
self.lazy[p] = newval
self.islazy[p] = True
self.st[p] = fr
return self.st[p]
pm = (pl + pr) // 2
if self.islazy[p]:
self.islazy[p] = False
self.islazy[self.left(p)] = True
self.islazy[self.right(p)] = True
self.lazy[self.left(p)] = self.lazy[p]
self.lazy[self.right(p)] = self.lazy[p]
self.st[self.left(p)] = pl
self.st[self.right(p)] = pm
p1 = self._u(self.left(p), pl, pm, max(fr, pl), min(to, pm), newval)
p2 = self._u(self.right(p), pm + 1, pr, max(fr, pm + 1), min(to, pr), newval)
if self.l[p1] <= self.l[p2]:
self.st[p] = p1
else:
self.st[p] = p2
return self.st[p]
def q(self, fr, to):
return self._q(1, 0, self.n - 1, fr, to)[0]
def u(self, fr, to, val):
return self._u(1, 0, self.n - 1, fr, to, val)
l = [18, 17, 13, 19, 15, 11, 20]
st = STree(l)
print(st.q(0, 0) == 0)
print(st.q(1, 3) == 2)
print(st.q(4, 6) == 5)
st.u(5, 5, 99)
print(st.q(1, 3) == 2)
print(st.q(4, 6) == 4)
st.u(0, 3, 7)
print(st.q(1, 3) == 1)
print(st.q(3, 6) == 3)
st.u(3, 4, 5)
print(st.q(1, 3) == 3)
print(st.q(4, 6) == 4)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbdishEstimatedInfo(object):
def __init__(self):
self._ds_id = None
self._ds_type = None
self._inventory = None
self._out_shop_id = None
self._shop_id = None
self._status = None
self._update_user = None
@property
def ds_id(self):
return self._ds_id
@ds_id.setter
def ds_id(self, value):
self._ds_id = value
@property
def ds_type(self):
return self._ds_type
@ds_type.setter
def ds_type(self, value):
self._ds_type = value
@property
def inventory(self):
return self._inventory
@inventory.setter
def inventory(self, value):
self._inventory = value
@property
def out_shop_id(self):
return self._out_shop_id
@out_shop_id.setter
def out_shop_id(self, value):
self._out_shop_id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def update_user(self):
return self._update_user
@update_user.setter
def update_user(self, value):
self._update_user = value
def to_alipay_dict(self):
params = dict()
if self.ds_id:
if hasattr(self.ds_id, 'to_alipay_dict'):
params['ds_id'] = self.ds_id.to_alipay_dict()
else:
params['ds_id'] = self.ds_id
if self.ds_type:
if hasattr(self.ds_type, 'to_alipay_dict'):
params['ds_type'] = self.ds_type.to_alipay_dict()
else:
params['ds_type'] = self.ds_type
if self.inventory:
if hasattr(self.inventory, 'to_alipay_dict'):
params['inventory'] = self.inventory.to_alipay_dict()
else:
params['inventory'] = self.inventory
if self.out_shop_id:
if hasattr(self.out_shop_id, 'to_alipay_dict'):
params['out_shop_id'] = self.out_shop_id.to_alipay_dict()
else:
params['out_shop_id'] = self.out_shop_id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.update_user:
if hasattr(self.update_user, 'to_alipay_dict'):
params['update_user'] = self.update_user.to_alipay_dict()
else:
params['update_user'] = self.update_user
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbdishEstimatedInfo()
if 'ds_id' in d:
o.ds_id = d['ds_id']
if 'ds_type' in d:
o.ds_type = d['ds_type']
if 'inventory' in d:
o.inventory = d['inventory']
if 'out_shop_id' in d:
o.out_shop_id = d['out_shop_id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'status' in d:
o.status = d['status']
if 'update_user' in d:
o.update_user = d['update_user']
return o
|
import yaml
import os
class Config():
def __init__(self, params=None):
self.params = params
self.config_file_path = params.config_file_path
self.environment = params.pos_env
self._load_config_file(self.config_file_path)
if not hasattr(self, 'log_level'):
setattr(self, 'log_level', 'info')
if not hasattr(self, 'log_file_path'):
setattr(self, 'log_file_path', './pos.log')
if not hasattr(self, 'web_port'):
setattr(self, 'web_port', '80')
def _parse_section(self, key, value):
if type(value) == dict:
for subkey,subvalue in value.items():
self._parse_section(key + "_" + subkey, subvalue)
else:
setattr(self, key, value)
def _load_config_file(self, filepath):
try:
with open(filepath, 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)[self.environment]
for key,value in cfg.items():
self._parse_section(key,value)
        except (FileNotFoundError, NameError, KeyError, ValueError) as err:
print(err)
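# Example config file sketch (hypothetical values). The top-level key must match the
# `pos_env` environment passed in `params`; nested sections are flattened with
# underscores by _parse_section, so `log: {level: debug}` becomes `self.log_level`:
#
#   production:
#     log:
#       level: debug
#       file_path: /var/log/pos.log
#     web_port: 8080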
|
import os
from django.conf import settings
from django.contrib.sites.models import Site
# setup the sites available for this project
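# Hypothetical BOOTUP_SITES layout consumed below (keys are SITE_ID values):
#
#   BOOTUP_SITES = {
#       1: {'name': 'example', 'domain': 'example.com'},
#       2: {'name': 'staging', 'domain': 'staging.example.com'},
#   }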
def setup_sites():
"""
Setup sites (name, domain) available for this project (SITE_ID will decide the active site)
"""
site_info = getattr(settings, 'BOOTUP_SITES', None)
if site_info:
        ids = sorted(site_info.keys())
for id in ids:
site, created = Site.objects.get_or_create(pk=id)
if site:
site.name = site_info[id]['name']
site.domain = site_info[id]['domain']
site.save() |
""" Runs the COVID Lung Ultrasound task on DeAI.
Prerequisites:
Download a chromedriver from here: https://sites.google.com/a/chromium.org/chromedriver/downloads.
Extract the chromedriver in the current folder.
Prepare the covid positive and covid negative images in separate folders.
Constants:
Use `POSITIVE_CLASS_PATH` and `NEGATIVE_CLASS_PATH` to point to the two data folders.
Use `NUM_IMAGES` to limit the number of images per peer to test faster.
Use `NUM_PEERS` to define the number of peers to run.
Use `TRAINING_TYPE` to choose between training alone or distributed.
How to run:
python run_lus_covid.py
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
import os
# Defines how many browser tabs to open
NUM_PEERS = 3
# Should match the name of the task in the task list and is case sensitive
TASK_NAME = 'COVID Lung Ultrasound'
# can be either 'Train Alone' or 'Train Distributed'. Should match the text of the button in the train screen.
TRAINING_TYPE = 'Train Distributed'
# Currently we take the first `NUM_IMAGES` in the folder for each peer. We should make a more complex distribution.
NUM_IMAGES = 100
# paths to folders containing covid positive and covid negative patients
POSITIVE_CLASS_PATH = r'preprocessed_images\covid-positive'
NEGATIVE_CLASS_PATH = r'preprocessed_images\covid-negative'
def get_files(directory, num_images):
files = []
for dirpath,_,filenames in os.walk(directory):
for f in filenames:
if '.png' in f:
files.append(os.path.abspath(os.path.join(dirpath, f)))
return ' \n '.join(files[:num_images])
# Download and extract chromedriver from here: https://sites.google.com/a/chromium.org/chromedriver/downloads
drivers = [webdriver.Chrome(executable_path=r"chromedriver.exe") for i in range(NUM_PEERS)]
for driver in drivers:
# Click 'Start Building' on home page
driver.get("https://epfml.github.io/DeAI/#/")
elements = driver.find_elements_by_tag_name('button')
for elem in elements:
if 'Start building' in elem.get_attribute('innerHTML'):
elem.click()
# Find LUS-Covid task and click 'Join' on task list page
time.sleep(0.5)
elements = driver.find_elements_by_css_selector('div.group')
for element in elements:
if TASK_NAME in element.get_attribute('innerHTML'):
button = element.find_element_by_tag_name('button')
button.click()
# Click 'Join Training' on Task Description page
elements = driver.find_elements_by_tag_name('button')
for elem in elements:
if 'Join Training' in elem.get_attribute('innerHTML'):
elem.click()
# Upload files on Task Training
time.sleep(0.5)
driver.find_element_by_id('hidden-input_lus-covid-model_COVID-Positive').send_keys(get_files(POSITIVE_CLASS_PATH, NUM_IMAGES))
driver.find_element_by_id('hidden-input_lus-covid-model_COVID-Negative').send_keys(get_files(NEGATIVE_CLASS_PATH, NUM_IMAGES))
# Start training on each driver
for driver in drivers:
elements = driver.find_elements_by_tag_name('button')
for elem in elements:
if TRAINING_TYPE in elem.get_attribute('innerHTML'):
driver.execute_script("arguments[0].scrollIntoView();", elem)
elem.click()
break |
import Elements
import Visitors
class ObjectStructure:
def __init__(self):
self.elements = []
def attach(self,element: Elements.Element):
self.elements.append(element)
def detach(self,element: Elements.Element):
self.elements.remove(element)
def accept(self,visitor: Visitors.Visitor):
for elm in self.elements:
elm.accept(visitor)
|
"""
### Usage: ctdoc [-w]
### Options:
-h, --help show this help message and exit
-w, --web build web docs
-a, --auto use auto mode (even with a plugin)
-o, --omit omit any files from autodoc?
Run from cantools root (contains setup.py, cantools/, README.md, etc), from root
of a CT plugin, or from within a custom project. In cantools, builds docs for all
frontend (js) and CLI (py) files. In plugin, docs consist of about file (about.txt),
initialization config (init.py) and default frontend config (js/config.js). In custom
(project) mode (when ctdoc is run somewhere other than cantools root or a plugin root,
and additionally a configuration file, doc.cfg, is present), for each path declared in
doc.cfg, include the docstring of each file specified, as well as the contents of
about.txt (if present). Lastly, auto mode doesn't require configuration (doc.cfg) --
instead, it recurses through the directories of your project, and includes the contents of
any about.txt files, as well as (the top of) any py/js file that starts with a docstring.
"""
import os, json
from optparse import OptionParser
from cantools import __version__, config
from cantools.util import read, write, log, cp, cmd
WEB = []
ALTS = {
"pubsub": os.path.join("pubsub", "__init__")
}
HERE = os.path.abspath(".").split(os.path.sep)[-1]
CUSTOM = os.path.isfile("doc.cfg") and read("doc.cfg")
ISPLUGIN = not CUSTOM and HERE.startswith("ct") and HERE
AUTO = HERE != "cantools" and not CUSTOM and not ISPLUGIN
if not CUSTOM and not AUTO:
if ISPLUGIN:
JSPATH = os.path.join(HERE, "js")
BPATH = "."
ALTS["init"] = os.path.join(ISPLUGIN, "init")
else:
JSPATH = os.path.join(HERE, "CT")
BPATH = os.path.join(HERE, "scripts")
def space(data):
return " " + data.replace("\n", "\n ")
def dsBack(cmd):
cpath = os.path.join(BPATH, "%s.py"%(ALTS.get(cmd, cmd),))
log(cpath, 2)
bdata = read(cpath)
fdata = ISPLUGIN and space(bdata) or "## ct%s\n%s"%(cmd, bdata[4:].split('\n"""')[0])
WEB[-1]["children"].append({
"name": cmd,
"content": fdata
})
return fdata
def dsFront(mod, modname=None, iline=None):
modname = modname or "CT.%s"%(mod[:-3],)
iline = iline or (mod == "ct.js" and '<script src="/js/CT/ct.js"></script>' or 'CT.require("%s");'%(modname,))
log(modname, 2)
mdata = read(os.path.join(JSPATH, mod))
rdata = "\n".join([
"## %s"%(modname,),
"### Import line: '%s'"%(iline,),
(ISPLUGIN and mod == "config.js") and space(mdata) or mdata[3:].split("\n*/")[0]
])
WEB[-1]["children"].append({
"name": mod,
"content": rdata
})
return rdata
def back():
log("back", 1)
wobj = { "children": [] }
WEB.append(wobj)
f = []
if ISPLUGIN:
wobj["name"] = "Back (Init Config)"
fdata = [dsBack("init")]
else:
wobj["name"] = "Back (CLI)"
fdata = list(map(dsBack, ["init", "start", "deploy", "pubsub", "migrate", "index", "doc"]))
f.append("# %s"%(wobj["name"],))
f += fdata
return f
def front():
log("front", 1)
wobj = { "children": [] }
WEB.append(wobj)
    f = []
    fdata = []
if not ISPLUGIN:
wobj["name"] = "Front (JS Library)"
plist = os.listdir(JSPATH)
plist.sort()
fdata = list(map(dsFront, [i for i in plist if i.endswith("js")]))
elif os.path.isfile(os.path.join(JSPATH, "config.js")):
wobj["name"] = "Front (JS Config)"
fdata = [dsFront("config.js", "core.config.%s"%(ISPLUGIN,), 'CT.require("core.config");')]
if "name" in wobj:
f.append("# %s"%(wobj["name"],))
f += fdata
return f
def customChunk(path, fnames):
log("custom chunk: %s"%(path,), 1)
kids = []
wobj = { "name": path, "children": kids }
WEB.append(wobj)
f = ["## %s"%(path,)]
afile = os.path.join(path, "about.txt")
if os.path.isfile(afile):
adata = read(afile)
f.append(adata)
kids.append({
"name": "about",
"content": adata
})
for fname in fnames:
fdata = read(os.path.join(path, fname))
if fname == "config.js" or fname.startswith("ct.cfg"): # for non-local backend cfgs
fdata = space(fdata)
elif fname.endswith(".js"):
fdata = fdata[3:].split("\n*/")[0]
elif fname.endswith(".py"):
fdata = fdata[4:].split('\n"""')[0]
f.append("### %s\n%s"%(fname, fdata))
kids.append({
"name": fname,
"content": fdata
})
return f
frules = {
".js": {
"top": "/*\n",
"bottom": "\n*/"
},
".py": {
"top": '"""\n',
"bottom": '\n"""'
}
}
hashead = set()
def sethead(curdir, data):
dirname = curdir.rsplit(os.path.sep, 1)[-1]
if dirname not in hashead:
hashead.add(dirname)
data.append("%s %s"%("#" * len(curdir.split(os.path.sep)), dirname))
wobj = { "name": dirname, "children": [] }
WEB.append(wobj)
return WEB[-1]["children"]
OMIT = ""
def autodoc(data, curdir, contents):
about = "about.txt"
if curdir != HERE: # probs revise this...
about = os.path.join(curdir, about)
if os.path.isfile(about):
kids = sethead(curdir, data)
adata = read(about)
data.append(adata)
kids.append({
"name": "about",
"content": adata
})
for fname in contents:
if fname in OMIT:
continue
for flag, rule in list(frules.items()):
if fname.endswith(flag):
fdata = read(os.path.join(curdir, fname))
if fdata.startswith(rule["top"]):
kids = sethead(curdir, data)
fstr = fdata[len(rule["top"]):].split(rule["bottom"])[0]
data.append("%s# %s"%("#" * len(curdir.split(os.path.sep)), fname))
data.append(fstr)
kids.append({
"name": fname,
"content": fstr
})
def build():
global OMIT
parser = OptionParser("ctdoc [-w]")
parser.add_option("-w", "--web", action="store_true",
dest="web", default=False, help="build web docs")
parser.add_option("-a", "--auto", action="store_true",
dest="auto", default=False, help="use auto mode (even with a plugin)")
parser.add_option("-o", "--omit", dest="omit", default="",
help="omit any files from autodoc?")
options, args = parser.parse_args()
log("building docs")
ds = []
if AUTO or options.auto:
OMIT = options.omit
        for curdir, dirs, files in os.walk(HERE):
            autodoc(ds, curdir, dirs + files)
else:
abdata = (ISPLUGIN or CUSTOM) and "# %s\n%s"%(HERE, read("about.txt")) or config.about%(__version__,)
ds.append(abdata)
WEB.append({
"name": HERE,
"children": [{
"name": "about",
"content": abdata
}]
})
if CUSTOM:
for line in CUSTOM.split("\n"):
path, fnames = line.split(" = ")
ds.extend(customChunk(path, fnames.split("|")))
else:
ds.extend(back())
ds.extend(front())
log("writing data", important=True)
log("README.md", 1)
write("\n\n".join(ds), "README.md")
if options.web:
log("web docs enabled!", 1)
log("building docs web application", 2)
if not os.path.isdir("docs"):
cmd("ctinit docs -p ctdocs")
log("copying data", 2)
cp("core.data = %s;"%(json.dumps(WEB, indent=4),), os.path.join("docs", "js", "core", "data.js"))
log("goodbye")
if __name__ == "__main__":
build() |
# Copyright 2018 NTT DATA
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm import utils as vnfm_utils
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Vdu(object):
def __init__(self, context, vnf_dict, heal_request_data_obj):
super(Vdu, self).__init__()
self.context = context
self.vnf_dict = vnf_dict
self.heal_request_data_obj = heal_request_data_obj
vim_id = self.vnf_dict['vim_id']
vim_res = vim_client.VimClient().get_vim(context, vim_id)
placement_attr = vnf_dict.get('placement_attr', {})
auth_attr = vim_res['vim_auth']
region_name = placement_attr.get('region_name', None)
self.heat_client = hc.HeatClient(auth_attr=auth_attr,
region_name=region_name)
def _get_resource_status(self, stack_id, rsc_name):
# Get the status of VDU resource from heat
vdu_resource = self.heat_client.resource_get(stack_id=stack_id,
rsc_name=rsc_name)
return vdu_resource.resource_status
def _resource_mark_unhealthy(self):
"""Mark the resource unhealthy using heat."""
additional_params = self.heal_request_data_obj.additional_params
for additional_param in additional_params:
resource_name = additional_param.parameter
res_status = self._get_resource_status(
self.vnf_dict['instance_id'], resource_name)
if res_status != 'CHECK_FAILED':
self.heat_client.resource_mark_unhealthy(
stack_id=self.vnf_dict['instance_id'],
resource_name=resource_name, mark_unhealthy=True,
resource_status_reason=additional_param.cause)
LOG.debug("Heat stack '%s' resource '%s' marked as "
"unhealthy", self.vnf_dict['instance_id'],
resource_name)
evt_details = (("HealVnfRequest invoked to mark resource "
"'%s' to unhealthy.") % resource_name)
vnfm_utils.log_events(self.context, self.vnf_dict,
constants.RES_EVT_HEAL,
evt_details)
else:
LOG.debug("Heat stack '%s' resource '%s' already mark "
"unhealthy.", self.vnf_dict['instance_id'],
resource_name)
def heal_vdu(self):
"""Update stack using heat.
        This will re-create the resources which are marked as unhealthy.
"""
# Mark all the resources as unhealthy
self._resource_mark_unhealthy()
self.heat_client.update(stack_id=self.vnf_dict['instance_id'],
existing=True)
LOG.debug("Heat stack '%s' update initiated to revive "
"unhealthy resources.", self.vnf_dict['instance_id'])
evt_details = (("HealVnfRequest invoked to update the stack "
"'%s'") % self.vnf_dict['instance_id'])
vnfm_utils.log_events(self.context, self.vnf_dict,
constants.RES_EVT_HEAL, evt_details)
|
"""
Contains the user settings class implementation and functions for
database interaction.
"""
import collections.abc
import datetime
from models.base_model import BaseModel
from sqlalchemy import Integer, select, func
from database import DATABASE_INSTANCE
class UserSettings(BaseModel):
"""
Class that contains the information for the settings of a user
"""
def __init__(self, *initial_data, **kwords):
self.identity = None
self.user_id = None
self.sound_direction = 'server'
self.dark_theme = True
self.sidenav_mode = 'side'
self.date_created = None
self.date_modified = None
self.init_from_dict(initial_data)
self.init_from_kwords(kwords)
def __iter__(self):
yield 'identity', self.identity
yield 'user_id', self.user_id
yield 'sound_direction', self.sound_direction
yield 'dark_theme', self.dark_theme
yield 'sidenav_mode', self.sidenav_mode
if self.date_created:
yield 'date_created', self.date_created.isoformat()
else:
            yield 'date_created', ''
if self.date_modified:
yield 'date_modified', self.date_modified.isoformat()
else:
yield 'date_modified', ''
def new_id():
"""
A function that produces a new id for the user settings data table
"""
with DATABASE_INSTANCE.engine.connect() as conn:
max_id = conn.execute(
select([
func.
max(DATABASE_INSTANCE.user_settings.c.identity, type_=Integer).
label('max')
])
).scalar()
return max_id + 1 if max_id else 1
def insert(data):
"""
A function that inserts new entries on the user settings data table
"""
if isinstance(data, UserSettings):
with DATABASE_INSTANCE.engine.connect() as conn:
settings = dict(data)
settings['identity'] = new_id()
settings['date_created'] = datetime.datetime.now()
settings['date_modified'] = datetime.datetime.now()
conn.execute(
DATABASE_INSTANCE.user_settings.insert(), dict(settings)
)
return UserSettings(settings)
    if isinstance(data, collections.abc.Sequence):
with DATABASE_INSTANCE.engine.connect() as conn:
collection = []
id_interval = 0
for entry in data:
settings = dict(entry)
settings['identity'] = new_id() + id_interval
settings['date_created'] = datetime.datetime.now()
settings['date_modified'] = datetime.datetime.now()
collection.append(settings)
id_interval += 1
conn.execute(
                DATABASE_INSTANCE.user_settings.insert(), collection
)
return [UserSettings(x) for x in collection]
return None
def update(data):
"""
A function that updates entries on the user settings data table
"""
if isinstance(data, UserSettings):
with DATABASE_INSTANCE.engine.connect() as conn:
settings = dict(data)
settings['date_created'] = datetime.datetime.strptime(
settings['date_created'], '%Y-%m-%dT%H:%M:%S.%f'
)
settings['date_modified'] = datetime.datetime.now()
conn.execute(
DATABASE_INSTANCE.
user_settings.
update().
values(settings)
)
return UserSettings(settings)
    if isinstance(data, collections.abc.Sequence):
with DATABASE_INSTANCE.engine.connect() as conn:
collection = []
for entry in data:
settings = dict(entry)
settings['date_created'] = datetime.datetime.strptime(
settings['date_created'], '%Y-%m-%dT%H:%M:%S.%f'
)
settings['date_modified'] = datetime.datetime.now()
conn.execute(
DATABASE_INSTANCE.
user_settings.
update().
values(settings)
)
collection.append(settings)
return [UserSettings(x) for x in collection]
return None
def update_by_id(data):
"""
A function that updates an entry on the user settings data table
that contains the provided id
"""
if isinstance(data, UserSettings):
with DATABASE_INSTANCE.engine.connect() as conn:
settings = dict(data)
settings['date_created'] = datetime.datetime.strptime(
settings['date_created'], '%Y-%m-%dT%H:%M:%S.%f'
)
settings['date_modified'] = datetime.datetime.now()
conn.execute(
DATABASE_INSTANCE.
user_settings.
update().
where(DATABASE_INSTANCE.user_settings.c.identity == data.identity).
values(settings)
)
return UserSettings(settings)
    if isinstance(data, collections.abc.Sequence):
with DATABASE_INSTANCE.engine.connect() as conn:
collection = []
for entry in data:
settings = dict(entry)
                settings['date_created'] = datetime.datetime.strptime(
settings['date_created'], '%Y-%m-%dT%H:%M:%S.%f'
)
settings['date_modified'] = datetime.datetime.now()
conn.execute(
DATABASE_INSTANCE.
user_settings.
update().
where(DATABASE_INSTANCE.user_settings.c.identity == entry.identity).
values(settings)
)
collection.append(settings)
return [UserSettings(x) for x in collection]
return None
def delete(data):
"""
A function that deletes entries from the user settings data table
"""
if isinstance(data, UserSettings):
with DATABASE_INSTANCE.engine.connect() as conn:
conn.execute(
DATABASE_INSTANCE.
user_settings.
delete().
where(DATABASE_INSTANCE.user_settings.c.identity == data.identity)
)
    elif isinstance(data, collections.abc.Sequence):
with DATABASE_INSTANCE.engine.connect() as conn:
for entry in data:
conn.execute(
DATABASE_INSTANCE.
user_settings.
delete().
where(DATABASE_INSTANCE.user_settings.c.identity == entry.identity)
)
else:
pass
def delete_all():
"""
    A function that deletes all entries in the user settings data table
"""
with DATABASE_INSTANCE.engine.connect() as conn:
conn.execute(DATABASE_INSTANCE.user_settings.delete())
def delete_by_id(identity):
"""
A function that removes an entry from the user settings data table
that contains the provided id
"""
with DATABASE_INSTANCE.engine.connect() as conn:
conn.execute(
DATABASE_INSTANCE.
user_settings.
delete().
where(DATABASE_INSTANCE.user_settings.c.identity == identity)
)
def select_by_id(settings_id):
"""
A function that returns the entry on the user settings data table with
the provided id
"""
with DATABASE_INSTANCE.engine.connect() as conn:
collection = conn.execute(
select([DATABASE_INSTANCE.user_settings]).
where(DATABASE_INSTANCE.user_settings.c.identity == settings_id)
)
return list(map(lambda x: UserSettings(dict(x)), collection))
def select_by_user_id(user_id):
"""
A function that returns the entry on the user settings data table
with the provided user id
"""
with DATABASE_INSTANCE.engine.connect() as conn:
collection = conn.execute(
select([DATABASE_INSTANCE.user_settings]).
where(DATABASE_INSTANCE.user_settings.c.user_id == user_id)
)
return list(map(lambda x: UserSettings(dict(x)), collection))
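# Usage sketch (hypothetical values): persist settings for a user, then read them back.
#
#   created = insert(UserSettings(user_id=42, dark_theme=False))
#   stored = select_by_user_id(42)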
|
import math
from typing import List
from paddle.abc_paddle import ABCPaddle
class Paddle(ABCPaddle):
urdf_model = "paddle/paddle.urdf"
# The following are the paddle important joints ids.
# These are hard coded values, so always make sure to check these after changing paddle urdf model.
MOVE_AXIS_JOINTS = {"x": 2, "y": 1, "z": 0}
ROTATE_AXIS_JOINTS = {"x": 5, "y": 4, "z": 3}
PADDLE_LINK_ID = 5
joint_controllers = List[int]
def __init__(self, pybullet_client):
self.pybullet_client = pybullet_client
self.robot_id = self.pybullet_client.loadURDF(self.urdf_model)
self.joint_ids = [i for i in range(0, 6)]
self.pybullet_client.changeDynamics(self.robot_id, -1, mass=0.0)
# Set the friction and restitution of the paddle.
self.pybullet_client.changeDynamics(
self.robot_id, self.PADDLE_LINK_ID, lateralFriction=1, restitution=0.7
)
def reset_position(self):
self.pybullet_client.resetBasePositionAndOrientation(
self.robot_id, [0, 0, 0], [0, 0, 0, 1]
)
def create_joint_controllers(self):
self.joint_controllers = []
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("z", 0, 3, 0.5)
)
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("y", -1, 1, 0)
)
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("x", -1, 1, 0)
)
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("z_roll", -3.14, 3.14, 0)
)
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("y_roll", -3.14, 3.14, 0)
)
self.joint_controllers.append(
self.pybullet_client.addUserDebugParameter("x_roll", -3.14, 3.14, 0)
)
def read_and_update_joint_position(self):
for i in range(len(self.joint_controllers)):
self.pybullet_client.setJointMotorControl2(
self.robot_id,
i,
self.pybullet_client.POSITION_CONTROL,
self.pybullet_client.readUserDebugParameter(self.joint_controllers[i]),
)
def set_angle_on_axis(self, axis, angle):
"""Parameter `angle` should be given in degrees."""
self.pybullet_client.setJointMotorControl2(
self.robot_id,
self.ROTATE_AXIS_JOINTS[axis],
self.pybullet_client.POSITION_CONTROL,
targetPosition=angle * math.pi / 180,
)
def set_angles(self, x_angle, y_angle):
self.set_angle_on_axis("x", x_angle)
self.set_angle_on_axis("y", y_angle)
def rotate_around_axis(self, axis, angle):
"""Parameter `angle` should be given in degrees."""
joint_pos = self.pybullet_client.getJointState(
self.robot_id, self.ROTATE_AXIS_JOINTS[axis]
)[0]
self.pybullet_client.setJointMotorControl2(
self.robot_id,
self.ROTATE_AXIS_JOINTS[axis],
self.pybullet_client.POSITION_CONTROL,
targetPosition=joint_pos + angle * math.pi / 180,
)
# Resets all the rotation angles on the paddle.
def reset_torque_pos(self):
for axe in ["x", "y", "z"]:
self.set_angle_on_axis(axe, 0)
def move_by_vector(self, vector: List[float], vel=1):
assert len(vector) == 3
axes = ["x", "y", "z"]
for i in range(3):
joint_pos = self.pybullet_client.getJointState(
self.robot_id, self.MOVE_AXIS_JOINTS[axes[i]]
)[0]
self.pybullet_client.setJointMotorControl2(
self.robot_id,
self.MOVE_AXIS_JOINTS[axes[i]],
self.pybullet_client.POSITION_CONTROL,
targetPosition=joint_pos + vector[i],
maxVelocity=vel,
)
def move_to_position(self, position: List[float], vel=1):
assert len(position) == 3
axes = ["x", "y", "z"]
for i in range(3):
self.pybullet_client.setJointMotorControl2(
self.robot_id,
self.MOVE_AXIS_JOINTS[axes[i]],
self.pybullet_client.POSITION_CONTROL,
targetPosition=position[i],
maxVelocity=vel,
)
def get_center_position(self) -> List[float]:
"""Returns [x, y, z] coordinates of the center"""
return self.pybullet_client.getLinkState(self.robot_id, self.PADDLE_LINK_ID)[0]
def check_if_in_range(self, position: List[float]) -> bool:
"""Parameter `position` is in [x, y, z] order"""
center = self.get_center_position()
        # TODO - These 0.25 thresholds are only placeholders; if we want to do this
        # correctly, we will have to take the paddle angles into account.
return (
abs(position[0] - center[0]) < 0.25 and abs(position[1] - center[1]) < 0.25
)
def steer_with_keyboard(self, rotation_speed, x_steering=[0], y_steering=[0]):
p = self.pybullet_client
keys = p.getKeyboardEvents()
# handle keyboard events
for k, v in keys.items():
if k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_TRIGGERED):
x_steering[0] = -1
if k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_RELEASED):
x_steering[0] = 0
if k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_TRIGGERED):
x_steering[0] = 1
if k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_RELEASED):
x_steering[0] = 0
if k == p.B3G_UP_ARROW and (v & p.KEY_WAS_TRIGGERED):
y_steering[0] = 1
if k == p.B3G_UP_ARROW and (v & p.KEY_WAS_RELEASED):
y_steering[0] = 0
if k == p.B3G_DOWN_ARROW and (v & p.KEY_WAS_TRIGGERED):
y_steering[0] = -1
if k == p.B3G_DOWN_ARROW and (v & p.KEY_WAS_RELEASED):
y_steering[0] = 0
self.rotate_around_axis("x", x_steering[0] * rotation_speed)
self.rotate_around_axis("y", y_steering[0] * rotation_speed)
|
import codecs
import random
import linecache
from reportlab.graphics import renderPM
from reportlab.graphics.renderSVG import SVGCanvas, draw
from reportlab.graphics.shapes import Drawing
from svglib.svglib import svg2rlg
import gui
gui.start_gui()
old_colour = '#FF0000'
color_path = "assets/hexColors.txt"
backgroundCol = ''
background = None
layer2 = None
def random_color():
    with open(color_path) as f:
        num_lines = sum(1 for _ in f)
    random_line = random.randint(1, num_lines)
    return linecache.getline(color_path, random_line).strip()
def get_content(filePath, newCol):
f = codecs.open(filePath, encoding='utf-8', errors='ignore')
content = f.read()
    f.close()
w = content.replace(old_colour, newCol)
return w
def combine_svg(background, foreground):
d = Drawing(1024, 1024)
d.add(background)
d.add(foreground)
c = SVGCanvas((d.width, d.height))
draw(d, c, 0, 0)
return c
def change_color(filePath, content):
f = open(filePath, 'w', encoding='utf-8', errors='ignore')
f.write(content)
    f.close()
def build_background():
    # Remember the chosen colour globally so build_layer2() can avoid reusing it.
    global backgroundCol
    newColBackground = random_color()
    backgroundCol = newColBackground
    backgroundContent = get_content(
        'assets/Components/Layer-1/background.svg', newColBackground)
    change_color('layer1.svg', backgroundContent)
    background = svg2rlg('layer1.svg')
    return background
def choose_layer2():
percent = random.randint(1, 100)
if percent <= 30:
return 'assets/Components/Layer-2/circle.svg'
elif (percent > 30) and (percent <= 50):
return 'assets/Components/Layer-2/square.svg'
elif (percent > 50) and (percent <= 65):
return 'assets/Components/Layer-2/octagon.svg'
else:
return 'assets/Components/nothing.svg'
def build_layer2():
chosenlayer2 = choose_layer2()
while True:
newColForeground = random_color()
if newColForeground != backgroundCol:
break
layer2Col = get_content(chosenlayer2, newColForeground)
change_color('layer2.svg', layer2Col)
layer2 = svg2rlg('layer2.svg')
return layer2
def choose_layer3():
percent = random.randint(1, 100)
if percent <= 20:
return 'assets/Components/Layer-3/smallcircle.svg'
elif (percent > 20) and (percent <= 45):
return 'assets/Components/Layer-3/smallsquare.svg'
elif (percent > 45) and (percent <= 55):
return 'assets/Components/Layer-3/smallhexagon.svg'
else:
return 'assets/Components/nothing.svg'
def build_layer3():
chosenlayer3 = choose_layer3()
layer3 = svg2rlg(chosenlayer3)
return layer3
def choose_layer4():
percent = random.randint(1, 100)
if percent <= 20:
return 'assets/Components/Layer-4/circle.svg'
elif (percent > 20) and (percent <= 45):
return 'assets/Components/Layer-4/square.svg'
elif (percent > 45) and (percent <= 55):
return 'assets/Components/Layer-4/hexagon.svg'
elif (percent > 55) and (percent <= 60):
return 'assets/Components/Layer-4/rotatedsquare.svg'
else:
return 'assets/Components/nothing.svg'
def build_layer4():
chosenlayer4 = choose_layer4()
layer4 = svg2rlg(chosenlayer4)
return layer4
def choose_layer5():
percent = random.randint(1, 100)
if percent <= 20:
return 'assets/Components/Layer-5/bigcircle.svg'
elif (percent > 20) and (percent <= 45):
return 'assets/Components/Layer-5/bigsquare.svg'
elif (percent > 45) and (percent <= 55):
return 'assets/Components/Layer-5/bighexagon.svg'
else:
return 'assets/Components/nothing.svg'
def build_layer5():
chosenlayer5 = choose_layer5()
layer5 = svg2rlg(chosenlayer5)
return layer5
def create_image(name_ending):
background = build_background()
layer2 = build_layer2()
layer3 = build_layer3()
layer4 = build_layer4()
layer5 = build_layer5()
firstcombined = combine_svg(background, layer2)
firstcombined.save("NFT.svg")
a = svg2rlg("NFT.svg")
secondcombined = combine_svg(a, layer3)
secondcombined.save("NFT.svg")
a = svg2rlg("NFT.svg")
thirdcombined = combine_svg(a, layer4)
thirdcombined.save("NFT.svg")
a = svg2rlg("NFT.svg")
combined = combine_svg(a, layer5)
combined.save("NFT.svg")
a = svg2rlg("NFT.svg")
renderPM.drawToFile(a, gui.folder_name + "/NFT_" + str(name_ending) + ".png", fmt="PNG")
for i in range (0, int(gui.image_number)):
create_image(i+1)
|
import unittest
from click.testing import CliRunner
from mock import patch
from tenx.app import TenxApp
from tenx.reads_cli import reads_cli, reads_download_cmd
class TenxRdsCliTest(unittest.TestCase):
def setUp(self):
if TenxApp.config is None: TenxApp()
TenxApp.config["TENX_DATA_PATH"] = "/mnt/disks/data"
TenxApp.config["TENX_REMOTE_URL"] = "gs://data"
def tearDown(self):
TenxApp.config = None
def test0_reads_cli(self):
runner = CliRunner()
result = runner.invoke(reads_cli, ["--help"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(reads_cli, [])
self.assertEqual(result.exit_code, 0)
@patch("tenx.reads.download")
def test_reads_download(self, dl_p):
runner = CliRunner()
result = runner.invoke(reads_download_cmd, ["--help"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(reads_download_cmd, [])
self.assertEqual(result.exit_code, 2)
result = runner.invoke(reads_download_cmd, ["MYSAMPLE"])
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
expected_output = ""
self.assertEqual(result.output, expected_output)
# -- TenxRdsCliTest
if __name__ == '__main__':
unittest.main(verbosity=2)
#-- __main__
|
'''
Created on Dec 1, 2015
Given N numbers, keep their relative order and insert K multiplication signs and
N-K-1 plus signs between them (parentheses may be placed freely) so that the final
result is as large as possible. Since there are N-1 operators in total, exactly one
operator goes between every two adjacent numbers.
http://www.lostscroll.com/max-value-using-and/
@author: Darren
'''
def maxValue(nums):
dp=[[0]*len(nums) for i in range(len(nums)+1)]
sumN=[0]*(len(nums)+1)
for i in range(1,len(nums)+1):
sumN[i]=sumN[i-1]+nums[i-1]
dp[i][0]=sumN[i]
res=dp[len(nums)][0]
for i in range(2,len(nums)+1):
for j in range(1,i):
for k in range(j,i):
dp[i][j]=max(dp[i][j],dp[k][j-1]*(sumN[i]-sumN[k]))
if i==len(nums) and dp[len(nums)][j]>res:
res=dp[len(nums)][j]
return res
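# Worked check: maxValue([2, 3, 4]) == 24, since placing two '*' gives 2*3*4 = 24,
# which beats any arrangement that uses '+', e.g. (2+3)*4 == 20.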
def maxValue2(nums):
pass
nums=[1,1,1,1,1,1,1,1,1]
print(maxValue(nums))
print(maxValue2(nums)) |
class AccountRequest:
get_all_accounts_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<GetAllAccountsRequest xmlns="urn:zimbraAdmin">
<domain by="name">%s</domain>
</GetAllAccountsRequest>
</soap:Body>
</soap:Envelope>
"""
get_account_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<GetAccountRequest xmlns="urn:zimbraAdmin">
<account by="name">%s</account>
</GetAccountRequest>
</soap:Body>
</soap:Envelope>
"""
create_account_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<CreateAccountRequest name="%s" password="%s" xmlns="urn:zimbraAdmin">
<a n="zimbraCosId">%s</a>
</CreateAccountRequest>
</soap:Body>
</soap:Envelope>
"""
update_account_cos_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<ModifyAccountRequest id="%s" xmlns="urn:zimbraAdmin">
<a n="zimbraCosId">%s</a>
</ModifyAccountRequest>
</soap:Body>
</soap:Envelope>
"""
update_account_password_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<SetPasswordRequest id="%s" newPassword="%s" xmlns="urn:zimbraAdmin" />
</soap:Body>
</soap:Envelope>
"""
rename_account_request = """
<?xml version="1.0" ?>
<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">
<soap:Header>
<context xmlns="urn:zimbra">
<authToken>%s</authToken>
<session/>
<account by="name">%s</account>
<userAgent name="zclient" version="8.0.7_GA_6020"/>
</context>
</soap:Header>
<soap:Body>
<RenameAccountRequest id="%s" newName="%s" xmlns="urn:zimbraAdmin" />
</soap:Body>
</soap:Envelope>
"""
|
from skyciv.classes.model.components.meshed_plates.meshed_plate import MeshedPlate
from skyciv.utils.helpers import next_object_key
from skyciv.classes.model.components._base_class.model_collection_component import ModelCollectionComponent
from typing import List
class MeshedPlates(ModelCollectionComponent):
"""Creates an instance of the SkyCiv MeshedPlates class.
"""
def add(self, parent_plate: int, node_A: int, node_B: int, node_C: int, node_D: int = None, rotZ: float = 0) -> int:
"""Create a meshed plate with the next available ID.
Args:
parent_plate (int): The ID of the plate which this meshed plate originated from. Must refer to a plate in the plates object.
node_A (int): The first node of the meshed plate.
node_B (int): The second node of the meshed plate.
node_C (int): The third node of the meshed plate.
node_D (int, optional): The fourth node of the meshed plate. Set this to None if the meshed plate is triangular. Defaults to None.
rotZ (float, optional): Rotation of this plate about the plate's local z-axis, in degrees. Defaults to 0.
Returns:
int: The ID of the new meshed plate element.
"""
next_index = next_object_key(self)
element_ids = self.get_meshed_plate_ids_from_nodes_ids(
node_A, node_B, node_C, node_D)
        if element_ids is not None:
            print('There is already a meshed plate with the same nodes.')
mp = MeshedPlate(parent_plate, node_A, node_B, node_C, node_D, rotZ)
setattr(self, str(next_index), mp)
return next_index
def get_meshed_plate_ids_from_nodes_ids(self, node_A: int, node_B: int, node_C: int, node_D: int = None) -> List[int]:
"""Get the IDs of all meshed plates by corner nodes.
Args:
node_A (int): The ID of Node A.
node_B (int): The ID of Node B.
node_C (int): The ID of Node C.
node_D (int, optional): The ID of Node D. Defaults to None.
Returns:
list[int]: An array of meshed plate IDs or None if none exist.
"""
ids = []
for k, v in vars(self).items():
if (
v.node_A == node_A and
v.node_B == node_B and
v.node_C == node_C and
v.node_D == node_D
):
ids.append(k)
if len(ids) == 0:
ids = None
return ids
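# Usage sketch (standalone; in the real package this collection normally hangs off a
# skyciv model object rather than being built directly):
#
#   meshed_plates = MeshedPlates()
#   mp_id = meshed_plates.add(parent_plate=1, node_A=1, node_B=2, node_C=3)  # triangular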
|
# Import libraries and sklearn libraries
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import neighbors, datasets
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, f1_score
from sklearn.datasets import load_breast_cancer
def graph():
df = pd.read_csv('../data/wdbc.data',
sep=',',
header=None)
df.columns = ['ID', 'Diagnosis', 'mean radius', 'mean texture', 'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness', 'mean concavity',
'mean concave points', 'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error', 'perimeter error', 'area error',
'smoothness error', 'compactness error', 'concavity error',
'concave points error', 'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture', 'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness', 'worst concavity',
'worst concave points', 'worst symmetry', 'worst fractal dimension']
fig, axes = plt.subplots(3, 3, figsize=(10, 10))
    axes[0][0].scatter(df['Diagnosis'], df['mean radius'], alpha=0.2, marker=",", c='blue')
    axes[0][1].scatter(df['Diagnosis'], df['mean perimeter'], alpha=0.2, marker="o", c='green')
    axes[0][2].scatter(df['Diagnosis'], df['mean smoothness'], alpha=0.2, marker="v", c='cyan')
    axes[1][0].scatter(df['Diagnosis'], df['mean compactness'], alpha=0.2, marker=".", c='yellow')
    axes[1][1].scatter(df['Diagnosis'], df['mean concavity'], alpha=0.2, marker=",", c='red')
    axes[1][2].scatter(df['Diagnosis'], df['mean concave points'], alpha=0.2, marker="o", c='magenta')
    axes[2][0].scatter(df['Diagnosis'], df['mean symmetry'], alpha=0.2, marker="v", c='black')
    axes[2][1].scatter(df['Diagnosis'], df['mean fractal dimension'], alpha=0.2, marker=".", c='blue')
    axes[2][2].scatter(df['Diagnosis'], df['mean texture'], alpha=0.2, marker=".", c='green')
axes[0][0].set_xlabel('Diagnosis')
axes[0][1].set_xlabel('Diagnosis')
axes[0][2].set_xlabel('Diagnosis')
axes[1][0].set_xlabel('Diagnosis')
axes[1][1].set_xlabel('Diagnosis')
axes[1][2].set_xlabel('Diagnosis')
axes[2][0].set_xlabel('Diagnosis')
axes[2][1].set_xlabel('Diagnosis')
axes[2][2].set_xlabel('Diagnosis')
axes[0][0].set_ylabel('mean radius')
axes[0][1].set_ylabel('mean perimeter')
axes[0][2].set_ylabel('mean smoothness')
axes[1][0].set_ylabel('mean compactness')
axes[1][1].set_ylabel('mean concavity')
axes[1][2].set_ylabel('mean concave points')
axes[2][0].set_ylabel('mean symmetry')
axes[2][1].set_ylabel('mean fractal dimension')
axes[2][2].set_ylabel('mean texture')
axes[0][0].set_title('Diagnosis Vs mean radius')
axes[0][1].set_title('Diagnosis Vs mean perimeter')
axes[0][2].set_title('Diagnosis Vs mean smoothness')
axes[1][0].set_title('Diagnosis Vs mean compactness')
axes[1][1].set_title('Diagnosis Vs mean concavity')
axes[1][2].set_title('Diagnosis Vs mean concave points')
axes[2][0].set_title('Diagnosis Vs mean symmetry')
axes[2][1].set_title('Diagnosis Vs mean fractal dimension')
axes[2][2].set_title('Diagnosis Vs mean texture')
plt.tight_layout()
if os.path.exists('../plots'):
plt.savefig('../plots/Diagnosis.png', dpi=300)
else:
print('Path does not exist\n')
def main():
cancer = datasets.load_breast_cancer()
X = cancer.data
y = cancer.target
columns_names = cancer.feature_names
print(columns_names)
# Splitting features and target datasets into: train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35)
# Training a Linear Regression model with fit()
lr = LogisticRegression()
lr.fit(X_train, y_train)
# Predicting the results for our test dataset
predicted_values = lr.predict(X_test)
# Printing the residuals: difference between real and predicted
for (real, predicted) in list(zip(y_test, predicted_values)):
print(f'Value: {real}, pred: {predicted} {"is different" if real != predicted else ""}')
# Printing accuracy score(mean accuracy) from 0 - 1
print(f'Accuracy score is {lr.score(X_test, y_test):.2f}/1 \n')
# Printing the classification report
print('Classification Report')
print(classification_report(y_test, predicted_values))
# Printing the classification confusion matrix (diagonal is true)
print('Confusion Matrix')
print(confusion_matrix(y_test, predicted_values))
print('Overall f1-score')
print(f1_score(y_test, predicted_values, average="macro"))
# Graph for accuracy of the test data
plt_array = np.arange(0, predicted_values.size)
actual = np.zeros(predicted_values.size)
for x in plt_array:
if predicted_values[x]==y_test[x]:
actual[x] = 1
else:
actual[x] = 0
plt.figure(figsize=(5,5))
plt.plot(plt_array, actual, 'gv')
plt.xlabel('Number of test iteration')
plt.ylabel('Correct Predicted value')
plt.title('Accuracy of test')
plt.tight_layout()
if os.path.exists('../plots'):
plt.savefig('../plots/performance.png', dpi=300)
else:
print('Path does not exist\n')
# Generate Graph
graph()
if __name__ == "__main__":
main() |
# dspace/__init__.py
"""DSpace Python Client package."""
import logging
from dspace.bitstream import Bitstream # noqa
from dspace.client import DSpaceClient # noqa
from dspace.item import Item, MetadataEntry # noqa
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
import iniparser2
import cmdtools
from lib import utils
from lib import statics
async def error_equip(error):
if isinstance(error, cmdtools.MissingRequiredArgument):
if error.param == "item":
await error_equip.message.channel.send(":x: Item name is required")
async def _equip(item):
    assigned_ids = utils.get_assigned_user_ids()
    if _equip.message.author.id not in assigned_ids:
        await _equip.message.channel.send(":warning: Your id is not assigned")
    else:
        if item in statics.items:
            # Map item-name suffixes to the tool slot they occupy; the three original
            # branches only differed in which slot was written.
            tool_slots = {"_axe": "axe", "_pickaxe": "pickaxe", "_sword": "sword"}
            slot = next(
                (tool for suffix, tool in tool_slots.items() if item.endswith(suffix)),
                None,
            )
            if slot is not None:
                filename = f"data/users/{_equip.message.author.id}.ini"
                data = iniparser2.INI(convert_property=True)
                data.read_file(filename)
                if data["inventory"][item] < 1:
                    await _equip.message.channel.send(
                        f":x: {_equip.message.author.mention} You don't have this item `{item}`"
                    )
                    return
                data["tools"][slot] = item
                data.write(filename)
                await _equip.message.channel.send(
                    f":white_check_mark: {_equip.message.author.mention} You have equipped item: **{item}**"
                )
            else:
                await _equip.message.channel.send(f":x: Item `{item}` is not equipable")
        else:
            await _equip.message.channel.send(f":x: Item `{item}` does not exist")
|
class Solution:
def solve(self, nums, k):
ans = -1
total = 0
for i in range(len(nums)):
total += nums[i]
if total <= k: ans = i
return ans
|
import os
from typing import Union
import requests
import random
from .get_hashio_storage_dir import get_hashio_storage_dir
def load_file(uri: str) -> Union[str, None]:
assert uri.startswith('ipfs://'), f'Invalid or unsupported URI: {uri}'
a = uri.split('/')
assert len(a) >= 3, f'Invalid or unsupported URI: {uri}'
cid = a[2]
hashio_storage_dir = get_hashio_storage_dir()
parent_dir = f'{hashio_storage_dir}/ipfs/{cid[0]}{cid[1]}/{cid[2]}{cid[3]}/{cid[4]}{cid[5]}'
filename = f'{parent_dir}/{cid}'
if os.path.exists(filename):
return filename
tmp_filename = filename + '.downloading'
if os.path.exists(tmp_filename):
        raise Exception(f'Temporary file exists: {tmp_filename}')
# url = f'https://{cid}.ipfs.dweb.link'
url = f'https://cloudflare-ipfs.com/ipfs/{cid}'
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
tmp_filename = f'{filename}.tmp.{_random_string(8)}'
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(tmp_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
try:
os.rename(tmp_filename, filename)
    except Exception:
if not os.path.exists(filename): # maybe some other process beat us to it
raise Exception(f'Unexpected problem moving file {tmp_filename}')
return filename
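# Usage sketch (the CID below is a placeholder; network access and a writable
# hashio storage directory are assumed):
#
#     path = load_file('ipfs://<cid>')
#     with open(path, 'rb') as f:
#         data = f.read()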
def _random_string(num_chars: int) -> str:
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
return ''.join(random.choice(chars) for _ in range(num_chars)) |
import discord
from discord.ext import commands
from discord.commands import slash_command
from console import Console
import random
console = Console(True)
class help_class(commands.Cog):
def __init__(self, client):
self.client = client
@slash_command(name="help", description="Send help message.")
async def help(self, ctx):
        colour = random.choice([0x0057b7, 0xffd700])
        embed = discord.Embed(title="UNB Help", description="To configure the bot, use ...\n /config \n - channel {channel} - Set the channel for the bot to post news in\n - ping_roles {role} - Set the role to ping when breaking news is posted", colour=colour)
embed.add_field(name="Still not working?", value="Make sure the bot has permissions to send messages in the channel you set it for.", inline=False)
embed.add_field(name="If you need more help..", value="Join the support server [here](https://discord.gg/9k59NxQ9wf)", inline=False)
await ctx.respond(embed=embed, ephemeral=True)
def setup(client):
client.add_cog(help_class(client))
|
import platform
import stat
import os
import csv
from datetime import datetime
import json
import shutil
from pathlib import Path
import subprocess
from collections import defaultdict
import click
from ploomber.io.terminalwriter import TerminalWriter
from ploomber.table import Table
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.markup import MarkdownLexer
from pygments import highlight
_URL = 'https://github.com/ploomber/projects'
_DEFAULT_BRANCH = 'master'
_home = Path('~', '.ploomber')
_lexer = MarkdownLexer()
_formatter = TerminalFormatter(bg="dark")
def _find_header(md):
"""Find header markers
"""
mark = '<!-- end header -->'
lines = md.splitlines()
for n, line in enumerate(lines):
if mark == line:
return n
return None
def _skip_header(md):
line = _find_header(md)
    if line is not None:
lines = md.splitlines()
return '\n'.join(lines[line + 1:])
else:
return md
def _delete_git_repo(path):
"""
    If on Windows, we need to change permissions to delete the repo
"""
path_to_repo = Path(path, '.git')
if os.name == 'nt' and path_to_repo.exists():
for root, dirs, files in os.walk(path_to_repo):
for dir_ in dirs:
os.chmod(Path(root, dir_), stat.S_IRWXU)
for file_ in files:
os.chmod(Path(root, file_), stat.S_IRWXU)
def _delete(source, sub):
return source.replace(sub, '')
def _cleanup_markdown(source):
source = _delete(source, '<!-- start description -->\n')
source = _delete(source, '<!-- end description -->\n')
source = _skip_header(source)
return source
def _display_markdown(tw, path):
source = _cleanup_markdown(path.read_text())
lines = source.splitlines()
top_lines = '\n'.join(lines[:25])
tw.write(highlight(top_lines, _lexer, _formatter))
if len(lines) > 25:
tw.write(f'\n[...{str(path)} continues]\n', yellow=True)
class _ExamplesManager:
"""Class for managing examples data
"""
def __init__(self, home, branch=None):
self._home = Path(home).expanduser()
self._path_to_metadata = self._home / '.metadata'
self._examples = self._home / 'projects'
self._branch = branch or _DEFAULT_BRANCH
self._explicit_branch = branch is not None
@property
def home(self):
return self._home
@property
def examples(self):
return self._examples
@property
def path_to_metadata(self):
return self._path_to_metadata
@property
def branch(self):
return self._branch
def save_metadata(self, branch):
timestamp = datetime.now().timestamp()
metadata = json.dumps(dict(timestamp=timestamp, branch=branch))
self.path_to_metadata.write_text(metadata)
def load_metadata(self):
try:
return json.loads(self.path_to_metadata.read_text())
except Exception as e:
click.echo(f'Error loading metadata: {e}')
return None
def clone(self):
if not self.home.exists():
self.home.mkdir()
if self.examples.exists():
_delete_git_repo(self.examples)
shutil.rmtree(self.examples)
try:
subprocess.run([
'git',
'clone',
'--depth',
'1',
'--branch',
self.branch,
_URL,
str(self.examples),
],
check=True)
except Exception as e:
exception = e
else:
exception = None
if exception:
raise RuntimeError(
'An error occurred when downloading examples. '
'Verify git is installed and your internet '
f'connection. (Error message: {str(exception)!r})')
self.save_metadata(branch=self.branch)
def outdated(self):
metadata = self.load_metadata()
if metadata:
timestamp = metadata['timestamp']
then = datetime.fromtimestamp(timestamp)
now = datetime.now()
elapsed = (now - then).days
is_more_than_one_day_old = elapsed >= 1
is_different_branch = metadata.get('branch') != self.branch
if is_more_than_one_day_old:
click.echo('Examples copy is more than 1 day old...')
if is_different_branch and self._explicit_branch:
click.echo('Different branch requested...')
return is_more_than_one_day_old or (is_different_branch
and self._explicit_branch)
else:
click.echo('Cloning...')
return True
def path_to(self, name):
return self.examples / name
def path_to_readme(self):
return self.examples / 'README.md'
def list(self):
with open(self.examples / '_index.csv',
newline='',
encoding='utf-8-sig') as f:
rows = list(csv.DictReader(f))
categories = json.loads((self.examples / '_category.json').read_text())
by_category = defaultdict(lambda: [])
for row in rows:
category = row.pop('category')
del row['idx']
by_category[category].append(row)
tw = TerminalWriter()
click.echo(f'Branch: {self.branch}')
tw.sep('=', 'Ploomber examples', blue=True)
click.echo()
for category in sorted(by_category):
title = category.capitalize()
description = categories.get(category)
if description:
title = f'{title} ({description})'
tw.sep(' ', title, green=True)
click.echo()
click.echo(
Table.from_dicts(by_category[category]).to_format('simple'))
click.echo()
tw.sep('=', blue=True)
        tw.write('\nTo run these examples in a free, hosted '
                 f'environment, see instructions at: {_URL}')
tw.write('\nTo download: ploomber examples -n name -o path\n')
tw.write('Example: ploomber examples -n templates/ml-basic -o ml\n')
def main(name, force=False, branch=None, output=None):
"""
Entry point for examples
"""
manager = _ExamplesManager(home=_home, branch=branch)
tw = TerminalWriter()
if not manager.examples.exists() or manager.outdated() or force:
if not manager.examples.exists():
click.echo('Local copy does not exist...')
elif force:
click.echo('Forcing download...')
manager.clone()
if not name:
manager.list()
else:
selected = manager.path_to(name)
if not selected.exists():
click.echo(f'There is no example named {name!r}.\n'
'To list examples: ploomber examples\n'
'To update local copy: ploomber examples -f')
else:
output = output or name
tw.sep('=', f'Copying example {name!r} to {output}/', green=True)
if Path(output).exists():
raise click.ClickException(
f"{output!r} already exists in the current working "
"directory, please rename it or move it "
"to another location and try again.")
shutil.copytree(selected, output)
path_to_readme = Path(output, 'README.md')
out_dir = output + ('\\'
if platform.system() == 'Windows' else '/')
tw.sep('=', str(path_to_readme), blue=True)
_display_markdown(tw, path_to_readme)
tw.sep('=', blue=True)
tw.sep('=', 'Installation', blue=True)
tw.write(f'Move to {out_dir} and run one of:'
f'\n* ploomber install'
f'\n* conda env create -f environment.yml'
f'\n* pip install -r requirements.txt\n')
tw.sep('=', blue=True)
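# Usage sketch for the entry point above (argument values are illustrative):
#
#     main(name=None)                                  # list available examples
#     main(name='templates/ml-basic', output='ml')     # copy one example locally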
|
import os.path
import fs
from .setup import *
def test_normalize():
path = '../test/'
assert fs.normalize(path) == os.path.normpath(path) |
# coding:utf8
# Port scanning tool
# Usage: python portScan.py -i 192.168.1.1 -p 22
# 2017-03-23
# leafrainy (leafrainy.cc)
import socket
import re
import sys
s = sys.argv
ss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Print usage information
def useNotic(s):
    print("Please supply the parameters in the correct format")
    print("Usage:   python "+s[0]+" -i ip -p port")
    print("Example: python "+s[0]+" -i 192.168.1.1 -p 22")
# Validate the IP address and port arguments
def inputInfo(s):
    if len(s) != 5:
        useNotic(s)
        exit(0)
    i = '-i'
    p = '-p'
    inputInfoList = []
    if i in s:
        if s.index('-i') == 1:
            if re.match(r"^\s*\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\s*$", s[2]):
                inputInfoList.append(s[2])
            else:
                print("Please enter a valid IP address")
                useNotic(s)
                exit(0)
    if p in s:
        if s.index('-p') == 3:
            if (int(s[4]) <= 65535) and (int(s[4]) > 0):
                inputInfoList.append(s[4])
            else:
                print("Please enter a valid port number")
                useNotic(s)
                exit(0)
    return inputInfoList
# Scan a single port with a TCP connect attempt
def scanPort(ss,inputInfoList):
    print("Scanning....")
    # tcp
    try:
        result = ss.connect_ex((inputInfoList[0],int(inputInfoList[1])))
        if result == 0:
            print(inputInfoList[1]+" port is open")
        else:
            print(inputInfoList[1]+" port is closed")
        ss.close()
    except:
        print('An error occurred while scanning the port')
if __name__ == '__main__':
    inputInfoList = inputInfo(s)
    scanPort(ss,inputInfoList)
|
from collections import OrderedDict
from _pytest.runner import CollectReport
from py.log import Producer
from xdist.report import report_collection_diff
from xdist.workermanage import parse_spec_config
class LoadScopeScheduling:
"""Implement load scheduling across nodes, but grouping test by scope.
This distributes the tests collected across all nodes so each test is run
just once. All nodes collect and submit the list of tests and when all
collections are received it is verified they are identical collections.
Then the collection gets divided up in work units, grouped by test scope,
and those work units get submitted to nodes. Whenever a node finishes an
item, it calls ``.mark_test_complete()`` which will trigger the scheduler
to assign more work units if the number of pending tests for the node falls
below a low-watermark.
When created, ``numnodes`` defines how many nodes are expected to submit a
collection. This is used to know when all nodes have finished collection.
Attributes:
:numnodes: The expected number of nodes taking part. The actual number of
nodes will vary during the scheduler's lifetime as nodes are added by
the DSession as they are brought up and removed either because of a dead
node or normal shutdown. This number is primarily used to know when the
initial collection is completed.
:collection: The final list of tests collected by all nodes once it is
validated to be identical between all the nodes. It is initialised to
None until ``.schedule()`` is called.
:workqueue: Ordered dictionary that maps all available scopes with their
associated tests (nodeid). Nodeids are in turn associated with their
completion status. One entry of the workqueue is called a work unit.
In turn, a collection of work unit is called a workload.
::
workqueue = {
'<full>/<path>/<to>/test_module.py': {
'<full>/<path>/<to>/test_module.py::test_case1': False,
'<full>/<path>/<to>/test_module.py::test_case2': False,
(...)
},
(...)
}
:assigned_work: Ordered dictionary that maps worker nodes with their
assigned work units.
::
assigned_work = {
'<worker node A>': {
'<full>/<path>/<to>/test_module.py': {
'<full>/<path>/<to>/test_module.py::test_case1': False,
'<full>/<path>/<to>/test_module.py::test_case2': False,
(...)
},
(...)
},
(...)
}
:registered_collections: Ordered dictionary that maps worker nodes with
their collection of tests gathered during test discovery.
::
registered_collections = {
'<worker node A>': [
'<full>/<path>/<to>/test_module.py::test_case1',
'<full>/<path>/<to>/test_module.py::test_case2',
],
(...)
}
:log: A py.log.Producer instance.
:config: Config object, used for handling hooks.
"""
def __init__(self, config, log=None):
self.numnodes = len(parse_spec_config(config))
self.collection = None
self.workqueue = OrderedDict()
self.assigned_work = OrderedDict()
self.registered_collections = OrderedDict()
if log is None:
self.log = Producer("loadscopesched")
else:
self.log = log.loadscopesched
self.config = config
@property
def nodes(self):
"""A list of all active nodes in the scheduler."""
return list(self.assigned_work.keys())
@property
def collection_is_completed(self):
"""Boolean indication initial test collection is complete.
This is a boolean indicating all initial participating nodes have
finished collection. The required number of initial nodes is defined
by ``.numnodes``.
"""
return len(self.registered_collections) >= self.numnodes
@property
def tests_finished(self):
"""Return True if all tests have been executed by the nodes."""
if not self.collection_is_completed:
return False
if self.workqueue:
return False
for assigned_unit in self.assigned_work.values():
if self._pending_of(assigned_unit) >= 2:
return False
return True
@property
def has_pending(self):
"""Return True if there are pending test items.
This indicates that collection has finished and nodes are still
processing test items, so this can be thought of as
"the scheduler is active".
"""
if self.workqueue:
return True
for assigned_unit in self.assigned_work.values():
if self._pending_of(assigned_unit) > 0:
return True
return False
def add_node(self, node):
"""Add a new node to the scheduler.
From now on the node will be assigned work units to be executed.
Called by the ``DSession.worker_workerready`` hook when it successfully
bootstraps a new node.
"""
assert node not in self.assigned_work
self.assigned_work[node] = OrderedDict()
def remove_node(self, node):
"""Remove a node from the scheduler.
This should be called either when the node crashed or at shutdown time.
In the former case any pending items assigned to the node will be
re-scheduled.
Called by the hooks:
- ``DSession.worker_workerfinished``.
- ``DSession.worker_errordown``.
Return the item being executed while the node crashed or None if the
node has no more pending items.
"""
workload = self.assigned_work.pop(node)
if not self._pending_of(workload):
return None
# The node crashed, identify test that crashed
for work_unit in workload.values():
for nodeid, completed in work_unit.items():
if not completed:
crashitem = nodeid
break
else:
continue
break
else:
raise RuntimeError(
"Unable to identify crashitem on a workload with pending items"
)
        # Make the uncompleted work units available again
self.workqueue.update(workload)
for node in self.assigned_work:
self._reschedule(node)
return crashitem
def add_node_collection(self, node, collection):
"""Add the collected test items from a node.
The collection is stored in the ``.registered_collections`` dictionary.
Called by the hook:
- ``DSession.worker_collectionfinish``.
"""
# Check that add_node() was called on the node before
assert node in self.assigned_work
# A new node has been added later, perhaps an original one died.
if self.collection_is_completed:
# Assert that .schedule() should have been called by now
assert self.collection
# Check that the new collection matches the official collection
if collection != self.collection:
other_node = next(iter(self.registered_collections.keys()))
msg = report_collection_diff(
self.collection, collection, other_node.gateway.id, node.gateway.id
)
self.log(msg)
return
self.registered_collections[node] = list(collection)
def mark_test_complete(self, node, item_index, duration=0):
"""Mark test item as completed by node.
Called by the hook:
- ``DSession.worker_testreport``.
"""
nodeid = self.registered_collections[node][item_index]
scope = self._split_scope(nodeid)
self.assigned_work[node][scope][nodeid] = True
self._reschedule(node)
def _assign_work_unit(self, node):
"""Assign a work unit to a node."""
assert self.workqueue
# Grab a unit of work
scope, work_unit = self.workqueue.popitem(last=False)
# Keep track of the assigned work
        assigned_to_node = self.assigned_work.setdefault(node, OrderedDict())
assigned_to_node[scope] = work_unit
# Ask the node to execute the workload
worker_collection = self.registered_collections[node]
nodeids_indexes = [
worker_collection.index(nodeid)
for nodeid, completed in work_unit.items()
if not completed
]
node.send_runtest_some(nodeids_indexes)
def _split_scope(self, nodeid):
"""Determine the scope (grouping) of a nodeid.
There are usually 3 cases for a nodeid::
example/loadsuite/test/test_beta.py::test_beta0
example/loadsuite/test/test_delta.py::Delta1::test_delta0
example/loadsuite/epsilon/__init__.py::epsilon.epsilon
#. Function in a test module.
#. Method of a class in a test module.
#. Doctest in a function in a package.
This function will group tests with the scope determined by splitting
the first ``::`` from the right. That is, classes will be grouped in a
single work unit, and functions from a test module will be grouped by
their module. In the above example, scopes will be::
example/loadsuite/test/test_beta.py
example/loadsuite/test/test_delta.py::Delta1
example/loadsuite/epsilon/__init__.py
"""
if nodeid.count('::') > 1:
return '::'.join(nodeid.split('::')[:2])
return nodeid.rsplit("::", 1)[0]
def _pending_of(self, workload):
"""Return the number of pending tests in a workload."""
pending = sum(list(scope.values()).count(False) for scope in workload.values())
return pending
def _reschedule(self, node):
"""Maybe schedule new items on the node.
If there are any globally pending work units left then this will check
if the given node should be given any more tests.
"""
# Do not add more work to a node shutting down
if node.shutting_down:
return
# Check that more work is available
if not self.workqueue:
node.shutdown()
return
self.log("Number of units waiting for node:", len(self.workqueue))
# Check that the node is almost depleted of work
# 2: Heuristic of minimum tests to enqueue more work
if self._pending_of(self.assigned_work[node]) > 2:
return
# Pop one unit of work and assign it
self._assign_work_unit(node)
def schedule(self):
"""Initiate distribution of the test collection.
Initiate scheduling of the items across the nodes. If this gets called
again later it behaves the same as calling ``._reschedule()`` on all
nodes so that newly added nodes will start to be used.
If ``.collection_is_completed`` is True, this is called by the hook:
- ``DSession.worker_collectionfinish``.
"""
assert self.collection_is_completed
# Initial distribution already happened, reschedule on all nodes
if self.collection is not None:
for node in self.nodes:
self._reschedule(node)
return
# Check that all nodes collected the same tests
if not self._check_nodes_have_same_collection():
self.log("**Different tests collected, aborting run**")
return
# Collections are identical, create the final list of items
self.collection = list(next(iter(self.registered_collections.values())))
if not self.collection:
return
# Determine chunks of work (scopes)
for nodeid in self.collection:
scope = self._split_scope(nodeid)
            work_unit = self.workqueue.setdefault(scope, OrderedDict())
work_unit[nodeid] = False
# Avoid having more workers than work
extra_nodes = len(self.nodes) - len(self.workqueue)
if extra_nodes > 0:
self.log("Shuting down {} nodes".format(extra_nodes))
for _ in range(extra_nodes):
unused_node, assigned = self.assigned_work.popitem(last=True)
self.log("Shuting down unused node {}".format(unused_node))
unused_node.shutdown()
# Assign initial workload
for node in self.nodes:
self._assign_work_unit(node)
# Ensure nodes start with at least two work units if possible (#277)
for node in self.nodes:
self._reschedule(node)
# Initial distribution sent all tests, start node shutdown
if not self.workqueue:
for node in self.nodes:
node.shutdown()
def _check_nodes_have_same_collection(self):
"""Return True if all nodes have collected the same items.
If collections differ, this method returns False while logging
the collection differences and posting collection errors to
pytest_collectreport hook.
"""
node_collection_items = list(self.registered_collections.items())
first_node, col = node_collection_items[0]
same_collection = True
for node, collection in node_collection_items[1:]:
msg = report_collection_diff(
col, collection, first_node.gateway.id, node.gateway.id
)
if not msg:
continue
same_collection = False
self.log(msg)
if self.config is None:
continue
rep = CollectReport(node.gateway.id, "failed", longrepr=msg, result=[])
self.config.hook.pytest_collectreport(report=rep)
return same_collection
|
# -*- coding: utf-8 -*-
"""Top-level package for django_inclusiveft."""
__author__ = """Bernard Parah"""
__email__ = '[email protected]'
__version__ = '0.1.0'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 26 09:13:00 2019
@author: Rajiv Sambasivan
"""
import pandas as pd
from arango import ArangoClient
import time
import traceback
import uuid
from collections import OrderedDict
class ITSM_Dataloader:
def __init__(self, conn, input_file = "data/pp_recoded_incident_event_log.csv",\
create_db = True, frac = 1.0):
self.emlg = None
self.db = None
self.labels = list()
self.vertex_list = None
self.edge_dict = {}
self.feature_dict = {}
self.feature_data = None
self.setup_schema()
self.sampling_frac = frac
self.replication_factor = None
self.batch_vert_dict = None
self.batch_edge_dict = None
if self.is_valid_conn(conn):
url = conn["hostname"]
user_name = conn["username"]
password = conn["password"]
dbName = conn["dbName"]
if 'port' in conn:
port = str(conn['port'])
else:
port = '8529'
if 'protocol' in conn:
protocol = conn['protocol']
else:
protocol = "https"
con_str = protocol + "://" + url + ":" + port
client = ArangoClient(hosts=con_str)
self.db = client.db(dbName, user_name, password)
else:
print(
"The connection information you supplied is invalid, please check and try again!")
if create_db:
self.input_file = input_file
self.create_graph()
self.load_data()
return
def is_valid_conn(self, conn):
valid_con_info = True
if not "hostname" in conn:
print("hostname is missing in connection")
if not "username" in conn:
print("Username is missing in connection")
valid_con_info = False
if not "password" in conn:
print("Password is missing in connection")
valid_con_info = False
if not "dbName" in conn:
print("Database is missing in connection")
valid_con_info = False
return valid_con_info
def setup_schema(self):
self.vertex_list = ['incident', 'support_org', 'customer', 'vendor']
self.edge_dict = {'incident_support_org': {'from': 'incident', 'to': 'support_org'},\
'incident_customer': {'from': 'incident', 'to': 'customer'},\
'incident_vendor': {'from': 'incident', 'to': 'vendor'}}
self.feature_dict['support_org'] = ['assignment_group', 'assigned_to']
self.feature_dict['customer'] = ['opened_by']
self.feature_dict['vendor'] = ['vendor']
self.feature_data = {v : OrderedDict() for v in self.vertex_list}
self.feature_dict['incident'] = ['D_sys_mod_count', 'D_reopen_count',\
'urgency','incident_state', 'u_symptom', 'impact', 'contact_type',\
'u_priority_confirmation', 'cmdb_ci', 'rfc', 'problem_id',\
'caused_by', 'location', 'knowledge', 'resolved_by', 'subcategory',\
'active', 'category', 'priority', 'reassigned']
return
def create_graph(self):
self.emlg = self.db.create_graph('ITSMg')
self.create_graph_vertices()
self.create_graph_edges()
return
def create_graph_edges(self):
for edge in self.edge_dict:
src_vertex = self.edge_dict[edge]['from']
dest_vertex = self.edge_dict[edge]['to']
if not self.emlg.has_edge_definition(edge):
self.db.create_collection(edge, edge = True,\
replication_factor = self.replication_factor)
self.emlg.create_edge_definition(edge_collection = edge,\
from_vertex_collections=[src_vertex],\
to_vertex_collections=[dest_vertex] )
return
def create_graph_vertices(self):
for v in self.vertex_list:
if not self.emlg.has_vertex_collection(v):
                self.db.create_collection(v, replication_factor = self.replication_factor)
self.emlg.create_vertex_collection(v)
return
def id_sequence(self, vertex):
id_dict = {v: 0 for v in self.vertex_list}
while True:
yield id_dict[vertex]
id_dict[vertex] += 1
def do_inserts(self):
for v in self.vertex_list:
batch_docs = self.batch_vert_dict[v]
self.db.collection(v).insert_many(batch_docs)
self.batch_vert_dict[v] = list()
edge_names = [*self.edge_dict]
for ename in edge_names:
batch_edge_docs = self.batch_edge_dict[ename]
self.db.collection(ename).insert_many(batch_edge_docs)
self.batch_edge_dict[ename] = list()
return
def load_data(self):
t0 = time.time()
df = pd.read_csv(self.input_file)
df = df.sample(frac = self.sampling_frac)
num_rows = df.shape[0] - 1
print("A dataset with %d rows is being used for this run" % (num_rows) )
df = df.reset_index()
node_val_ids = {v: dict() for v in self.vertex_list}
vertex_colls = {v: self.emlg.vertex_collection(v) for v in self.vertex_list}
edge_names = [*self.edge_dict]
edge_colls = {ename: self.emlg.edge_collection(ename) for ename in edge_names}
row_vertex_map = {'incident': 'number', 'support_org': 'assignment_group',\
'customer': 'opened_by', 'vendor': 'vendor'}
batch_size = 500
self.batch_vert_dict = {v : list() for v in self.vertex_list}
self.batch_edge_dict = {ename: list() for ename in edge_names}
batch_count = 0
for row_index, row in df.iterrows():
try:
if row_index % 500 == 0:
print("Processing row: " + str(row_index))
# insert the vertices
record_vertex_keys = dict()
for v in self.vertex_list:
the_vertex = dict()
row_val = row[row_vertex_map[v]]
#if not row_val in node_val_ids[v]:
the_vertex['node_id'] = str(uuid.uuid4().int >> 64)
the_vertex['_key'] = v.upper() + "-" + the_vertex['node_id']
node_val_ids[v][row_val] = the_vertex['_key']
self.load_vertex_attributes(row, the_vertex, v )
self.batch_vert_dict[v].append(the_vertex)
record_vertex_keys[v] = node_val_ids[v][row_val]
#insert the edges
for ename in edge_names:
from_vertex = self.edge_dict[ename]['from']
to_vertex = self.edge_dict[ename]['to']
edge_key = record_vertex_keys[from_vertex] + "-" + \
record_vertex_keys[to_vertex]
the_edge = {"_key" : edge_key,\
"_from": from_vertex + "/" + record_vertex_keys[from_vertex],\
"_to": to_vertex + "/" + record_vertex_keys[to_vertex]}
self.batch_edge_dict[ename].append(the_edge)
if row_index > 0 and (row_index % batch_size == 0):
self.do_inserts()
if num_rows % batch_size != 0:
if row_index == num_rows:
self.do_inserts()
except Exception as e:
traceback.print_exc()
#breakpoint()
t1 = time.time()
et = float((t1 -t0) / 60)
et = round(et, 2)
print("Data load took " + str(et) + " minutes!.")
print("Done loading data!")
return
def load_vertex_attributes(self, row, the_vertex, vertex_name):
if vertex_name == 'incident':
self.load_incident_attributes(row, the_vertex)
if vertex_name == 'customer':
self.load_customer_attributes(row, the_vertex)
if vertex_name == 'support_org':
self.load_support_org_attributes(row, the_vertex)
if vertex_name == 'vendor':
self.load_vendor_attributes(row, the_vertex)
return
def load_incident_attributes(self, row, the_vertex):
subset_dict = row[self.feature_dict['incident']].to_dict()
for a in subset_dict:
the_vertex[a] = subset_dict[a]
return
def load_customer_attributes(self, row, the_vertex):
subset_dict = row[self.feature_dict['customer']].to_dict()
for a in subset_dict:
the_vertex[a] = subset_dict[a]
return
def load_support_org_attributes(self, row, the_vertex):
subset_dict = row[self.feature_dict['support_org']].to_dict()
for a in subset_dict:
the_vertex[a] = subset_dict[a]
return
def load_vendor_attributes(self, row, the_vertex):
subset_dict = row[self.feature_dict['vendor']].to_dict()
for a in subset_dict:
the_vertex[a] = subset_dict[a]
return
def load_num_mods(self, row, the_vertex):
return
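# Usage sketch (the connection values below are placeholders, not real
# credentials; the CSV path defaults to the file named in __init__):
#
#     conn = {"hostname": "localhost", "username": "root",
#             "password": "<password>", "dbName": "ITSM",
#             "port": 8529, "protocol": "http"}
#     loader = ITSM_Dataloader(conn, create_db=True, frac=0.1)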
|
# SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:47 PM
#
#
# Matrix Assemblers
from SecretPlots.assemblers import Assembler
from SecretPlots.constants import *
from SecretPlots.managers import ColorMapLocations
class ColorMapAssembler(Assembler):
@property
def main_location_manager(self):
return ColorMapLocations(self.am, self.om, self._log)
@property
def type(self):
return PLOT_COLOR_MAP
def _adjust_defaults(self):
if self.type != PLOT_BOOLEAN_PLOT:
if self.gm.has_colorbar is None:
self.gm.has_colorbar = True
if self.gm.colorbar_location is None:
self.gm.colorbar_location = "right"
else:
if self.gm.has_colorbar is None:
self.gm.has_colorbar = True
else:
self.em.show_legends = True
def _draw_elements(self):
locations = self.lm.get(self.data)
for loc, val, pos in zip(locations, self.data.value,
self.data.positions):
x, y = loc
if self.am.orientation == "y":
x, y = y, x
if self.type == PLOT_BOOLEAN_PLOT:
if self.data.threshold is None:
self._log.error("For BooleanPlot, you should specify "
"threshold")
v = 1 if val >= self.data.threshold else 0
shape = self.om.get(x, y, v, pos)
else:
shape = self.om.get(x, y, val / self.data.max, pos)
self.ax.add_patch(shape.get())
self.em.draw_values(shape, val,
self.cm.color(pos, val / self.data.max))
ticks_major = []
ticks_minor = []
edge_major = []
edge_minor = []
for i, m in enumerate(locations):
if i == 0:
edge_major.append(m[0])
edge_minor.append(m[1])
elif i == len(locations) - 1:
edge_major.append(m[0] + self.om.width)
edge_minor.append(m[1] + self.om.height)
temp = round(m[0] + self.om.width / 2, 2)
temp_minor = round(m[1] + self.om.height / 2, 2)
if temp not in ticks_major:
ticks_major.append(temp)
if temp_minor not in ticks_minor:
ticks_minor.append(temp_minor)
self.am.major.make_ticks(ticks_major)
self.am.minor.make_ticks(ticks_minor)
self.am.major.edgelines = edge_major
self.am.minor.edgelines = edge_minor
self.am.major.make_labels()
self.am.minor.make_labels()
def draw(self):
self._adjust_defaults()
self._draw_elements()
self._draw_axis()
self._draw_extra()
class BooleanAssembler(ColorMapAssembler):
@property
def type(self):
return PLOT_BOOLEAN_PLOT
|
"""
Wrapper for handling data in lexibank packages.
"""
from importlib import import_module
from pyclts import CLTS
from csvw.dsv import UnicodeDictReader
from tqdm import tqdm
class LexibankDataset(object):
def __init__(self, package, transform=None):
"""
Load the data of a lexibank dataset.
"""
clts = CLTS()
modify = {
"Tokens": lambda x, y, z: [
str(clts.bipa[token]) for token in x["Segments"].split() if token != "+"
],
"Language": lambda x, y, z: y[x["Language_ID"]]["Name"],
"Glottocode": lambda x, y, z: y[x["Language_ID"]]["Glottocode"],
"Concept": lambda x, y, z: z[x["Parameter_ID"]]["Name"],
"Concepticon_ID": lambda x, y, z: z[x["Parameter_ID"]]["Concepticon_ID"],
"Concepticon_GLOSS": lambda x, y, z: z[x["Parameter_ID"]][
"Concepticon_Gloss"
],
"FormChars": lambda x, y, z: list(x["Form"]),
"ASJP": lambda x, y, z: clts.soundclass("asjp")(x["Segments"]),
"DOLGO": lambda x, y, z: clts.soundclass("dolgo")(x["Segments"]),
"SCA": lambda x, y, z: clts.soundclass("sca")(x["Segments"]),
}
transform = transform or {}
modify.update(transform)
module = import_module("lexibank_" + package)
self.ds = module.Dataset()
self.forms = []
self.concepts = {}
with UnicodeDictReader(self.ds.cldf_dir.joinpath("parameters.csv")) as reader:
for row in reader:
self.concepts[row["ID"]] = row
self.languages = {}
with UnicodeDictReader(self.ds.cldf_dir.joinpath("languages.csv")) as reader:
for row in reader:
self.languages[row["ID"]] = row
with UnicodeDictReader(self.ds.cldf_dir.joinpath("forms.csv")) as reader:
for row in tqdm(reader, desc="loading data"):
for key, fun in modify.items():
row[key] = fun(row, self.languages, self.concepts)
self.forms.append(row)
def get_table(self, language=None, form="Form", classification="Borrowed"):
out = []
for row in self.forms:
if not language or row["Language"] == language:
out.append([row["ID"], row[form], row[classification]])
return out
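# Usage sketch (assumes a lexibank dataset package, e.g. "lexibank_wold",
# is installed; the package, language, and column names are illustrative only):
#
#     lds = LexibankDataset("wold")
#     rows = lds.get_table(language="English", form="Tokens",
#                          classification="Borrowed")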
|
# Copyright 2020 KCL-BMEIS - King's College London
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from contextlib import contextmanager
from datetime import datetime, timezone
import time
from distutils.util import strtobool
import h5py
import numpy as np
import numba
from numba import jit, njit
import pandas as pd
from exetera.core import validation as val
from exetera.core import readerwriter as rw
from exetera.core import fields as fld
from exetera.core import operations as ops
from exetera.core.operations import INVALID_INDEX, DEFAULT_CHUNKSIZE
# TODO: rename this persistence file to hdf5persistence
# TODO: wrap the dataset in a withable so that different underlying
# data stores can be used
# schema
# * schema
# * import history
# * schema number
# * patients
# * assessments
# * tests
# TODO:
"""
* mapping and joining
* by function
* get_mapping (fwd / bwd)
* first_in_second -> filter
* second_in_first -> filter
* join(left, right, inner, outer)
* aggregate_and_join(left, right, inner, outer, left_fn, right_fn
* use of pandas
* use of dicts
"""
chunk_sizes = {
'patient': (DEFAULT_CHUNKSIZE,), 'assessment': (DEFAULT_CHUNKSIZE,), 'test': (DEFAULT_CHUNKSIZE,)
}
# @numba.njit
# def _safe_map(data_field, map_field, map_filter=None):
# result = np.zeros_like(map_field, dtype=data_field.dtype)
# empty_val = result[0]
# if map_filter is not None:
# for i in range(len(map_field)):
# if map_filter[i]:
# result[i] = data_field[map_field[i]]
# else:
# result[i] = empty_val
# else:
# for i in range(len(map_field)):
# result[i] = data_field[map_field[i]]
#
# return result
def try_str_to_float_to_int(value, invalid=0):
try:
return True, int(float(value))
except ValueError:
return False, invalid
def try_str_to_bool(value, invalid=0):
try:
return True, bool(value)
except ValueError:
return False, invalid
def try_str_to_int(value, invalid=0):
try:
return True, int(value)
except ValueError:
return False, invalid
def try_str_to_float(value, invalid=0):
try:
return True, float(value)
except ValueError:
return False, invalid
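# The helpers above all follow the same (ok, value) convention, e.g.:
#   try_str_to_int('12')             -> (True, 12)
#   try_str_to_int('12.5')           -> (False, 0)
#   try_str_to_float_to_int('12.5')  -> (True, 12)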
def _apply_filter_to_array(values, filter):
return values[filter]
@njit
def _apply_filter_to_index_values(index_filter, indices, values):
# pass 1 - determine the destination lengths
cur_ = indices[:-1]
next_ = indices[1:]
count = 0
total = 0
for i in range(len(index_filter)):
if index_filter[i] == True:
count += 1
total += next_[i] - cur_[i]
dest_indices = np.zeros(count+1, indices.dtype)
dest_values = np.zeros(total, values.dtype)
dest_indices[0] = 0
count = 1
total = 0
for i in range(len(index_filter)):
if index_filter[i] == True:
n = next_[i]
c = cur_[i]
delta = n - c
dest_values[total:total + delta] = values[c:n]
total += delta
dest_indices[count] = total
count += 1
return dest_indices, dest_values
@njit
def _apply_indices_to_index_values(indices_to_apply, indices, values):
# pass 1 - determine the destination lengths
cur_ = indices[:-1]
next_ = indices[1:]
count = 0
total = 0
for i in indices_to_apply:
count += 1
total += next_[i] - cur_[i]
dest_indices = np.zeros(count+1, indices.dtype)
dest_values = np.zeros(total, values.dtype)
dest_indices[0] = 0
count = 1
total = 0
for i in indices_to_apply:
n = next_[i]
c = cur_[i]
delta = n - c
dest_values[total:total + delta] = values[c:n]
total += delta
dest_indices[count] = total
count += 1
return dest_indices, dest_values
def _apply_sort_to_array(index, values):
return values[index]
@njit
def _apply_sort_to_index_values(index, indices, values):
s_indices = np.zeros_like(indices, dtype=np.int64)
s_values = np.zeros_like(values)
accumulated = np.int64(0)
s_indices[0] = 0
for di, si in enumerate(index):
src_field_start = indices[si]
src_field_end = indices[si + 1]
length = np.int64(src_field_end - src_field_start)
if length > 0:
s_values[accumulated:accumulated + length] =\
values[src_field_start:src_field_end]
accumulated += length
if s_indices[di + 1] != 0:
print('non-zero index!')
s_indices[di + 1] = accumulated
return s_indices, s_values
# TODO: merge implementation may still be required in the medium term
def dataset_merge_sort(group, index, fields):
raise NotImplementedError()
# def sort_comparison(*args):
# if len(args) == 1:
# a0 = args[0]
# def _inner(r):
# return a0[r]
# return _inner
# if len(args) == 2:
# a0 = args[0]
# a1 = args[1]
# def _inner(r):
# return a0[r], a1[r]
# return _inner
# if len(args) == 3:
# a0 = args[0]
# a1 = args[1]
# a2 = args[2]
# def _inner(r):
# return a0[r], a1[r], a2[r]
# return _inner
# if len(args) > 3:
# def _inner(r):
# return tuple(a[r] for a in args)
# return _inner
#
# def sort_function(index, fields):
# sort_group = temp_dataset()
#
# # sort each chunk individually
# chunksize = 1 << 24
# chunkcount = _chunkcount(index, chunksize)
# for c in range(chunkcount):
# istart, iend = _slice_for_chunk(c, index, chunksize)
# length = iend - istart
# fieldchunks = [None] * len(fields)
# indexchunk = index[istart:iend]
# for i_f, f in enumerate(fields):
# fc = reader(f, istart, iend)
# fieldchunks[i_f] = fc
# sfn = sort_comparison(*fieldchunks)
# sindexchunk = sorted(indexchunk, key=sfn)
# sort_group.create_dataset(f'chunk{c}', (length,), data=sindexchunk)
#
# sort_function(index, fields)
@contextmanager
def temp_dataset():
try:
uid = str(uuid.uuid4())
while os.path.exists(uid + '.hdf5'):
uid = str(uuid.uuid4())
        hd = h5py.File(uid + '.hdf5', 'w')
yield hd
finally:
hd.flush()
hd.close()
@njit
def _index_spans(spans, results):
sp_sta = spans[:-1]
sp_end = spans[1:]
for s in range(len(sp_sta)):
results[sp_sta[s]:sp_end[s]] = s
return results
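# A note on the span convention used by the _apply_spans_* helpers below
# (an illustrative example, not taken from the library docs): spans = [0, 2, 5]
# describes two groups covering source elements [0:2] and [2:5], so
# _apply_spans_count would write [2, 3] into its destination array and
# _index_spans would expand it to [0, 0, 1, 1, 1].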
@njit
def _apply_spans_index_of_max(spans, src_array, dest_array):
for i in range(len(spans)-1):
cur = spans[i]
next = spans[i+1]
if next - cur == 1:
dest_array[i] = cur
else:
dest_array[i] = cur + src_array[cur:next].argmax()
@njit
def _apply_spans_index_of_min(spans, src_array, dest_array, filter_array):
for i in range(len(spans)-1):
cur = spans[i]
next = spans[i+1]
if next - cur == 0:
filter_array[i] = False
elif next - cur == 1:
dest_array[i] = cur
else:
            dest_array[i] = cur + src_array[cur:next].argmin()
@njit
def _apply_spans_index_of_first(spans, dest_array):
dest_array[:] = spans[:-1]
@njit
def _apply_spans_index_of_last(spans, dest_array):
dest_array[:] = spans[1:] - 1
@njit
def _apply_spans_count(spans, dest_array):
for i in range(len(spans)-1):
dest_array[i] = np.int64(spans[i+1] - spans[i])
@njit
def _apply_spans_first(spans, src_array, dest_array):
dest_array[:] = src_array[spans[:-1]]
@njit
def _apply_spans_last(spans, src_array, dest_array):
spans = spans[1:]-1
dest_array[:] = src_array[spans]
@njit
def _apply_spans_max(spans, src_array, dest_array):
for i in range(len(spans)-1):
cur = spans[i]
next = spans[i+1]
if next - cur == 1:
dest_array[i] = src_array[cur]
else:
dest_array[i] = src_array[cur:next].max()
@njit
def _apply_spans_min(spans, src_array, dest_array):
for i in range(len(spans)-1):
cur = spans[i]
next = spans[i+1]
if next - cur == 1:
dest_array[i] = src_array[cur]
else:
dest_array[i] = src_array[cur:next].min()
# def _apply_spans_concat(spans, src_field):
# dest_values = [None] * (len(spans)-1)
# for i in range(len(spans)-1):
# cur = spans[i]
# next = spans[i+1]
# if next - cur == 1:
# dest_values[i] = src_field[cur]
# else:
# src = [s for s in src_field[cur:next] if len(s) > 0]
# if len(src) > 0:
# dest_values[i] = ','.join(utils.to_escaped(src))
# else:
# dest_values[i] = ''
# # if len(dest_values[i]) > 0:
# # print(dest_values[i])
# return dest_values
@njit
def _apply_spans_concat_2(spans, src_index, src_values, dest_index, dest_values,
max_index_i, max_value_i, separator, delimiter, sp_start, dest_start_v):
if sp_start == 0:
d_index_i = np.int64(1)
d_index_v = np.int64(0)
else:
d_index_i = np.int64(0)
d_index_v = np.int64(0)
sp_end = len(spans)-1
for s in range(sp_start, sp_end):
sp_cur = spans[s]
sp_next = spans[s+1]
cur_src_i = src_index[sp_cur]
next_src_i = src_index[sp_next]
non_empties = 0
if sp_next - sp_cur == 1:
# at most one entry to be copied so no decoration required
if next_src_i - cur_src_i > 0:
non_empties = 1
elif sp_next - sp_cur > 1:
for e in range(sp_cur, sp_next):
e_start = src_index[e]
e_end = src_index[e+1]
if e_end - e_start > 0:
non_empties += 1
delta = 0
if non_empties == 1:
# single entry
comma = False
quotes = False
for i_c in range(cur_src_i, next_src_i):
if src_values[i_c] == separator:
comma = True
elif src_values[i_c] == delimiter:
quotes = True
if comma or quotes:
dest_values[d_index_v + delta] = delimiter
delta += 1
for i_c in range(cur_src_i, next_src_i):
if src_values[i_c] == delimiter:
dest_values[d_index_v + delta] = delimiter
delta += 1
dest_values[d_index_v + delta] = src_values[i_c]
delta += 1
if comma or quotes:
dest_values[d_index_v + delta] = delimiter
delta += 1
elif non_empties > 1:
# multiple entries so find out whether there are multiple entries with values
prev_empty = True
for e in range(sp_cur, sp_next):
src_start = src_index[e]
src_end = src_index[e + 1]
comma = False
quotes = False
cur_empty = src_end == src_start
for i_c in range(src_start, src_end):
if src_values[i_c] == separator:
comma = True
elif src_values[i_c] == delimiter:
quotes = True
if prev_empty == False and cur_empty == False:
if e > sp_cur:
dest_values[d_index_v + delta] = separator
delta += 1
# `prev_empty`, once set to False, can't become True again.
# this line ensures that, once we have encountered our first
# non-empty entry, any following non-empty entry will get a separator,
# even if there are empty-entries in-between.
prev_empty = cur_empty if cur_empty == False else prev_empty
if comma or quotes:
dest_values[d_index_v + delta] = delimiter
delta += 1
for i_c in range(src_start, src_end):
if src_values[i_c] == delimiter:
dest_values[d_index_v + delta] = delimiter
delta += 1
dest_values[d_index_v + delta] = src_values[i_c]
delta += 1
if comma or quotes:
dest_values[d_index_v + delta] = delimiter
delta += 1
d_index_v += delta
dest_index[d_index_i] = d_index_v + dest_start_v
d_index_i += 1
if d_index_i >= max_index_i or d_index_v >= max_value_i:
break
return s + 1, d_index_i, d_index_v
@njit
def _apply_spans_concat(spans, src_index, src_values, dest_index, dest_values,
max_index_i, max_value_i, s_start, separator, delimiter):
if s_start == 0:
index_i = np.uint32(1)
index_v = np.int64(0)
dest_index[0] = spans[0]
else:
index_i = np.uint32(0)
index_v = np.int64(0)
s_end = len(spans)-1
for s in range(s_start, s_end):
cur = spans[s]
next = spans[s+1]
cur_src_i = src_index[cur]
next_src_i = src_index[next]
dest_index[index_i] = next_src_i
index_i += 1
if next_src_i - cur_src_i > 0:
if next - cur == 1:
# only one entry to be copied, so commas not required
next_index_v = next_src_i - cur_src_i + np.int64(index_v)
dest_values[index_v:next_index_v] = src_values[cur_src_i:next_src_i]
index_v = next_index_v
else:
# check to see how many non-zero-length entries there are; >1 means we must
# separate them by commas
non_empties = 0
for e in range(cur, next):
if src_index[e] < src_index[e+1]:
non_empties += 1
if non_empties == 0:
raise NotImplementedError()
elif non_empties == 1:
# only one non-empty entry to be copied, so commas not required
next_index_v = next_src_i - cur_src_i + np.int64(index_v)
dest_values[index_v:next_index_v] = src_values[cur_src_i:next_src_i]
index_v = next_index_v
else:
# the outer conditional already determines that we have a non-empty entry
# so there must be multiple non-empty entries and commas are required
for e in range(cur, next):
src_start = src_index[e]
src_end = src_index[e+1]
comma = False
quotes = False
for i_c in range(src_start, src_end):
if src_values[i_c] == separator:
comma = True
elif src_values[i_c] == delimiter:
quotes = True
d_index = np.int64(0)
if comma or quotes:
dest_values[d_index] = delimiter
d_index += 1
for i_c in range(src_start, src_end):
if src_values[i_c] == delimiter:
dest_values[d_index] = src_values[i_c]
d_index += 1
dest_values[d_index] = src_values[i_c]
d_index += 1
dest_values[d_index] = delimiter
d_index += 1
else:
s_len = np.int64(src_end - src_start)
dest_values[index_v:index_v + s_len] = src_values[src_start:src_end]
d_index += s_len
index_v += np.int64(d_index)
# if either the index or values are past the threshold, write them
if index_i >= max_index_i or index_v >= max_value_i:
break
return s+1, index_i, index_v
# TODO - this can go if it isn't needed
def timestamp_to_date(values):
results = np.zeros(len(values), dtype='|S10')
template = "{:04d}-{:02d}-{:02d}"
for i_r in range(len(values)):
dt = datetime.fromtimestamp(values[i_r])
results[i_r] = template.format(dt.year, dt.month, dt.day).encode()
return results
# TODO: refactor into datastore
@jit
def filtered_iterator(values, filter, default=np.nan):
for i in range(len(values)):
if filter[i]:
yield default
else:
yield values[i]
@njit
def _map_valid_indices(src, map, default):
filter = map < INVALID_INDEX
#dest = np.where(filter, src[map], default)
dest = np.zeros(len(map), dtype=src.dtype)
for i_r in range(len(map)):
if filter[i_r]:
dest[i_r] = src[map[i_r]]
else:
dest[i_r] = default
return dest
def _values_from_reader_or_ndarray(name, field):
if isinstance(field, rw.Reader):
raw_field = field[:]
elif isinstance(field, np.ndarray):
raw_field = field
else:
raise ValueError(f"'{name}' must be a Reader or an ndarray but is {type(field)}")
return raw_field
# TODO: handle usage of reader
def filter_duplicate_fields(field):
field_ = val.array_from_field_or_lower('field', field)
filter_ = np.ones(len(field_), dtype=bool)
_filter_duplicate_fields(field_, filter_)
return filter_
def _filter_duplicate_fields(field, filter):
seen_ids = dict()
for i in range(len(field)):
f = field[i]
if f in seen_ids:
filter[i] = False
else:
seen_ids[f] = 1
filter[i] = True
return filter
def foreign_key_is_in_primary_key(primary_key, foreign_key):
val._check_is_reader_or_ndarray('primary_key', primary_key)
val._check_is_reader_or_ndarray('foreign_key', foreign_key)
if isinstance(primary_key, rw.Reader):
pk = primary_key[:]
else:
pk = primary_key
if isinstance(foreign_key, rw.Reader):
fk = foreign_key[:]
else:
fk = foreign_key
result = np.zeros(len(fk), dtype=bool)
return _filter_non_orphaned_foreign_keys(pk, fk, result)
def _filter_non_orphaned_foreign_keys(primary_key, foreign_key, results):
pkids = dict()
trueval = bool(True)
falseval = bool(False)
for p in primary_key:
pkids[p] = trueval
for i, f in enumerate(foreign_key):
results[i] = pkids.get(f, falseval)
return results
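# Illustrative behaviour (a sketch, assuming ndarray inputs):
#   foreign_key_is_in_primary_key(np.asarray([1, 2, 3]), np.asarray([2, 4, 1]))
#   -> array([ True, False,  True])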
# Newest
# ======
def _aggregate_impl(predicate, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None, result_dtype=None):
if fkey_indices is None and fkey_index_spans is None:
raise ValueError("One of 'fkey_indices' or 'fkey_index_spans' must be set")
if fkey_indices is not None and fkey_index_spans is not None:
raise ValueError("Only one of 'fkey_indices' and 'fkey_index_spans' may be set")
if writer is not None:
if not isinstance(writer, (rw.Writer, np.ndarray)):
raise ValueError("'writer' must be either a Writer or an ndarray instance")
if fkey_index_spans is None:
fkey_index_spans = ops.get_spans_for_field(fkey_indices)
if isinstance(writer, np.ndarray):
if len(writer) != len(fkey_index_spans) - 1:
error = "'writer': ndarray must be of length {} but is of length {}"
            raise ValueError(error.format(len(fkey_index_spans) - 1, len(writer)))
elif writer.dtype != result_dtype:
raise ValueError(f"'writer' dtype must be {result_dtype} but is {writer.dtype}")
if isinstance(writer, rw.Writer) or writer is None:
results = np.zeros(len(fkey_index_spans) - 1, dtype=result_dtype)
else:
results = writer
# execute the predicate (note that not every predicate requires a reader)
predicate(fkey_index_spans, reader, results)
if isinstance(writer, rw.Writer):
writer.write(results)
return writer if writer is not None else results
class DataStore:
def __init__(self, chunksize=DEFAULT_CHUNKSIZE,
timestamp=str(datetime.now(timezone.utc))):
if not isinstance(timestamp, str):
error_str = "'timestamp' must be a string but is of type {}"
raise ValueError(error_str.format(type(timestamp)))
self.chunksize = chunksize
self.timestamp = timestamp
def set_timestamp(self, timestamp=str(datetime.now(timezone.utc))):
if not isinstance(timestamp, str):
error_str = "'timestamp' must be a string but is of type {}"
raise ValueError(error_str.format(type(timestamp)))
self.timestamp = timestamp
# TODO: fields is being ignored at present
def sort_on(self, src_group, dest_group, keys, fields=None,
timestamp=None, write_mode='write'):
if timestamp is None:
timestamp = self.timestamp
# sort_keys = ('patient_id', 'created_at')
readers = tuple(self.get_reader(src_group[f]) for f in keys)
t1 = time.time()
sorted_index = self.dataset_sort(readers, np.arange(len(readers[0]), dtype=np.uint32))
print(f'sorted {keys} index in {time.time() - t1}s')
t0 = time.time()
for k in src_group.keys():
t1 = time.time()
r = self.get_reader(src_group[k])
w = r.get_writer(dest_group, k, timestamp, write_mode=write_mode)
self.apply_sort(sorted_index, r, w)
del r
del w
print(f" '{k}' reordered in {time.time() - t1}s")
print(f"fields reordered in {time.time() - t0}s")
def dataset_sort(self, readers, index=None):
r_readers = reversed(readers)
if index is None:
            index = np.arange(len(readers[0]))
acc_index = index[:]
first = True
for f in r_readers:
if first:
first = False
fdata = f[:]
else:
fdata = f[:][acc_index]
index = np.argsort(fdata, kind='stable')
acc_index = acc_index[index]
return acc_index
# TODO: index should be able to be either a reader or an ndarray
def apply_sort(self, index, reader, writer=None):
val._check_is_appropriate_writer_if_set(self, 'writer', reader, writer)
if isinstance(reader, rw.IndexedStringReader):
src_indices = reader.field['index'][:]
src_values = reader.field.get('values', np.zeros(0, dtype=np.uint8))[:]
indices, values = _apply_sort_to_index_values(index, src_indices, src_values)
if writer:
writer.write_raw(indices, values)
return indices, values
elif isinstance(reader, rw.Reader):
result = _apply_sort_to_array(index, reader[:])
if writer:
writer.write(result)
return result
elif isinstance(reader, np.ndarray):
result = _apply_sort_to_array(index, reader)
if writer:
writer.write(result)
return result
else:
raise ValueError(f"'reader' must be a Reader or an ndarray, but is {type.reader}")
# TODO: write filter with new readers / writers rather than deleting this
def apply_filter(self, filter_to_apply, reader, writer=None):
if isinstance(reader, rw.IndexedStringReader):
val._check_is_appropriate_writer_if_set(self, 'writer', reader, writer)
src_indices = reader.field['index'][:]
src_values = reader.field.get('values', np.zeros(0, dtype=np.uint8))[:]
if len(src_indices) != len(filter_to_apply) + 1:
raise ValueError(f"'indices' (length {len(src_indices)}) must be one longer than "
f"'index_filter' (length {len(filter_to_apply)})")
indices, values = _apply_filter_to_index_values(filter_to_apply,
src_indices, src_values)
if writer:
writer.write_raw(indices, values)
return indices, values
elif isinstance(reader, rw.Reader):
if len(filter_to_apply) != len(reader):
msg = ("'filter_to_apply' and 'reader' must be the same length "
"but are length {} and {} respectively")
raise ValueError(msg.format(len(filter_to_apply), len(reader)))
result = reader[:][filter_to_apply]
if writer:
writer.write(result)
return result
elif isinstance(reader, np.ndarray):
result = reader[filter_to_apply]
if writer:
writer.write(result)
return result
else:
raise ValueError("'reader' must be a Reader or an ndarray, but is {}".format(type(reader)))
def apply_indices(self, indices_to_apply, reader, writer=None):
if isinstance(reader, rw.IndexedStringReader):
val._check_is_appropriate_writer_if_set(self, 'writer', reader, writer)
src_indices = reader.field['index'][:]
src_values = reader.field.get('values', np.zeros(0, dtype=np.uint8))[:]
indices, values = _apply_indices_to_index_values(indices_to_apply,
src_indices, src_values)
if writer:
writer.write_raw(indices, values)
return indices, values
elif isinstance(reader, rw.Reader):
result = reader[:][indices_to_apply]
if writer:
writer.write(result)
return result
elif isinstance(reader, np.ndarray):
result = reader[indices_to_apply]
if writer:
writer.write(result)
return result
else:
raise ValueError(f"'reader' must be a Reader or an ndarray, but is {type(reader)}")
# TODO: write distinct with new readers / writers rather than deleting this
def distinct(self, field=None, fields=None, filter=None):
        if field is None and fields is None:
            raise ValueError("One of 'field' and 'fields' must be set")
        if field is not None and fields is not None:
            raise ValueError("Only one of 'field' and 'fields' may be set")
if field is not None:
field_ = val.raw_array_from_parameter(self, 'field', field)
return np.unique(field_)
entries = [(f'{i}', f.dtype) for i, f in enumerate(fields)]
unified = np.empty_like(fields[0], dtype=np.dtype(entries))
for i, f in enumerate(fields):
unified[f'{i}'] = f
uniques = np.unique(unified)
results = [uniques[f'{i}'] for i in range(len(fields))]
return results
def get_spans(self, field=None, fields=None):
if field is None and fields is None:
raise ValueError("One of 'field' and 'fields' must be set")
if field is not None and fields is not None:
raise ValueError("Only one of 'field' and 'fields' may be set")
raw_field = None
raw_fields = None
if field is not None:
val._check_is_reader_or_ndarray('field', field)
raw_field = field[:] if isinstance(field, rw.Reader) else field
return ops.get_spans_for_field(raw_field)
else:
raw_fields = []
for f in fields:
val._check_is_reader_or_ndarray('elements of tuple/list fields', f)
raw_fields.append(f[:] if isinstance(f, rw.Reader) else f)
return ops._get_spans_for_2_fields(raw_fields[0], raw_fields[1])
def index_spans(self, spans):
raw_spans = val.raw_array_from_parameter(self, "spans", spans)
results = np.zeros(raw_spans[-1], dtype=np.int64)
return _index_spans(raw_spans, results)
# TODO: needs a predicate to break ties: first, last?
def apply_spans_index_of_min(self, spans, reader, writer=None):
val._check_is_reader_or_ndarray('reader', reader)
        val._check_is_reader_or_ndarray_if_set('writer', writer)
if isinstance(reader, rw.Reader):
raw_reader = reader[:]
else:
raw_reader = reader
if writer is not None:
if isinstance(writer, rw.Writer):
raw_writer = writer[:]
else:
raw_writer = writer
else:
raw_writer = np.zeros(len(spans)-1, dtype=np.int64)
_apply_spans_index_of_min(spans, raw_reader, raw_writer)
if isinstance(writer, rw.Writer):
writer.write(raw_writer)
else:
return raw_writer
def apply_spans_index_of_max(self, spans, reader, writer=None):
val._check_is_reader_or_ndarray('reader', reader)
        val._check_is_reader_or_ndarray_if_set('writer', writer)
if isinstance(reader, rw.Reader):
raw_reader = reader[:]
else:
raw_reader = reader
if writer is not None:
if isinstance(writer, rw.Writer):
raw_writer = writer[:]
else:
raw_writer = writer
else:
raw_writer = np.zeros(len(spans)-1, dtype=np.int64)
_apply_spans_index_of_max(spans, raw_reader, raw_writer)
if isinstance(writer, rw.Writer):
writer.write(raw_writer)
else:
return raw_writer
def apply_spans_index_of_first(self, spans, writer=None):
if writer is not None:
if isinstance(writer, rw.Writer):
raw_writer = writer[:]
else:
raw_writer = writer
else:
raw_writer = np.zeros(len(spans) - 1, dtype=np.int64)
_apply_spans_index_of_first(spans, raw_writer)
if isinstance(writer, rw.Writer):
writer.write(raw_writer)
else:
return raw_writer
def apply_spans_index_of_last(self, spans, writer=None):
if writer is not None:
if isinstance(writer, rw.Writer):
raw_writer = writer[:]
else:
raw_writer = writer
else:
raw_writer = np.zeros(len(spans) - 1, dtype=np.int64)
_apply_spans_index_of_last(spans, raw_writer)
if isinstance(writer, rw.Writer):
writer.write(raw_writer)
else:
return raw_writer
# TODO - for all apply_spans methods, spans should be able to be an ndarray
def apply_spans_count(self, spans, _, writer=None):
if writer is None:
_values_from_reader_or_ndarray('spans', spans)
dest_values = np.zeros(len(spans)-1, dtype=np.int64)
_apply_spans_count(spans, dest_values)
return dest_values
else:
if isinstance(writer, rw.Writer):
dest_values = writer.chunk_factory(len(spans) - 1)
_apply_spans_count(spans, dest_values)
writer.write(dest_values)
elif isinstance(writer, np.ndarray):
_apply_spans_count(spans, writer)
else:
raise ValueError(f"'writer' must be one of 'Writer' or 'ndarray' but is {type(writer)}")
def apply_spans_first(self, spans, reader, writer):
if isinstance(reader, rw.Reader):
read_values = reader[:]
elif isinstance(reader, np.ndarray):
read_values = reader
else:
raise ValueError(f"'reader' must be one of 'Reader' or 'ndarray' but is {type(reader)}")
if isinstance(writer, rw.Writer):
dest_values = writer.chunk_factory(len(spans) - 1)
_apply_spans_first(spans, read_values, dest_values)
writer.write(dest_values)
elif isinstance(writer, np.ndarray):
_apply_spans_first(spans, read_values, writer)
else:
raise ValueError(f"'writer' must be one of 'Writer' or 'ndarray' but is {type(writer)}")
def apply_spans_last(self, spans, reader, writer):
if isinstance(reader, rw.Reader):
read_values = reader[:]
elif isinstance(reader, np.ndarray):
read_values = reader
else:
raise ValueError(f"'reader' must be one of 'Reader' or 'ndarray' but is {type(reader)}")
if isinstance(writer, rw.Writer):
dest_values = writer.chunk_factory(len(spans) - 1)
_apply_spans_last(spans, read_values, dest_values)
writer.write(dest_values)
elif isinstance(writer, np.ndarray):
_apply_spans_last(spans, read_values, writer)
else:
raise ValueError(f"'writer' must be one of 'Writer' or 'ndarray' but is {type(writer)}")
def apply_spans_min(self, spans, reader, writer):
if isinstance(reader, rw.Reader):
read_values = reader[:]
elif isinstance(reader, np.ndarray):
read_values = reader
else:
raise ValueError(f"'reader' must be one of 'Reader' or 'ndarray' but is {type(reader)}")
if isinstance(writer, rw.Writer):
dest_values = writer.chunk_factory(len(spans) - 1)
_apply_spans_min(spans, read_values, dest_values)
writer.write(dest_values)
        elif isinstance(writer, np.ndarray):
_apply_spans_min(spans, read_values, writer)
else:
raise ValueError(f"'writer' must be one of 'Writer' or 'ndarray' but is {type(writer)}")
def apply_spans_max(self, spans, reader, writer):
if isinstance(reader, rw.Reader):
read_values = reader[:]
elif isinstance(reader, np.ndarray):
read_values = reader
else:
raise ValueError(f"'reader' must be one of 'Reader' or 'ndarray' but is {type(reader)}")
if isinstance(writer, rw.Writer):
dest_values = writer.chunk_factory(len(spans) - 1)
_apply_spans_max(spans, read_values, dest_values)
writer.write(dest_values)
        elif isinstance(writer, np.ndarray):
_apply_spans_max(spans, read_values, writer)
else:
raise ValueError(f"'writer' must be one of 'Writer' or 'ndarray' but is {type(writer)}")
def apply_spans_concat(self, spans, reader, writer):
if not isinstance(reader, rw.IndexedStringReader):
raise ValueError(f"'reader' must be one of 'IndexedStringReader' but is {type(reader)}")
if not isinstance(writer, rw.IndexedStringWriter):
raise ValueError(f"'writer' must be one of 'IndexedStringWriter' but is {type(writer)}")
src_index = reader.field['index'][:]
src_values = reader.field['values'][:]
dest_index = np.zeros(reader.chunksize, np.int64)
dest_values = np.zeros(reader.chunksize * 16, src_values.dtype)
max_index_i = reader.chunksize
max_value_i = reader.chunksize * 8
if src_values.dtype == 'S1':
separator = b','
delimiter = b'"'
elif src_values.dtype == np.uint8:
separator = np.frombuffer(b',', dtype='S1')[0][0]
delimiter = np.frombuffer(b'"', dtype='S1')[0][0]
s = 0
while s < len(spans) - 1:
s, index_i, index_v = _apply_spans_concat(spans, src_index, src_values,
dest_index, dest_values,
max_index_i, max_value_i, s,
separator, delimiter)
if index_i > 0 or index_v > 0:
writer.write_raw(dest_index[:index_i], dest_values[:index_v])
writer.flush()
def aggregate_count(self, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
return _aggregate_impl(self.apply_spans_count, fkey_indices, fkey_index_spans,
reader, writer, np.uint32)
def aggregate_first(self, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
return self.aggregate_custom(self.apply_spans_first, fkey_indices, fkey_index_spans,
reader, writer)
def aggregate_last(self, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
return self.aggregate_custom(self.apply_spans_last, fkey_indices, fkey_index_spans,
reader, writer)
def aggregate_min(self,fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
return self.aggregate_custom(self.apply_spans_min, fkey_indices, fkey_index_spans,
reader, writer)
def aggregate_max(self, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
return self.aggregate_custom(self.apply_spans_max, fkey_indices, fkey_index_spans,
reader, writer)
def aggregate_custom(self,
predicate, fkey_indices=None, fkey_index_spans=None,
reader=None, writer=None):
if reader is None:
raise ValueError("'reader' must not be None")
if not isinstance(reader, (rw.Reader, np.ndarray)):
raise ValueError(f"'reader' must be a Reader or an ndarray but is {type(reader)}")
if isinstance(reader, rw.Reader):
required_dtype = reader.dtype()
else:
required_dtype = reader.dtype
return _aggregate_impl(predicate, fkey_indices, fkey_index_spans,
reader, writer, required_dtype)
def join(self,
destination_pkey, fkey_indices, values_to_join,
writer=None, fkey_index_spans=None):
if fkey_indices is not None:
if not isinstance(fkey_indices, (rw.Reader, np.ndarray)):
raise ValueError(f"'fkey_indices' must be a type of Reader or an ndarray")
if values_to_join is not None:
if not isinstance(values_to_join, (rw.Reader, np.ndarray)):
raise ValueError(f"'values_to_join' must be a type of Reader but is {type(values_to_join)}")
if isinstance(values_to_join, rw.IndexedStringReader):
raise ValueError(f"Joins on indexed string fields are not supported")
if isinstance(values_to_join, rw.Reader):
raw_values_to_join = values_to_join[:]
else:
raw_values_to_join = values_to_join
# generate spans for the sorted key indices if not provided
if fkey_index_spans is None:
fkey_index_spans = self.get_spans(field=fkey_indices)
# select the foreign keys from the start of each span to get an ordered list
# of unique id indices in the destination space that the results of the predicate
# execution are mapped to
unique_fkey_indices = fkey_indices[:][fkey_index_spans[:-1]]
# generate a filter to remove invalid foreign key indices (where values in the
        # foreign key don't map to any values in the destination space)
invalid_filter = unique_fkey_indices < INVALID_INDEX
safe_unique_fkey_indices = unique_fkey_indices[invalid_filter]
# the predicate results are in the same space as the unique_fkey_indices, which
# means they may still contain invalid indices, so filter those now
safe_values_to_join = raw_values_to_join[invalid_filter]
        # now get the memory that the results will be mapped to; fall back to a plain
        # ndarray matching the joined values' dtype when no writer is supplied
        if writer is not None:
            destination_space_values = writer.chunk_factory(len(destination_pkey))
        else:
            destination_space_values = np.zeros(len(destination_pkey), dtype=raw_values_to_join.dtype)
        # finally, map the results from the source space to the destination space
        destination_space_values[safe_unique_fkey_indices] = safe_values_to_join
        if writer is not None:
            writer.write(destination_space_values)
        else:
            return destination_space_values
def predicate_and_join(self,
predicate, destination_pkey, fkey_indices,
reader=None, writer=None, fkey_index_spans=None):
if reader is not None:
if not isinstance(reader, rw.Reader):
raise ValueError(f"'reader' must be a type of Reader but is {type(reader)}")
if isinstance(reader, rw.IndexedStringReader):
raise ValueError(f"Joins on indexed string fields are not supported")
# generate spans for the sorted key indices if not provided
if fkey_index_spans is None:
fkey_index_spans = self.get_spans(field=fkey_indices)
# select the foreign keys from the start of each span to get an ordered list
# of unique id indices in the destination space that the results of the predicate
# execution are mapped to
unique_fkey_indices = fkey_indices[:][fkey_index_spans[:-1]]
# generate a filter to remove invalid foreign key indices (where values in the
        # foreign key don't map to any values in the destination space)
invalid_filter = unique_fkey_indices < INVALID_INDEX
safe_unique_fkey_indices = unique_fkey_indices[invalid_filter]
# execute the predicate (note that not every predicate requires a reader)
if reader is not None:
dtype = reader.dtype()
else:
dtype = np.uint32
results = np.zeros(len(fkey_index_spans)-1, dtype=dtype)
predicate(fkey_index_spans, reader, results)
# the predicate results are in the same space as the unique_fkey_indices, which
# means they may still contain invalid indices, so filter those now
safe_results = results[invalid_filter]
# now get the memory that the results will be mapped to
destination_space_values = writer.chunk_factory(len(destination_pkey))
# finally, map the results from the source space to the destination space
destination_space_values[safe_unique_fkey_indices] = safe_results
writer.write(destination_space_values)
def get_reader(self, field):
if 'fieldtype' not in field.attrs.keys():
raise ValueError(f"'{field.name}' is not a well-formed field")
fieldtype_map = {
'indexedstring': rw.IndexedStringReader,
'fixedstring': rw.FixedStringReader,
'categorical': rw.CategoricalReader,
'boolean': rw.NumericReader,
'numeric': rw.NumericReader,
'datetime': rw.TimestampReader,
'date': rw.TimestampReader,
'timestamp': rw.TimestampReader
}
fieldtype = field.attrs['fieldtype'].split(',')[0]
return fieldtype_map[fieldtype](self, field)
def get_existing_writer(self, field, timestamp=None):
if 'fieldtype' not in field.attrs.keys():
raise ValueError(f"'{field.name}' is not a well-formed field")
fieldtype_map = {
'indexedstring': rw.IndexedStringReader,
'fixedstring': rw.FixedStringReader,
'categorical': rw.CategoricalReader,
'boolean': rw.NumericReader,
'numeric': rw.NumericReader,
'datetime': rw.TimestampReader,
'date': rw.TimestampReader,
'timestamp': rw.TimestampReader
}
fieldtype = field.attrs['fieldtype'].split(',')[0]
reader = fieldtype_map[fieldtype](self, field)
group = field.parent
name = field.name.split('/')[-1]
return reader.get_writer(group, name, timestamp=timestamp, write_mode='overwrite')
def get_indexed_string_writer(self, group, name,
timestamp=None, writemode='write'):
return rw.IndexedStringWriter(self, group, name, timestamp, writemode)
def get_fixed_string_writer(self, group, name, width,
timestamp=None, writemode='write'):
return rw.FixedStringWriter(self, group, name, width, timestamp, writemode)
def get_categorical_writer(self, group, name, categories,
timestamp=None, writemode='write'):
return rw.CategoricalWriter(self, group, name, categories, timestamp, writemode)
def get_numeric_writer(self, group, name, dtype, timestamp=None, writemode='write'):
return rw.NumericWriter(self, group, name, dtype, timestamp, writemode)
def get_timestamp_writer(self, group, name, timestamp=None, writemode='write'):
return rw.TimestampWriter(self, group, name, timestamp, writemode)
def get_compatible_writer(self, field, dest_group, dest_name,
timestamp=None, writemode='write'):
reader = self.get_reader(field)
return reader.get_writer(dest_group, dest_name, timestamp, writemode)
def get_or_create_group(self, group, name):
if name in group:
return group[name]
return group.create_group(name)
def chunks(self, length, chunksize=None):
if chunksize is None:
chunksize = self.chunksize
cur = 0
while cur < length:
next = min(length, cur + chunksize)
yield cur, next
cur = next
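        # Example (illustrative): list(self.chunks(10, chunksize=4)) yields [(0, 4), (4, 8), (8, 10)].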
def process(self, inputs, outputs, predicate):
# TODO: modifying the dictionaries in place is not great
input_readers = dict()
for k, v in inputs.items():
if isinstance(v, rw.Reader):
input_readers[k] = v
else:
input_readers[k] = self.get_reader(v)
output_writers = dict()
output_arrays = dict()
for k, v in outputs.items():
if isinstance(v, rw.Writer):
output_writers[k] = v
else:
raise ValueError("'outputs': all values must be 'Writers'")
reader = next(iter(input_readers.values()))
input_length = len(reader)
writer = next(iter(output_writers.values()))
chunksize = writer.chunksize
required_chunksize = min(input_length, chunksize)
for k, v in outputs.items():
output_arrays[k] = output_writers[k].chunk_factory(required_chunksize)
for c in self.chunks(input_length, chunksize):
kwargs = dict()
for k, v in inputs.items():
kwargs[k] = v[c[0]:c[1]]
for k, v in output_arrays.items():
kwargs[k] = v[:c[1] - c[0]]
predicate(**kwargs)
# TODO: write back to the writer
for k in output_arrays.keys():
output_writers[k].write_part(kwargs[k])
for k, v in output_writers.items():
output_writers[k].flush()
def get_index(self, target, foreign_key, destination=None):
# print(' building patient_id index')
t0 = time.time()
target_lookup = dict()
for i, v in enumerate(target[:]):
target_lookup[v] = i
# print(f' target lookup built in {time.time() - t0}s')
# print(' perform initial index')
t0 = time.time()
foreign_key_elems = foreign_key[:]
# foreign_key_index = np.asarray([target_lookup.get(i, -1) for i in foreign_key_elems],
# dtype=np.int64)
foreign_key_index = np.zeros(len(foreign_key_elems), dtype=np.int64)
current_invalid = np.int64(INVALID_INDEX)
for i_k, k in enumerate(foreign_key_elems):
index = target_lookup.get(k, current_invalid)
if index >= INVALID_INDEX:
current_invalid += 1
target_lookup[k] = index
foreign_key_index[i_k] = index
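        # Example (illustrative): with target values ['a', 'b', 'c'] and foreign_key values
        # ['b', 'a', 'x'], the resulting index is [1, 0, INVALID_INDEX]; keys missing from the
        # target are assigned increasing sentinel values starting at INVALID_INDEX.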
# print(f' initial index performed in {time.time() - t0}s')
if destination:
destination.write(foreign_key_index)
else:
return foreign_key_index
def get_shared_index(self, keys):
if not isinstance(keys, tuple):
raise ValueError("'keys' must be a tuple")
concatted = None
for k in keys:
raw_field = val.raw_array_from_parameter(self, 'keys', k)
if concatted is None:
concatted = pd.unique(raw_field)
else:
concatted = np.concatenate((concatted, raw_field), axis=0)
concatted = pd.unique(concatted)
concatted = np.sort(concatted)
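        # Example (illustrative): keys (['b', 'a'], ['c', 'b']) share the sorted key space
        # ['a', 'b', 'c'], so the returned indices are ([1, 0], [2, 1]).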
return tuple(np.searchsorted(concatted, k) for k in keys)
def get_trash_group(self, group):
group_names = group.name[1:].split('/')
while True:
id = str(uuid.uuid4())
try:
result = group.create_group(f"/trash/{'/'.join(group_names[:-1])}/{id}")
return result
except KeyError:
pass
def temp_filename(self):
uid = str(uuid.uuid4())
while os.path.exists(uid + '.hdf5'):
uid = str(uuid.uuid4())
return uid + '.hdf5'
|
import os
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
class IndeedBot:
def __init__(self):
"""
        Initializes the Chrome webdriver and sets the job search query string.
        self.driver: selenium.webdriver.Chrome
        self.query_string: str
        self.jobs: list
        self.express_apply_jobs: list
"""
self.driver = webdriver.Chrome('./chromedriver')
self.query_string = "https://www.indeed.fr/jobs?q={job}&l={city}&sort=date"
self.jobs = []
self.express_apply_jobs = []
def nav(self, url):
"""
Navigates to a given url
Args:
url:str url chromedriver Chrome instance navigates to.
"""
self.driver.get(url)
time.sleep(2) # wait for page load
def __convert_query(self, job, city):
"""
Reformats the query for expected syntax of the search
Args:
job:str: Job type to search for.
city:str: City location of the job.
Returns:
job:str
city:str
"""
job = '+'.join(job.split(" "))
city = city.lower()
return job, city
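        # Example (illustrative): ("python dev", "Paris") becomes ("python+dev", "paris").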
def query(self, job, city):
"""
Searches indeed for a job in given city
Args:
job:str: Job type to search for.
city:str: City location of the job.
"""
job, city = self.__convert_query(job, city)
query = self.query_string.format(job=job, city=city)
self.nav(query)
def find_express_jobs(self):
""""
Called after chromedriver Chrome instance navigates to job search results.
Fills list with express jobs in search results.
"""
self.jobs = self.driver.find_elements_by_class_name("jobsearch-SerpJobCard")
print(f'Number of jobs {len(self.jobs)}')
for job in self.jobs:
try: # Express apply indicator
job.find_element_by_class_name('jobCardShelfContainer')
self.express_apply_jobs.append(job)
            except NoSuchElementException:  # Job is not express apply
                pass
def apply_to_express_jobs(self, profile):
"""
        Applies to each collected express-apply job using the given profile.
Args:
profile:dict
"""
print(f'Number of express jobs {len(self.express_apply_jobs)}')
for job in self.express_apply_jobs:
self.__process_job(job)
self.__process_apply_button()
self.__fill_applicant_form(profile)
# self.driver.find_element_by_id('form-action-continue').click()
def __process_apply_button(self):
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
apply_button = self.driver.find_element_by_id('indeedApplyButtonContainer')
apply_button.click()
time.sleep(2)
def __process_job(self, job):
"""
Refines url of job posting and navigates to it
Args:
job:Selenium.Webdriver.Chrome.WebElement
"""
job_a_tag = job.find_element_by_tag_name('a')
job_href = job_a_tag.get_attribute('href')
# Removing all extraneous indeed url query string parameters
job_href = job_href.split('&from')[0]
self.nav(job_href)
def __fill_applicant_form(self, profile):
"""
        Fills in the applicant form fields using the given profile
Args:
profile:dict
"""
actions = ActionChains(self.driver)
actions.send_keys(profile['name'] + Keys.TAB + \
profile['email'] + Keys.TAB + \
profile['phone_number'] + Keys.TAB)
actions.perform()
if __name__ == '__main__':
profile = {
'name': "jhon doe",
'email': "[email protected]",
'phone_number': '0123456789',
'resume': os.getcwd() + '/resume.txt'
}
id_bot = IndeedBot()
# keywords, city
id_bot.query('alternance python dev', 'paris')
id_bot.find_express_jobs()
id_bot.apply_to_express_jobs(profile) |
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs, urlparse
class MyHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
parsed_path = urlparse(self.path)
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write("{}\n\n".format(self.requestline).encode())
self.wfile.write("client_address: {}\n".format(self.address_string()).encode())
self.wfile.write("path: {}\n".format(self.path).encode())
self.wfile.write("parsed path: {}, query: {}\n".format(parsed_path.path, parse_qs(parsed_path.query)).encode())
self.wfile.write("\n[Headers]\n".encode())
for k, v in self.headers.items():
self.wfile.write("{}: {}\n".format(k, v).encode())
return
with HTTPServer(('', 8000), MyHTTPRequestHandler) as server:
server.serve_forever()
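# Example usage (assumed): with the server running locally, a request such as
#   curl "http://localhost:8000/hello?a=1&b=2"
# echoes back the request line, client address, parsed path and query, and the request headers.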
|
from PyObjCTools import NibClassBuilder, AppHelper
import objc
objc.setVerbose(True)
NibClassBuilder.extractClasses("MainMenu")
NibClassBuilder.extractClasses("MyDocument")
import MyPDFDocument
import AppDelegate
AppHelper.runEventLoop()
|
import pytest
from .test_base import _HTTPErrorGenerator
from ...exception.master import MasterErrorCode, \
get_master_exception_class_by_error_code, get_master_exception_by_error, MasterSuccess, \
MasterSystemShuttingDown, MasterTaskDataInvalid, MasterSlaveTokenNotGiven, MasterSlaveTokenInvalid, \
MasterSelfTokenNotGiven, MasterSelfTokenInvalid, MasterChannelInvalid, \
MasterChannelNotGiven, MasterMasterTokenInvalid, MasterMasterTokenNotGiven
@pytest.mark.unittest
class TestInteractionExceptionMaster(_HTTPErrorGenerator):
def test_error_code(self):
assert len(MasterErrorCode.__members__) == 11
assert MasterErrorCode.SUCCESS == 0
def test_exception_class(self):
assert get_master_exception_class_by_error_code(MasterErrorCode.SUCCESS) == MasterSuccess
assert get_master_exception_class_by_error_code(
MasterErrorCode.SYSTEM_SHUTTING_DOWN
) == MasterSystemShuttingDown
assert get_master_exception_class_by_error_code(MasterErrorCode.CHANNEL_NOT_GIVEN) == MasterChannelNotGiven
assert get_master_exception_class_by_error_code(MasterErrorCode.CHANNEL_INVALID) == MasterChannelInvalid
assert get_master_exception_class_by_error_code(
MasterErrorCode.MASTER_TOKEN_NOT_GIVEN
) == MasterMasterTokenNotGiven
assert get_master_exception_class_by_error_code(
MasterErrorCode.MASTER_TOKEN_INVALID
) == MasterMasterTokenInvalid
assert get_master_exception_class_by_error_code(MasterErrorCode.SELF_TOKEN_NOT_GIVEN) == MasterSelfTokenNotGiven
assert get_master_exception_class_by_error_code(MasterErrorCode.SELF_TOKEN_INVALID) == MasterSelfTokenInvalid
assert get_master_exception_class_by_error_code(
MasterErrorCode.SLAVE_TOKEN_NOT_GIVEN
) == MasterSlaveTokenNotGiven
assert get_master_exception_class_by_error_code(MasterErrorCode.SLAVE_TOKEN_INVALID) == MasterSlaveTokenInvalid
assert get_master_exception_class_by_error_code(MasterErrorCode.TASK_DATA_INVALID) == MasterTaskDataInvalid
def test_get_master_exception_by_error(self):
err = get_master_exception_by_error(self._generate_exception(101, 'This is system shutting down.'))
assert isinstance(err, MasterSystemShuttingDown)
assert not err.success
assert err.status_code == 400
assert err.code == 101
assert err.message == 'This is system shutting down.'
assert err.data == {}
err = get_master_exception_by_error(self._generate_exception(601, 'Task data invalid.', data={'value': 233}))
assert isinstance(err, MasterTaskDataInvalid)
assert not err.success
assert err.status_code == 400
assert err.code == 601
assert err.message == 'Task data invalid.'
assert err.data == {'value': 233}
|
import argparse
import os
from typing import List
from pydantic import BaseModel
from fastapi import FastAPI, HTTPException
import uvicorn
from src.embedding_store import KGEmbeddingStore
from src.nearest_neighbours import FaissNearestNeighbours
from pathlib import Path
from dotenv import load_dotenv
from src.cli.log import get_logger
logger = get_logger(__name__)
load_dotenv()
EMBEDDINGS_FILE_NAMES = [
Path(os.environ.get("ENTITY_EMBEDDING_PATH")).name,
Path(os.environ.get("RELATION_EMBEDDING_PATH")).name,
]
assert (
Path(os.environ.get("ENTITY_EMBEDDING_PATH")).parent
== Path(os.environ.get("RELATION_EMBEDDING_PATH")).parent # noqa: W503
)
EMBEDDINGS_FOLDER = os.path.join(
os.path.dirname(__file__),
"..",
Path(os.environ.get("ENTITY_EMBEDDING_PATH")).parent,
)
MAPPINGS_FILE_NAMES = [
Path(os.environ.get("ENTITY_MAPPING_PATH")).name,
Path(os.environ.get("RELATION_MAPPING_PATH")).name,
]
assert (
Path(os.environ.get("ENTITY_MAPPING_PATH")).parent
== Path(os.environ.get("RELATION_MAPPING_PATH")).parent # noqa: W503
)
MAPPINGS_FOLDER = os.path.join(
os.path.dirname(__file__), "..", Path(os.environ.get("ENTITY_MAPPING_PATH")).parent
)
embedding_store = KGEmbeddingStore.from_dglke(
embeddings_folder=EMBEDDINGS_FOLDER,
embeddings_file_names=EMBEDDINGS_FILE_NAMES,
mappings_folder=MAPPINGS_FOLDER,
mappings_file_names=MAPPINGS_FILE_NAMES,
)
faiss_index = FaissNearestNeighbours(embedding_store).fit("entities")
app = FastAPI()
class NeighboursRequest(BaseModel):
entities: List[str]
k: int
@app.post("/neighbours")
async def get_nearest_neighbours(request: NeighboursRequest):
neighbours, distances = faiss_index.search(request.entities, request.k + 1)
response = {}
for idx, ent in enumerate(request.entities):
if ent != neighbours[idx][0]:
raise HTTPException(
status_code=404,
detail=f"It looks like there's a mismatch between a request entity and its nearest neighbour. Problem entity: {ent}",
)
response.update(
{ent: list(zip(neighbours[idx][1:], distances[idx][1:].tolist()))}
)
return response
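# Example (hypothetical) request body for POST /neighbours:
#   {"entities": ["entity_a", "entity_b"], "k": 5}
# The response maps each requested entity to its k nearest (neighbour, distance) pairs,
# skipping the first hit, which is expected to be the entity itself.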
class DistanceRequest(BaseModel):
entity_a: str
entity_b: str
@app.post("/distance")
async def get_distance(request: DistanceRequest):
logger.debug(f"DISTANCES: ent_a {request.entity_a}, ent_b {request.entity_b}")
if request.entity_a == request.entity_b:
return 0
return embedding_store.get_entity_distance([request.entity_a, request.entity_b])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--port", type=int, help="Optional port (default 8000)", default=8000
)
args = parser.parse_args()
port = args.port
uvicorn.run(app, host="0.0.0.0", port=port)
|
import datetime
import random
import csv
import json
# TODO: Fix * imports
from django.shortcuts import *
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import logout as auth_logout
from social.apps.django_app.default.models import UserSocialAuth
from gnip_search.gnip_search_api import QueryError as GNIPQueryError
from chart import Chart
from timeframe import Timeframe
from frequency import Frequency
from tweets import Tweets
from home.utils import *
# import twitter
KEYWORD_RELEVANCE_THRESHOLD = .1 # Only show related terms if > 10%
TWEET_QUERY_COUNT = 10 # For real identification, > 100. Max of 500 via Search API.
DEFAULT_TIMEFRAME = 1 # When not specified or needed to constrain, this # of days lookback
TIMEDELTA_DEFAULT_TIMEFRAME = datetime.timedelta(days=DEFAULT_TIMEFRAME)
TIMEDELTA_DEFAULT_30 = datetime.timedelta(days=30)
DATE_FORMAT = "%Y-%m-%d %H:%M"
DATE_FORMAT_JSON = "%Y-%m-%dT%H:%M:%S"
def login(request):
"""
Returns login page for given request
"""
context = {"request": request}
return render_to_response('login.html', context, context_instance=RequestContext(request))
@login_required
# @user_passes_test(lambda u: u.is_staff or u.is_superuser, login_url='/')
def home(request):
"""
Returns home page for given request
"""
query = request.GET.get("query", "")
context = {"request": request, "query0": query}
tweets = []
return render_to_response('home.html', context, context_instance=RequestContext(request))
@login_required
def query_chart(request):
"""
Returns query chart for given request
"""
# TODO: Move this to one line e.g. queries to query
query = request.GET.get("query", None)
queries = request.GET.getlist("queries[]")
if query:
queries = [query]
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
response_chart = None
try:
response_chart = Chart(queries = queries,
start = request_timeframe.start,
end = request_timeframe.end,
interval = request_timeframe.interval)
except GNIPQueryError as e:
return handleQueryError(e)
response_data = {}
response_data['days'] = request_timeframe.days
response_data['start'] = request_timeframe.start.strftime(DATE_FORMAT_JSON)
response_data['end'] = request_timeframe.end.strftime(DATE_FORMAT_JSON)
response_data['columns'] = response_chart.columns
response_data['total'] = response_chart.total
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required
def query_frequency(request):
query = request.GET.get("query", None)
response_data = {}
sample = 500
if query is not None:
# Get Timeframe e.g. process time from request
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
data = None
try:
# Query GNIP and get frequency
data = Frequency(query = query,
sample = sample,
start = request_timeframe.start,
end = request_timeframe.end)
except GNIPQueryError as e:
return handleQueryError(e)
response_data["frequency"] = data.freq
response_data["sample"] = sample
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required
def query_tweets(request):
"""
Returns tweet query
"""
request_timeframe = Timeframe(start = request.GET.get("start", None),
end = request.GET.get("end", None),
interval = request.GET.get("interval", "hour"))
query_count = int(request.GET.get("embedCount", TWEET_QUERY_COUNT))
export = request.GET.get("export", None)
query = request.GET.get("query", "")
try:
tweets = Tweets(query=query, query_count=query_count, start=request_timeframe.start, end=request_timeframe.end, export=export)
except GNIPQueryError as e:
return handleQueryError(e)
response_data = {}
if export == "csv":
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="export.csv"'
writer = csv.writer(response, delimiter=',', quotechar="'", quoting=csv.QUOTE_ALL)
writer.writerow(['count','time','id','user_screen_name','user_id','status','retweet_count','favorite_count','is_retweet','in_reply_to_tweet_id','in_reply_to_screen_name'])
        count = 0
for t in tweets.get_data():
count = count + 1
body = t['body'].encode('ascii', 'replace')
status_id = t['id']
status_id = status_id[status_id.rfind(':')+1:]
user_id = t['actor']['id']
user_id = user_id[user_id.rfind(':')+1:]
writer.writerow([count, t['postedTime'], status_id, t['actor']['preferredUsername'], user_id, body, t['retweetCount'], t['favoritesCount'], 'X', 'X', 'X'])
return response
else:
response_data['tweets'] = tweets.get_data()
return HttpResponse(json.dumps(response_data), content_type="application/json")
def logout(request):
"""
Returns a redirect response and logs out user
"""
auth_logout(request)
return HttpResponseRedirect('/')
|
from dataclasses import dataclass
import cv2
import numpy as np
from opsi.manager.manager_schema import Function
from opsi.manager.types import RangeType, Slide
from opsi.util.cv.mat import Mat, MatBW
__package__ = "opsi.imageops"
__version__ = "0.123"
class Rotate(Function):
@dataclass
class Settings:
angle: Slide(0, 360)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.rotate(self.settings.angle)
return self.Outputs(img=img)
class RotateNoCrop(Function):
@dataclass
class Settings:
angle: Slide(0, 360)
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img.mat.rotate_no_crop(self.settings.angle)
return self.Outputs(img=img)
class Flip(Function):
@dataclass
class Settings:
flipHorizontally: bool
flipVertically: bool
@dataclass
class Inputs:
img: Mat
@dataclass
class Outputs:
img: Mat
def run(self, inputs):
img = inputs.img
if self.settings.flipHorizontally:
img = img.flip_horizontally()
if self.settings.flipVertically:
img = img.flip_vertically()
        return self.Outputs(img=img)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2021) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: create_snmpv3_users
description: This module creates SNMPv3 users in a server
requirements:
- "python >= 3.6"
- "ansible >= 2.11"
author:
- "Gayathiri Devi Ramasamy (@Gayathirideviramasamy)"
options:
baseuri:
description:
- iLO IP of the server
type: str
default: NONE
required: true
username:
description:
- Username of the server for authentication
type: str
default: NONE
required: true
password:
description:
- Password of the server for authentication
type: str
default: NONE
required: true
snmpv3_users:
description:
- List of SNMPv3 users that needs to be added in the given server
type: list
default: NONE
required: true
security_name:
description:
- SNMPv3 security name associated with SNMPv3trap or SNMPv3Inform set on SNMPAlertProtocol
- Alphanumeric value with 1-32 characters
type: str
default: NONE
required: true
auth_protocol:
description:
- Sets the message digest algorithm to use for encoding the authorization passphrase
- The message digest is calculated over an appropriate portion of an SNMP message and is included as part of the message sent to the recipient
- Supported Auth protocols are MD5, SHA, and SHA256
type: str
default: NONE
required: true
auth_passphrase:
description:
- Sets the passphrase to use for sign operations
- String with 8-49 characters
type: str
default: NONE
required: true
privacy_protocol:
description:
- Sets the encryption algorithm to use for encoding the privacy passphrase
- A portion of an SNMP message is encrypted before transmission
- Supported privacy protocols are AES and DES
type: str
default: NONE
required: true
privacy_passphrase:
description:
- Sets the passphrase to use for encrypt operations
- String with 8-49 characters
type: str
default: NONE
required: true
http_schema:
description:
- http or https Protocol
type: str
default: https
required: false
"""
EXAMPLES = r"""
- name: Creating SNMPv3 users
create_snmpv3_users:
baseuri: "***.***.***.***"
username: "abcxyz"
password: "******"
snmpv3_users:
- security_name: "Sec1"
auth_protocol: "SHA"
auth_passphrase: "********"
privacy_protocol: "AES"
privacy_passphrase: "********"
"""
RETURN = r"""
expected_result:
description: SNMPv3 users are created in the server
returned: SNMPv3 users are added
type: str
failure case 1:
description: Redfish Package is not installed
returned: Failed to import the required Python library (redfish)
corrective_action: Install python3-redfish package
type: str
failure case 2:
description: Incorrect/Unreachable server IP address(baseuri) is provided
returned: RetriesExhaustedError
corrective_action: Provide the correct IP address of the server
type: str
failure case 3:
description: Credentials not valid
returned: InvalidCredentialsError
corrective_action: Validate the credentials
type: str
failure case 4:
description: Getting managers data failed
returned: GET on /redfish/v1/Managers/1/ Failed, Status <Status code>, Response <API response>
corrective_action: Verify the response in the output message
type: str
failure case 5:
description: Getting list of SNMPv3 users failed
returned: GET on /redfish/v1/Managers/1/SnmpService/SNMPUsers/ Failed, Status <Status code>, Response <API response>
corrective_action: Verify the response in the output message
type: str
failure case 6:
description: Getting particular SNMPv3 user failed
returned: GET on /redfish/v1/Managers/1/SnmpService/SNMPUsers/<SNMPv3 user ID>/ Failed, Status <Status code>, Response <API response>
corrective_action: Verify the response in the output message
type: str
failure case 7:
description: Maximum snmpv3 users in the server reached
returned: Maximum of 8 SNMPv3 users can be added to a server. Already server has <number of existing users in server> users and provided
<number of users provided as input> more users
corrective_action: Expected output if maximum limit of snmpv3 users reached. Validate the input to provide the correct number of snmpv3 users
type: str
failure case 8:
description: Input parameter is missing
returned: Input parameter <list of parameters> is missing to create SNMPv3 user. Mandatory parameters are <List of input parameters>
corrective_action: Validate the input parameters
type: str
failure case 9:
description: Wrong protocol provided
returned: Given value <protocol type> is not supported for <protocol>
corrective_action: Validate the input parameters
type: str
failure case 10:
description: auth_passphrase & privacy_passphrase minimum length not satisfied
returned: Minimum character length for auth_passphrase & privacy_passphrase is 8
corrective_action: Validate the input values for auth_passphrase & privacy_passphrase
type: str
failure case 11:
description: User exists with same name and different protocols
returned: Already user exists with same name <security name> and protocols <auth_protocol and privacy_protocol>, so user cannot be
created with different protocols
corrective_action: Modify the security name or delete the existing user
type: str
failure case 12:
description: User exists with same name and protocols
returned: Already user exists with same name <security name> and same protocols <auth_protocol and privacy_protocol>
corrective_action: Modify the security name
type: str
failure case 13:
description: Adding SNMPv3 user failed
returned: Adding SNMPv3 user <SNMPv3 username> failed, Status <Status code>, Response <API response>
corrective_action: Verify the response in the output message
type: str
failure case 14:
description: Value for security_name is empty
returned: security_name should not be empty
corrective_action: Provide value for security name(user name)
type: str
failure case 15:
description: Wrong input parameter is provided
returned: Unsupported input parameters <list of input parameters>
corrective_action: Remove wrong parameters from the input
type: str
failure case 16:
description: Duplicate entry is provided
returned: Duplicate entries provided for users <list of duplicate users>
corrective_action: Remove duplicate entries from the input
type: str
"""
import json
try:
from redfish import redfish_client
HAS_REDFISH = True
except ImportError:
HAS_REDFISH = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
base_uri = "/redfish/v1/"
manager_uri = "Managers/1/"
def logout(redfishClient, module):
redfishClient.logout()
def error_msg(module, method, uri, status, response):
# Print error message
module.fail_json(
msg="%s on %s Failed, Status: %s, Response: %s"
% (str(method), str(uri), str(status), str(response))
)
def get_snmpv3_users(redfishClient, module):
# Get on Managers API
snmpv3_users = []
uri = base_uri + manager_uri
response = redfishClient.get(uri)
if response.status != 200:
error_msg(module, "GET", uri, response.status, response.text)
snmp_res = redfishClient.get(uri + "SnmpService/SNMPUsers/")
if snmp_res.status != 200:
error_msg(
module,
"GET",
uri + "SnmpService/SNMPUsers/",
snmp_res.status,
snmp_res.text,
)
snmp_list = json.loads(snmp_res.text)
for item in snmp_list["Members"]:
item_rsp = redfishClient.get(item["@odata.id"])
if item_rsp.status != 200:
error_msg(module, "GET", item["@odata.id"], item_rsp.status, item_rsp.text)
snmpv3_users.append(json.loads(item_rsp.text))
return snmpv3_users
def validate_duplicate_entries(snmpv3_users, module):
# Validating duplicate entry
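    # e.g. (illustrative) two input users sharing security_name "Sec1" fail with:
    #   "Duplicate entries provided for users: ['Sec1']"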
duplicate = []
snmpv3_user_names = [i["security_name"] for i in snmpv3_users]
for snmp in snmpv3_user_names:
if snmpv3_user_names.count(snmp) > 1:
duplicate.append(snmp)
if duplicate:
module.fail_json(
msg="Duplicate entries provided for users: %s" % str(list(set(duplicate)))
)
def validate_snmpv3_users(server_snmpv3_users, snmpv3_users, module):
# Validating input parameters
if len(server_snmpv3_users) + len(snmpv3_users) > 8:
message = (
"Maximum of 8 SNMPv3 users can be added to a server..."
+ "Already server has %s users and provided %s more users"
)
module.fail_json(msg=message % (len(server_snmpv3_users), len(snmpv3_users)))
input_list = [
"security_name",
"auth_protocol",
"auth_passphrase",
"privacy_protocol",
"privacy_passphrase",
]
validate_dict = {
"auth_protocol": ["MD5", "SHA", "SHA256"],
"privacy_protocol": ["DES", "AES"],
}
for user in snmpv3_users:
missing_param = []
for input in input_list:
if input not in user.keys():
missing_param.append(input)
if missing_param:
module.fail_json(
msg="Input parameter %s is missing to create SNMPv3 user. Mandatory parameters are %s"
% (str(missing_param), str(input_list))
)
if not user["security_name"]:
module.fail_json(msg="'security_name' should not be empty")
for key, value in validate_dict.items():
if user[key] not in value:
module.fail_json(
"Given value '%s' is not supported for '%s'" % (user[key], key)
)
if not (
len(user["privacy_passphrase"]) >= 8 and len(user["auth_passphrase"]) >= 8
):
module.fail_json(
msg="Minimum character length for auth_passphrase & privacy_passphrase is 8"
)
if set(user.keys()) - set(input_list):
module.fail_json(
msg="Unsupported input parameters: %s"
% str(list(set(user.keys()) - set(input_list)))
)
def check_snmpv3_users(server_snmpv3_users, snmpv3_users, module):
# Validating if SNMPv3 users already exists
for user in snmpv3_users:
for data in server_snmpv3_users:
if data["SecurityName"] == user["security_name"]:
if (
data["AuthProtocol"] != user["auth_protocol"]
or data["PrivacyProtocol"] != user["privacy_protocol"]
):
message = (
"Already user exists with same name '%s' and protocols "
+ "AuthProtocol: '%s' and PrivacyProtocol: '%s'. "
+ "so user cannot be created with different protocols..."
)
module.fail_json(
msg=message
% (
data["SecurityName"],
data["AuthProtocol"],
data["PrivacyProtocol"],
)
)
else:
message = (
"Already user exists with same name '%s' and same protocols "
+ "AuthProtocol: '%s' and PrivacyProtocol: '%s'."
)
module.fail_json(
msg=message
% (
data["SecurityName"],
data["AuthProtocol"],
data["PrivacyProtocol"],
)
)
def create_snmpv3_user(redfishClient, user, module):
# Define payload
body = {
"SecurityName": user["security_name"],
"AuthProtocol": user["auth_protocol"],
"AuthPassphrase": user["auth_passphrase"],
"PrivacyProtocol": user["privacy_protocol"],
"PrivacyPassphrase": user["privacy_passphrase"],
}
# POST on Managers API
uri = base_uri + manager_uri + "SnmpService/SNMPUsers/"
snmp_res = redfishClient.post(uri, body=body)
if snmp_res.status != 201:
module.fail_json(
msg="Adding SNMPv3 user %s failed, status: %s, response: %s, API: %s"
% (user["security_name"], str(snmp_res.status), snmp_res.text, uri)
)
def main():
module = AnsibleModule(
argument_spec=dict(
baseuri=dict(required=True, type="str"),
username=dict(required=True, type="str"),
password=dict(required=True, type="str", no_log=True),
snmpv3_users=dict(required=True, type="list"),
http_schema=dict(required=False, default="https", type="str"),
)
)
if not HAS_REDFISH:
module.fail_json(msg=missing_required_lib("redfish"))
baseuri = module.params["baseuri"]
username = module.params["username"]
password = module.params["password"]
snmpv3_users = module.params["snmpv3_users"]
http_schema = module.params["http_schema"]
base_url = "{0}://{1}".format(http_schema, baseuri)
redfishClient = redfish_client(
base_url=base_url, username=username, password=password
)
redfishClient.login()
validate_duplicate_entries(snmpv3_users, module)
server_snmpv3_users = get_snmpv3_users(redfishClient, module)
validate_snmpv3_users(server_snmpv3_users, snmpv3_users, module)
check_snmpv3_users(server_snmpv3_users, snmpv3_users, module)
for user in snmpv3_users:
create_snmpv3_user(redfishClient, user, module)
logout(redfishClient, module)
module.exit_json(changed=True, msg="SNMPv3 users are added")
if __name__ == "__main__":
main()
|
# Generates a single arch xml file given arch string
import os
import subprocess
import re
import getopt
import sys
import arch_handler as ah
from my_regex import *
#==================================================================
# Global functions
# NOTE: `chanWidth` is referenced in Wotan.runWotan but was never defined in the original
# script; the value below is an assumed placeholder for VPR's -route_chan_width option.
chanWidth = 100
def runCommand(command, arguments):
ret = subprocess.check_output([command] + arguments)
return ret
def makeWotan(wotanPath):
print("Making Wotan...")
os.chdir(wotanPath)
ret = runCommand("make", [])
return ret
def makeVPR(vprPath):
print("Making VPR...")
os.chdir(vprPath)
ret = runCommand("make", [])
return ret
# Copied from .../wotan/python/wotan_tester.py
def get_arch_to_path(arch_point):
assert isinstance(arch_point, Arch_Point_Info)
arch_path = ''
sb_pattern = arch_point.switchblock_pattern
wire_topology = arch_point.wire_topology
wirelengths = {}
wirelengths['semi-global'] = arch_point.s_wirelength
if arch_point.g_wirelength != None:
wirelengths['global'] = arch_point.g_wirelength
global_via_repeat = 4
fc_in = arch_point.fcin
fc_out = arch_point.fcout
lut_size = str(arch_point.lut_size) + 'LUT'
arch_path = ah.get_path_to_arch(sb_pattern, wire_topology, wirelengths, global_via_repeat, \
fc_in, fc_out, lut_size)
return arch_path
#==================================================================
# Class copied from .../wotan/python/wotan_tester.py
class Arch_Point_Info:
def __init__(self, lut_size, # Size of the LUT (i.e. K)
s_wirelength, # Semi-global wirelength
g_wirelength, # Global-layer wirelength; Specify None if not used
switchblock_pattern, # wilton/universal/subset
wire_topology, # 'single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb',
# 'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb'
fcin, # cb input flexibility
fcout, # cb output flexibility
arch_string = None): # Optional string that describes this architecture
if lut_size not in [4, 6]:
raise BaseException, 'Unexpected LUT size: %d' % (lut_size)
if switchblock_pattern not in ['wilton', 'universal', 'subset']:
raise BaseException, 'Unexpected switch block pattern: %s' % (switchblock_pattern)
if wire_topology not in ['single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb', \
'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb']:
raise BaseException, 'Unexpected wire topology: %s' % (wire_topology)
self.lut_size = lut_size
self.s_wirelength = s_wirelength
self.g_wirelength = g_wirelength
self.switchblock_pattern = switchblock_pattern
self.wire_topology = wire_topology
self.fcin = fcin
self.fcout = fcout
self.arch_string = arch_string
# Overload constructor -- initialize based on a string. Expecting string to be in
# the format of this class' 'as_str' function.
@classmethod
def from_str(cls, s):
regex_list = {
's_wirelength' : '.*_s(\d+)_.*',
'g_wirelength' : '.*_g(\d+)_.*',
'K' : '.*k(\d)_.*',
'wire_topology' : '.*_topology-([-\w]+)_.*',
'fcin' : '.*fcin(\d+\.*\d*)',
'fcout' : '.*fcout(\d+\.*\d*)',
}
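        # Example (using the sample string commented out in GenerateArch.getArch below):
        # 'k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.4' parses to lut_size=4,
        # s_wirelength=1, g_wirelength=None, switchblock='subset',
        # wire_topology='single-wirelength', fcin=0.3, fcout=0.4.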
# Get wirelength, fcin, fcout
tmp_dict = {}
for key in regex_list:
try:
tmp_dict[key] = regex_last_token(s, regex_list[key])
except RegexException as exception:
if key == 'g_wirelength':
# OK if global wirelength wasn't specified
tmp_dict[key] = None
continue
else:
raise
s_wirelength = int(tmp_dict['s_wirelength'])
g_wirelength = tmp_dict['g_wirelength']
if g_wirelength != None:
g_wirelength = int(g_wirelength)
lut_size = int(tmp_dict['K'])
wire_topology = tmp_dict['wire_topology']
fcin = float(tmp_dict['fcin'])
fcout = float(tmp_dict['fcout'])
# Get switchblock
switchblock = None
if 'subset' in s:
switchblock = 'subset'
elif 'universal' in s:
switchblock = 'universal'
elif 'wilton' in s:
switchblock = 'wilton'
else:
print('could not find a switchblock specification in string:\n\t' + s)
sys.exit()
return cls(lut_size, s_wirelength, g_wirelength, switchblock, wire_topology, fcin, fcout, s)
# Returns a string describing an object of this class
def as_str(self):
return self.arch_string
def __str__(self):
return self.arch_string
def __repr__(self):
return self.arch_string
#==================================================================
# Class for running architecture through Wotan
class Wotan:
def __init__(self, archPath, vtrPath, vprPath, wotanPath, wotanOpts, lut_size):
self.archPath = archPath
self.vtrPath = vtrPath
self.vprPath = vprPath
self.wotanPath = wotanPath
self.wotanOpts = wotanOpts
self.lut_size = lut_size
def runWotan(self):
benchmark = 'vtr_benchmarks_blif/sha.blif'
if self.lut_size == 4:
benchmark = '4LUT_DSP_vtr_benchmarks_blif/sha.pre-vpr.blif'
vprOpts = self.archPath + ' ' + self.vtrPath + '/vtr_flow/benchmarks/' + benchmark + \
' -dump_rr_structs_file ./dumped_rr_structs.txt ' + \
'-pack -place -route_chan_width ' + str(chanWidth)
# Run VPR to get RRG
ret = self._runVPRGetRRG(vprOpts)
assert ret
# Run Wotan to get routability metric
ret = self._runWotan()
assert ret
def _runVPRGetRRG(self, vprOpts):
print("Running VPR to get RRG...")
os.chdir(self.vprPath)
argsList = vprOpts.split()
output = runCommand("./vpr", argsList)
return output
def _runWotan(self):
print("Running Wotan to get routability metric...")
os.chdir(self.wotanPath)
argsList = self.wotanOpts.split()
output = runCommand("./wotan", argsList)
return output
#==================================================================
# Generates the custom architecture file
class GenerateArch:
def __init__(self, arch_str):
self.arch_str = arch_str
def getArch(self):
#arch_str = 'k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.4'
arch = Arch_Point_Info.from_str(self.arch_str)
return arch
def getCustomArch(self, archPoint):
# Returns the path to the architecture path
assert isinstance(archPoint, Arch_Point_Info)
archPath = get_arch_to_path(archPoint)
print "Arch File Path: ", archPath
#==================================================================
# Main function
def main(arch_str):
base_path = "/nfs/ug/homes-4/k/kongnath/code"
vtrPath = base_path + "/vtr"
vprPath = vtrPath + "/vpr"
wotan_path = base_path + "/wotan"
arch_dir = wotan_path + "/arch"
ga = GenerateArch(arch_str)
arch = ga.getArch()
ga.getCustomArch(arch)
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "a:")
    except getopt.GetoptError as err:
print str(err)
sys.exit(2)
arch = ""
for o, a in opts:
if o == '-a':
arch = a
else:
sys.exit(2)
# arch = 'k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.2'
# arch = 'k4_s1_subset_topology-single-wirelength_fcin0.2_fcout0.1'
if not arch:
print "Need arch name."
sys.exit(2)
print arch
main(arch)
|
# Written by Mike Smith michaeltsmith.org.uk
from GPy.kern import Kern, RBF
from GPy.core.parameterization import Param
from paramz.transformations import Logexp
import math
from scipy.special import factorial
import numpy as np
#TODO: Is it ok for us to just fill the rest of X in with zeros?
# these won't have any points chosen in those 0-volume areas... but ideally we should do something else? Put NaNs in????
def randint(i):
"""
Convert floating point to an integer, but round randomly proportionate to the fraction remaining
E.g. randint(3.2) will return 3, 80% of the time, and 4, 20%.
"""
return int(i)+int(np.random.rand()<(i%1))
assert np.abs(np.mean([randint(4.2) for i in range(1000)])-4.2)<0.1
class ShapeIntegral(Kern):
"""
"""
def __init__(self, input_dim, input_space_dim=None, active_dims=None, kernel=None, name='shapeintegral',Nperunit=100, lengthscale=None, variance=None):
"""
NOTE: Added input_space_dim as the number of columns in X isn't the dimensionality of the space. I.e. for pentagons there
will be 10 columns in X, while only 2 dimensions of input space.
"""
super(ShapeIntegral, self).__init__(input_dim, active_dims, name)
assert ((kernel is not None) or (input_space_dim is not None)), "Need either the input space dimensionality defining or the latent kernel defining (to infer input space)"
if kernel is None:
kernel = RBF(input_space_dim, lengthscale=lengthscale)
else:
input_space_dim = kernel.input_dim
assert kernel.input_dim == input_space_dim, "Latent kernel (dim=%d) should have same input dimensionality as specified in input_space_dim (dim=%d)" % (kernel.input_dim,input_space_dim)
#assert len(kern.lengthscale)==input_space_dim, "Lengthscale of length %d, but input space has %d dimensions" % (len(lengthscale),input_space_dim)
self.lengthscale = Param('lengthscale', kernel.lengthscale, Logexp()) #Logexp - transforms to allow positive only values...
self.variance = Param('variance', kernel.variance, Logexp()) #and here.
self.link_parameters(self.variance, self.lengthscale) #this just takes a list of parameters we need to optimise.
self.kernel = kernel
self.Nperunit = Nperunit
self.input_space_dim = input_space_dim
def simplexRandom(self,vectors):
#vectors = np.array([[0,0],[0,2],[1,1]])
"""
Compute random point in arbitrary simplex
from Grimme, Christian. Picking a uniformly
random point from an arbitrary simplex.
        Technical Report, University of Münster, 2015.
vectors are row-vectors describing the
vertices of the simplex, e.g.
[[0,0],[0,2],[1,1]] is a triangle
"""
d = vectors.shape[1]
n = vectors.shape[0]
assert n == d+1, "Need exactly d+1 vertices to define a simplex (e.g. a 2d triangle needs 3 points, a 3d tetrahedron 4 points, etc). Currently have %d points and %d dimensions" % (n,d)
zs = np.r_[1,np.random.rand(d),0]
ls = zs**(1.0/np.arange(len(zs)-1,-1,-1))
vs = np.cumprod(ls) #could skip last element for speed
res = vectors.copy()
res = np.zeros(d)
for vect,l,v in zip(vectors.copy(),ls[1:],vs):
res+=(1-l)*v*vect
return res
def simplexVolume(self, vectors):
"""Returns the volume of the simplex defined by the
row vectors in vectors, e.g. passing [[0,0],[0,2],[2,0]]
will return 2 (as this triangle has area of 2)"""
assert vectors.shape[0]==self.input_space_dim+1, "For a %d dimensional space there should be %d+1 vectors describing the simplex" % (self.input_space_dim, self.input_space_dim)
return np.abs(np.linalg.det(vectors[1:,:]-vectors[0,:]))/factorial(self.input_space_dim)
def placepoints(self,shape,Nperunit=100):
"""Places uniformly random points in shape, where shape
is defined by an array of concatenated simplexes
e.g. a 2x2 square (from [0,0] to [2,2]) could be built
of two triangles:
[0,0,0,2,2,0 ,2,2,0,2,2,0]"""
allps = []
#each simplex in shape must have D*(D+1) coordinates, e.g. a triangle has 2*(2+1) = 6 coords (2 for each vertex)
#e.g. a tetrahedron has 4 points, each with 3 coords = 12: 3*(3+1) = 12.
Ncoords = self.input_space_dim*(self.input_space_dim+1)
assert len(shape)%Ncoords == 0, "The number of coordinates (%d) describing the simplexes that build the shape must factorise into the number of coordinates in a single simplex in %d dimensional space (=%d)" % (len(shape), self.input_space_dim, Ncoords)
for i in range(0,len(shape),Ncoords):
vectors = shape[i:(i+Ncoords)].reshape(self.input_space_dim+1,self.input_space_dim)
if np.isnan(vectors[0,0]): #if we get to nans this polytope has no more simplexes
break
vol = self.simplexVolume(vectors)
#print(vol)
points = np.array([self.simplexRandom(vectors) for i in range(int(Nperunit*vol))]) #i%2+int(x-0.5)
allps.extend(points)
return np.array(allps)
def calc_K_xx_wo_variance(self,X,X2=None):
"""Calculates K_xx without the variance term
X is in the form of an array, each row for one shape. each
is defined by an array of concatenated simplexes
e.g. a 2x2 square (from [0,0] to [2,2]) could be built
of two triangles:
[0,0,0,2,2,0 ,2,2,0,2,2,0]
"""
ps = []
qs = []
if X2 is None:
X2 = X
for s in X:
s = s[~np.isnan(s)]
ps.append(self.placepoints(s,self.Nperunit))
for s in X2:
s = s[~np.isnan(s)]
qs.append(self.placepoints(s,self.Nperunit))
K_xx = np.ones([len(ps),len(qs)])
for i,p in enumerate(ps):
for j,q in enumerate(qs):
if (len(p)==0) or (len(q)==0):
#print("Warning: no points in simplex. Assuming no covariance!")
v = 0 #what else can we do?
else:
cov = self.kernel.K(p,q)
v = np.sum(cov)/(self.Nperunit**2)
K_xx[i,j] = v #TODO Compute half and mirror
return K_xx
def update_gradients_full(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective wrt the covariance matrix
(dL_dK), compute the gradient wrt the parameters of this kernel,
and store in the parameters object as e.g. self.variance.gradient
"""
#self.variance.gradient = np.sum(self.K(X, X2)* dL_dK)/self.variance
#now the lengthscale gradient(s)
#print dL_dK
if X2 is None:
X2 = X
ls_grads = np.zeros([len(X), len(X2), len(self.kernel.lengthscale.gradient)])
var_grads = np.zeros([len(X), len(X2)])
#print grads.shape
for i,x in enumerate(X):
for j,x2 in enumerate(X2):
ps = self.placepoints(x,self.Nperunit)
qs = self.placepoints(x2,self.Nperunit)
if (len(ps)==0) or (len(qs)==0):
pass
#print("Warning: no points in simplex. Assuming no covariance!")
else:
self.kernel.update_gradients_full(np.ones([len(ps),len(qs)]), ps, qs)
#this actually puts dK/dl in the lengthscale gradients
#print self.kernel.lengthscale.gradient.shape
#print grads.shape
ls_grads[i,j,:] = self.kernel.lengthscale.gradient
var_grads[i,j] = self.kernel.variance.gradient
#print dL_dK.shape
#print grads[:,:,0] * dL_dK
lg = np.zeros_like(self.kernel.lengthscale.gradient)
#find (1/N^2) * sum( gradient )
for i in range(ls_grads.shape[2]):
lg[i] = np.sum(ls_grads[:,:,i] * dL_dK)/(self.Nperunit**2)
vg = np.sum(var_grads[:,:] * dL_dK)/(self.Nperunit**2)
self.kernel.lengthscale.gradient = lg
self.kernel.variance.gradient = vg
def K(self, X, X2=None):
return self.calc_K_xx_wo_variance(X,X2)
#if X2 is None: #X vs X
# K_xx = self.calc_K_xx_wo_variance(X)
# return K_xx
#else: #X vs X2
# raise NotImplementedError()
# #pass #TODO
def Kdiag(self, X):
return self.K(X,X)
|
import pytest
from pytest import approx
from contextuality.model import Scenario, CyclicScenario, random_pr_like_model
def test_CbD_direction_influence():
for _ in range(10):
model = random_pr_like_model(n=3)
dist = model._distributions
assert approx(model.CbD_direct_influence()) == (2 * (abs(dist[0][0] - dist[1][0]) +
abs(dist[1][0] - dist[2][1]) +
abs(dist[2][1] + dist[0][0] - 1)))
def test_CbD_measure():
for _ in range(10):
model = random_pr_like_model(n=3)
dist = model._distributions
assert approx(model.CbD_measure()) == 2 - (2 * (abs(dist[0][0] - dist[1][0]) +
abs(dist[1][0] - dist[2][1]) +
abs(dist[2][1] + dist[0][0] - 1)))
|
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import CNumPerv
class TestCNumPerv(VariableUnitTest):
# @skip("not ready")
def test_CNumPerv(self):
z = self.z
np.testing.assert_array_almost_equal(
CNumPerv.CNumPerv_f(z.NYrs, z.DaysMonth, z.Temp, z.NRur, z.NUrb, z.CNP_0, z.InitSnow_0, z.Prec, z.Grow_0,
z.AntMoist_0),
CNumPerv.CNumPerv(z.NYrs, z.DaysMonth, z.Temp, z.NRur, z.NUrb, z.CNP_0, z.InitSnow_0, z.Prec, z.Grow_0,
z.AntMoist_0), decimal=7)
|
import os
from pathlib import Path
import subprocess
import oyaml as yaml
def create_empty_dbt_project(data_source_id: str, warehouse: str, target_dir: str):
Path(target_dir).mkdir(parents=True, exist_ok=True)
subprocess.call(
f"dbt-init --client {data_source_id} --warehouse {warehouse} --target_dir {target_dir} --project_name "
f"'kuwala' --project_directory {data_source_id} --profile_name 'kuwala'",
shell=True,
)
profiles_file_path = f"{target_dir}/{data_source_id}/sample.profiles.yml"
project_file_path = f"{target_dir}/{data_source_id}/dbt_project.yml"
packages_file_path = f"{target_dir}/{data_source_id}/packages.yml"
os.rename(profiles_file_path, profiles_file_path.replace("sample.", ""))
# Update dbt_project.yml to the latest version
with open(project_file_path, "r") as file:
project_yaml = yaml.safe_load(file)
file.close()
project_yaml["config-version"] = 2
project_yaml["model-paths"] = project_yaml.pop("source-paths")
project_yaml["seed-paths"] = project_yaml.pop("data-paths")
with open(project_file_path, "w") as file:
yaml.safe_dump(project_yaml, file, indent=4)
file.close()
# Update dbt packages
with open(packages_file_path, "r") as file:
packages_yaml = yaml.safe_load(file)
file.close()
packages_yaml["packages"] = [dict(package="dbt-labs/codegen", version="0.5.0")]
with open(packages_file_path, "w") as file:
yaml.safe_dump(packages_yaml, file, indent=4)
file.close()
subprocess.call("dbt deps", cwd=f"{target_dir}/{data_source_id}", shell=True)
|
"""
Geographic coordinate conversion.
"""
import numpy as np
from . import get_ellipsoid
def geodetic_to_spherical(longitude, latitude, height):
"""
Convert from geodetic to geocentric spherical coordinates.
The geodetic datum is defined by the default :class:`harmonica.ReferenceEllipsoid`
set by the :func:`harmonica.set_ellipsoid` function.
The coordinates are converted following [Vermeille2002]_.
Parameters
----------
longitude : array
Longitude coordinates on geodetic coordinate system in degrees.
latitude : array
Latitude coordinates on geodetic coordinate system in degrees.
height : array
Ellipsoidal heights in meters.
Returns
-------
longitude : array
Longitude coordinates on geocentric spherical coordinate system in degrees.
The longitude coordinates are not modified during this conversion.
spherical_latitude : array
Converted latitude coordinates on geocentric spherical coordinate system in
degrees.
radius : array
Converted spherical radius coordinates in meters.
See also
--------
spherical_to_geodetic : Convert from geocentric spherical to geodetic coordinates.
Examples
--------
In the poles, the radius should be the reference ellipsoid's semi-minor axis:
>>> import harmonica as hm
>>> spherical = hm.geodetic_to_spherical(longitude=0, latitude=90, height=0)
>>> print(", ".join("{:.4f}".format(i) for i in spherical))
0.0000, 90.0000, 6356752.3142
>>> print("{:.4f}".format(hm.get_ellipsoid().semiminor_axis))
6356752.3142
In the equator, it should be the semi-major axis:
>>> spherical = hm.geodetic_to_spherical(longitude=0, latitude=0, height=0)
>>> print(", ".join("{:.4f}".format(i) for i in spherical))
0.0000, 0.0000, 6378137.0000
>>> print("{:.4f}".format(hm.get_ellipsoid().semimajor_axis))
6378137.0000
"""
# Get ellipsoid
ellipsoid = get_ellipsoid()
# Convert latitude to radians
latitude_rad = np.radians(latitude)
prime_vertical_radius = ellipsoid.semimajor_axis / np.sqrt(
1 - ellipsoid.first_eccentricity ** 2 * np.sin(latitude_rad) ** 2
)
    # Instead of computing X and Y, we only compute the projection on the XY plane:
# xy_projection = sqrt( X**2 + Y**2 )
xy_projection = (height + prime_vertical_radius) * np.cos(latitude_rad)
z_cartesian = (
height + (1 - ellipsoid.first_eccentricity ** 2) * prime_vertical_radius
) * np.sin(latitude_rad)
radius = np.sqrt(xy_projection ** 2 + z_cartesian ** 2)
spherical_latitude = np.degrees(np.arcsin(z_cartesian / radius))
return longitude, spherical_latitude, radius
def spherical_to_geodetic(longitude, spherical_latitude, radius):
"""
Convert from geocentric spherical to geodetic coordinates.
The geodetic datum is defined by the default :class:`harmonica.ReferenceEllipsoid`
set by the :func:`harmonica.set_ellipsoid` function.
The coordinates are converted following [Vermeille2002]_.
Parameters
----------
longitude : array
Longitude coordinates on geocentric spherical coordinate system in degrees.
spherical_latitude : array
Latitude coordinates on geocentric spherical coordinate system in degrees.
radius : array
Spherical radius coordinates in meters.
Returns
-------
longitude : array
Longitude coordinates on geodetic coordinate system in degrees.
The longitude coordinates are not modified during this conversion.
latitude : array
Converted latitude coordinates on geodetic coordinate system in degrees.
height : array
Converted ellipsoidal height coordinates in meters.
See also
--------
geodetic_to_spherical : Convert from geodetic to geocentric spherical coordinates.
Examples
--------
In the poles and equator, using the semi-minor or semi-major axis of the ellipsoid
as the radius should yield 0 height:
>>> import harmonica as hm
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0, spherical_latitude=90, radius=hm.get_ellipsoid().semiminor_axis
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, 90.0, 0.0
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0, spherical_latitude=0, radius=hm.get_ellipsoid().semimajor_axis
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, 0.0, 0.0
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0,
... spherical_latitude=-90,
... radius=hm.get_ellipsoid().semiminor_axis + 2
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, -90.0, 2.0
"""
# Get ellipsoid
ellipsoid = get_ellipsoid()
k, big_d, big_z = _spherical_to_geodetic_parameters(spherical_latitude, radius)
latitude = np.degrees(
2 * np.arctan(big_z / (big_d + np.sqrt(big_d ** 2 + big_z ** 2)))
)
height = (
(k + ellipsoid.first_eccentricity ** 2 - 1)
/ k
* np.sqrt(big_d ** 2 + big_z ** 2)
)
return longitude, latitude, height
def _spherical_to_geodetic_parameters(spherical_latitude, radius):
"Compute parameters for spherical to geodetic coordinates conversion"
# Get ellipsoid
ellipsoid = get_ellipsoid()
# Convert latitude to radians
spherical_latitude_rad = np.radians(spherical_latitude)
big_z = radius * np.sin(spherical_latitude_rad)
p_0 = (
radius ** 2
* np.cos(spherical_latitude_rad) ** 2
/ ellipsoid.semimajor_axis ** 2
)
q_0 = (
(1 - ellipsoid.first_eccentricity ** 2)
/ ellipsoid.semimajor_axis ** 2
* big_z ** 2
)
r_0 = (p_0 + q_0 - ellipsoid.first_eccentricity ** 4) / 6
s_0 = ellipsoid.first_eccentricity ** 4 * p_0 * q_0 / 4 / r_0 ** 3
t_0 = np.cbrt(1 + s_0 + np.sqrt(2 * s_0 + s_0 ** 2))
u_0 = r_0 * (1 + t_0 + 1 / t_0)
v_0 = np.sqrt(u_0 ** 2 + q_0 * ellipsoid.first_eccentricity ** 4)
w_0 = ellipsoid.first_eccentricity ** 2 * (u_0 + v_0 - q_0) / 2 / v_0
k = np.sqrt(u_0 + v_0 + w_0 ** 2) - w_0
big_d = (
k
* radius
* np.cos(spherical_latitude_rad)
/ (k + ellipsoid.first_eccentricity ** 2)
)
return k, big_d, big_z
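# Round-trip sanity check (sketch, using only the functions defined above):
# converting geodetic -> spherical -> geodetic should recover the inputs to
# within floating-point error, since both directions follow [Vermeille2002]_.
#
#     lon, sph_lat, radius = geodetic_to_spherical(30.0, -45.0, 1000.0)
#     lon_back, lat_back, height_back = spherical_to_geodetic(lon, sph_lat, radius)
#     # expect lon_back == 30.0, lat_back ~ -45.0, height_back ~ 1000.0 m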
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
from multiprocessing import Pool
import numpy as np
import multiprocessing
import shutil
import tempfile
import argparse
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
import random
from tqdm import tqdm
from random import shuffle
import pickle
import boto3
from botocore.exceptions import ClientError
import requests
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
VOCAB_NAME = 'vocab.txt'
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
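# Example (sketch): split_s3_path("s3://my-bucket/models/vocab.txt") returns
# ("my-bucket", "models/vocab.txt"); the bucket and key names here are
# illustrative only.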
def s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
# progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
# progress.update(len(chunk))
temp_file.write(chunk)
# progress.close()
def url_to_filename(url: str, etag: str = None) -> str:
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
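# Example (sketch): the mapping is deterministic, so the same URL/ETag pair
# always resolves to the same cache filename.
#
#     name = url_to_filename("https://example.com/vocab.txt", etag='"abc123"')
#     # name == sha256(url bytes).hexdigest() + "." + sha256(etag bytes).hexdigest()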
def get_from_cache(url: str, cache_dir: str = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s",
url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s",
temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename))
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def truncate_input_sequence(tokens_a, tokens_b, max_num_tokens):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
# assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
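# Example (sketch): truncation happens in place, removing a token from the
# front or back of whichever list is currently longer until the pair fits.
#
#     a, b = list("abcdefgh"), list("xyz")
#     truncate_input_sequence(a, b, max_num_tokens=6)
#     # afterwards len(a) + len(b) <= 6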
class TokenInstance:
def __init__(self, tokens_a, tokens_b, is_next):
self.tokens_a = tokens_a
self.tokens_b = tokens_b
        self.is_next = is_next  # 0 if tokens_b is the actual continuation, 1 if it is random
def get_values(self):
return (self.tokens_a, self.tokens_b, self.is_next)
class PretrainingDataCreator:
def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, readin: int = 200000000, dupe_factor: int = 6, small_seq_prob: float = 0.1):
self.dupe_factor = dupe_factor
self.max_seq_length = max_seq_length
self.small_seq_prob = small_seq_prob
documents = []
instances = []
with open(path, encoding='utf-8') as fd:
for i, line in enumerate(tqdm(fd)):
line = line.replace('\n', '')
document = line
if len(document.split("<sep>")) <= 3:
continue
lines = document.split("<sep>")
document = []
for seq in lines:
document.append(tokenizer.tokenize(seq))
# document = list(map(tokenizer.tokenize, lines))
documents.append(document)
documents = [x for x in documents if x]
self.documents = documents
for _ in range(self.dupe_factor):
for index in range(len(self.documents)):
instances.extend(self.create_training_instance(index))
shuffle(instances)
self.instances = instances
self.len = len(self.instances)
self.documents = None
documents = None
def __len__(self):
return self.len
def __getstate__(self):
state = self.__dict__.copy()
return state
def __setstate__(self, state):
self.__dict__.update(state)
def save(self, filename):
with open(filename, 'wb') as outfile:
pickle.dump(self, outfile)
def save_npy(self, output_path, raw_bytes=True, use_separators=True, multilingual=False):
# Make Sure the directory exists
os.makedirs(output_path, exist_ok=True)
data = [] # list of numpy arrays containing data for each instance
lengths = [] # lengths of the data for each instance
tokens_split = [] # number of tokens in tokens_a for each instance
is_next = [] # is_next value for each instance
if multilingual:
lang = [] # language for each instance
if not use_separators:
token_offsets = []
instance_token_counts = [] #
else:
token_sep = b'\x1f'
for instance in tqdm(self.instances, desc='instances'):
tokens_a, tokens_b, instance_is_next = instance.get_values()
if raw_bytes:
tokens_a = [t.encode('utf8') for t in tokens_a]
tokens_b = [t.encode('utf8') for t in tokens_b]
if use_separators:
instance_data = np.array(
list(token_sep.join(tokens_a + tokens_b)), dtype='b')
# sanity check, make sure the separators didn't appear in the data
assert np.count_nonzero(instance_data == int.from_bytes(
token_sep, byteorder='big')) == len(tokens_a) + len(tokens_b) - 1
else:
instance_data = np.array(
list(b''.join(tokens_a+tokens_b)), dtype='b')
else:
instance_data = np.array(
list(''.join(tokens_a+tokens_b)), dtype='U1')
data.append(instance_data)
lengths.append(len(instance_data))
tokens_split.append(len(tokens_a))
is_next.append(instance_is_next)
if multilingual:
lang.append(instance.get_lang())
if not use_separators:
token_offsets.append(
np.cumsum([len(t) for t in tokens_a] + [len(t) for t in tokens_b])[:-1])
instance_token_counts.append(len(tokens_a)+len(tokens_b)-1)
data = np.concatenate(data)
tokens_split = np.array(tokens_split)
is_next = np.array(is_next)
instance_offsets = np.insert(np.cumsum(lengths), 0, 0)
np.save(os.path.join(output_path, 'data.npy'),
data, allow_pickle=False)
np.save(os.path.join(output_path, 'tokens_split.npy'),
tokens_split, allow_pickle=False)
np.save(os.path.join(output_path, 'is_next.npy'),
is_next, allow_pickle=False)
np.save(os.path.join(output_path, 'instance_offsets.npy'),
instance_offsets, allow_pickle=False)
if multilingual:
lang = np.array(lang)
np.save(os.path.join(output_path, 'lang.npy'),
lang, allow_pickle=False)
if not use_separators:
instance_token_offsets = np.insert(
np.cumsum(instance_token_counts), 0, 0)
token_offsets = np.concatenate(token_offsets)
np.save(os.path.join(output_path, 'instance_token_offsets.npy'),
instance_token_offsets, allow_pickle=False)
np.save(os.path.join(output_path, 'token_offsets.npy'),
token_offsets, allow_pickle=False)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def create_training_instance(self, index):
document = self.documents[index]
# Need to add [CLS] + 2*[SEP] tokens
max_num_tokens = self.max_seq_length - 3
        # We want to maximize the input sequence length, but we also want
        # inputs similar to our generic task inputs, which will be
        # comparatively smaller than the data on which we intend to pre-train.
target_seq_length = max_num_tokens
if random.random() < self.small_seq_prob:
target_seq_length = random.randint(5, max_num_tokens)
        # We want the split between the two sequences for the NSP task to be
        # interesting rather than chosen at some arbitrary point; otherwise
        # the NSP task might become way too easy.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document)-1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random Next
label_next = 0
rand_num = random.random()
if len(current_chunk) == 1 or rand_num < 0.5:
label_next = 1
target_b_length = target_seq_length - len(tokens_a)
# Pick a random document
for _ in range(10):
random_doc_index = random.randint(
0, len(self.documents) - 1)
if random_doc_index != index:
break
random_doc = self.documents[random_doc_index]
random_start = random.randint(0, len(random_doc)-1)
for j in range(random_start, len(random_doc)):
tokens_b.extend(random_doc[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual Next
else:
label_next = 0
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
# # Actual Previous
# if rand_num > 0.666:
# tmp = tokens_a
# tokens_a = tokens_b
# tokens_b = tmp
# label_next = 2
truncate_input_sequence(tokens_a, tokens_b, max_num_tokens)
# assert len(tokens_a) >= 1
# assert len(tokens_b) >= 1
instances.append(TokenInstance(
tokens_a, tokens_b, int(label_next)))
current_chunk = []
current_length = 0
i += 1
return instances
class WikiNBookCorpusPretrainingDataCreator(PretrainingDataCreator):
def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, readin: int = 200000000, dupe_factor: int = 6, small_seq_prob: float = 0.1):
self.dupe_factor = dupe_factor
self.max_seq_length = max_seq_length
self.small_seq_prob = small_seq_prob
documents = []
instances = []
with open(path, encoding='utf-8') as fd:
document = []
for i, line in enumerate(tqdm(fd)):
line = line.replace('\n', '')
# document = line
# if len(document.split("<sep>")) <= 3:
# continue
if len(line) == 0: # This is end of document
documents.append(document)
document = []
if len(line.split(' ')) > 2:
document.append(tokenizer.tokenize(line))
if len(document) > 0:
documents.append(document)
documents = [x for x in documents if x]
print(documents[0])
print(len(documents))
self.documents = documents
for _ in range(self.dupe_factor):
for index in range(len(self.documents)):
instances.extend(self.create_training_instance(index))
shuffle(instances)
self.instances = instances
self.len = len(self.instances)
self.documents = None
documents = None
class BookCorpusPretrainingDataCreator(PretrainingDataCreator):
def __init__(self, path, tokenizer: BertTokenizer, max_seq_length: int = 512, readin: int = 200000000, dupe_factor: int = 6, small_seq_prob: float = 0.1):
self.dupe_factor = dupe_factor
self.max_seq_length = max_seq_length
self.small_seq_prob = small_seq_prob
documents = []
instances = []
with open(path, encoding='utf-8') as fd:
document = []
for i, line in enumerate(tqdm(fd)):
line = line.replace('\n', '')
# document = line
# if len(document.split("<sep>")) <= 3:
# continue
if len(line) == 0: # This is end of document
documents.append(document)
document = []
if len(line.split(' ')) > 2:
document.append(tokenizer.tokenize(line))
if len(document) > 0:
documents.append(document)
documents = [x for x in documents if x]
self.documents = documents
for _ in range(self.dupe_factor):
for index in range(len(self.documents)):
instances.extend(self.create_training_instance(index))
shuffle(instances)
self.instances = instances
self.len = len(self.instances)
self.documents = None
documents = None
def parse_data(input_file, output_file):
if not os.path.exists(output_file):
print(input_file)
dataset = WikiNBookCorpusPretrainingDataCreator(
input_file, tokenizer, dupe_factor=10, max_seq_length=128)
dataset.save_npy(output_file)
print(f"Completed Pickling: {output_file}")
else:
print(f'Already parsed: {output_file}')
parser = argparse.ArgumentParser(
description="Give initial arguments for parsing")
parser.add_argument("--input_dir", "--id", type=str)
parser.add_argument("--output_dir", "--od", type=str)
parser.add_argument("--token_file", default="bert-large-cased", type=str)
args = parser.parse_args()
tokenizer = BertTokenizer.from_pretrained(args.token_file, do_lower_case=False)
input_files = []
output_files = []
# parse_data("sample.txt", "test_sample")
# # data = WikiNBookCorpusPretrainingDataCreator.load("test_sample.bin")
# # print(len(data))
for filename in os.listdir(args.input_dir):
input_file = os.path.join(args.input_dir, filename)
outfilename = "_".join(filename.split('.')[:-1])
output_file = os.path.join(args.output_dir, outfilename)
input_files.append(input_file)
output_files.append(output_file)
# parse_data(input_file, output_file)
with Pool(processes=multiprocessing.cpu_count()) as pool:
pool.starmap(parse_data, zip(input_files, output_files))
|
from app import create_app ,db
from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app.models import Quote, User, Post,Comment, Subscriber
app = create_app('production')
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('server', Server)
manager.add_command('db', MigrateCommand)
@manager.command
def test():
'''
run the unit tests
'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
# creating python shell
@manager.shell
def make_shell_context():
return dict(app = app, db = db, Quote = Quote, User=User, Post=Post,Comment=Comment, Subscriber=Subscriber)
if __name__ == '__main__':
manager.run() |
import os
from hokusai.lib.command import command
from hokusai.lib.config import config
from hokusai.lib.common import shout
from hokusai.lib.exceptions import HokusaiError
@command()
def build():
docker_compose_yml = os.path.join(os.getcwd(), 'hokusai/build.yml')
legacy_docker_compose_yml = os.path.join(os.getcwd(), 'hokusai/common.yml')
if not os.path.isfile(docker_compose_yml) and not os.path.isfile(legacy_docker_compose_yml):
raise HokusaiError("Yaml files %s / %s do not exist." % (docker_compose_yml, legacy_docker_compose_yml))
if os.path.isfile(docker_compose_yml):
shout("docker-compose -f %s -p hokusai build" % docker_compose_yml, print_output=True)
if os.path.isfile(legacy_docker_compose_yml):
shout("docker-compose -f %s -p hokusai build" % legacy_docker_compose_yml, print_output=True)
|
import os
import gym
import numpy as np
from gym import spaces
from gym.envs.registration import register
from .franka_panda import PandaAbstractEnv
class PandaReachCspaceEnv(PandaAbstractEnv, gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, render=False, reward_type="jointcol", random_init=True, task_ll=[-1,-1,0], task_ul=[1,1,1]):
super().__init__(render=render, task_ll=task_ll, task_ul=task_ul)
self.observation_space = spaces.Dict(dict(
observation=spaces.Box(-1, 1, shape=(1,), dtype=np.float32),#spaces.Box(self.robot.joint_ll, self.robot.joint_ul, shape=(self.robot.n_joints,), dtype=np.float32),
achieved_goal=spaces.Box(self.robot.joint_ll, self.robot.joint_ul, shape=(self.robot.n_joints,), dtype=np.float32),
desired_goal=spaces.Box(self.robot.joint_ll, self.robot.joint_ul, shape=(self.robot.n_joints,), dtype=np.float32),
))
self.reward_type = reward_type
self.random_init = random_init
self._goal_joint = np.zeros(7)
self._goal_pos = np.zeros(3)
self.eps = 0.1
@property
def goal_joints(self):
return self._goal_joint.copy()
@goal_joints.setter
def goal_joints(self, joints):
self._goal_joint = joints
@property
def goal_pos(self):
return self._goal_pos.copy()
@goal_pos.setter
def goal_pos(self, pos):
self._goal_pos = pos
def _get_observation(self):
""" observation : joint, joint, joint_goal
"""
joints = self.robot.get_joints()
return dict(
observation=0,
achieved_goal=joints,
desired_goal=self.goal_joints,
)
def _is_success(self, achieved_goal, desired_goal):
return np.linalg.norm(achieved_goal - desired_goal) < self.eps
def get_random_joint_in_task_space(self):
for i in range(100):
joints = self.robot.get_random_joints(set=True)
pos = self.robot.get_ee_pos()
if np.all(self.task_ll < pos) & np.all(pos < self.task_ul):
return joints, pos
raise ValueError("EE position by a random configuration seems not in task-space.")
def reset(self):
with self.sim.no_rendering():
self.goal_joints = self.robot.get_random_joints(set=True)
self.goal_pos = self.robot.get_ee_pos()
if self.random_init:
self.start_joints = self.robot.get_random_joints(set=True)
self.start_pos = self.robot.get_ee_pos()
else:
self.start_joints = np.zeros(self.dim)
self.start_pos = self.robot.get_ee_pos()
self.robot.set_joints(self.start_joints)
if self.is_render == True:
self.sim.view_pos("goal", self.goal_pos)
self.sim.view_pos("curr", self.start_pos)
return self._get_observation()
def is_collision(self, joints):
is_ll = np.any(joints == self.robot.joint_ll)
is_ul = np.any(joints == self.robot.joint_ul)
return is_ll | is_ul
def step(self, action:np.ndarray):
self.robot.set_action(action)
obs_ = self._get_observation()
done = False
info = dict(
is_success=self._is_success(obs_["achieved_goal"], obs_["desired_goal"]),
actions=action.copy(),
collisions=self.is_collision(obs_["achieved_goal"])
)
reward = self.compute_reward(obs_["achieved_goal"], obs_["desired_goal"], info)
if self.is_render == True:
self.sim.view_pos("curr", self.robot.get_ee_pos())
return obs_, reward, done, info
def compute_reward(self, achieved_goal, desired_goal, info):
if len(achieved_goal.shape) == 2:
is_success = np.array([1 if i["is_success"] else 0 for i in info])
actions = np.array([i["actions"] for i in info])
collisions = np.array([i["collisions"] for i in info])
r = np.zeros(len(info))
else:
is_success = info["is_success"]
actions = info["actions"]
collisions = info["collisions"]
r = -0.
if "joint" in self.reward_type:
# delta = 0.5
# res = np.linalg.norm(desired_goal - achieved_goal, ord=1, axis=-1)
# cond = [res < delta, res >= delta]
# small_res = 0.5 * res**2
# large_res = delta * res - 0.5 * delta**2
# r -= np.select(cond, [small_res, large_res])/4
r -= np.linalg.norm(desired_goal - achieved_goal, axis=-1) / 4
if "sparse" in self.reward_type:
            r -= 1.0 * (1 - is_success)  # penalize each step while the goal has not been reached
if "action" in self.reward_type:
#mask_goal = np.linalg.norm(desired_goal - achieved_goal, axis=-1) < self.eps
r -= np.linalg.norm(actions, axis=-1) / 10
if "col" in self.reward_type:
r -= collisions * 1.
return r
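# Note on the reward terms in compute_reward above: `reward_type` is matched by
# substring, so the default "jointcol" enables both the joint-distance term and
# the collision penalty, while e.g. "sparsejointcol" (illustrative) would also
# add the sparse success term.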
register(
id='PandaReachCspace-v0',
entry_point='utils.rxbot.panda_reach_cspace:PandaReachCspaceEnv',
max_episode_steps=50,
) |
#!/usr/bin/env python3
import os
import sys
import subprocess
FRONTEND_STACK_PREFIX = "sockshop_frontend_"
BACKEND_STACK = "sockshop_backend"
def print_usage():
print("This script takes exactly one argument.")
print("Usage: {} HOSTNAME".format(sys.argv[0]))
sys.exit(1)
if __name__ == "__main__":
if len(sys.argv) != 2:
print_usage()
hostname = sys.argv[1]
print("Generating compose file for host {}".format(hostname))
temp_config_file = "{}.yml".format(hostname)
with open(temp_config_file, "w") as fout:
subprocess.run(["sed", "s/{{hostname}}/"+hostname+"/g", "frontend.yml"], check=True, stdout=fout)
print("Deploying frontend stack")
stack_name = FRONTEND_STACK_PREFIX + hostname
subprocess.run(["docker", "stack", "deploy", "-c", temp_config_file, stack_name], check=True)
print("Adding backend services to network")
process = subprocess.run(["docker", "stack", "services", BACKEND_STACK], check=True, stdout=subprocess.PIPE)
process = subprocess.run(["tail", "-n", "+2"], input=process.stdout, check=True, stdout=subprocess.PIPE)
process = subprocess.run(["awk", "{print $2}"], input=process.stdout, check=True, stdout=subprocess.PIPE)
for line in process.stdout.decode().split("\n"):
service = line.strip()
if service:
alias = service.replace(BACKEND_STACK+"_", "")
subprocess.run(
["docker", "service", "update", "--network-add",
"name={},alias={}".format(stack_name + "_default", alias), service], check=True)
print("Cleaning up")
os.remove(temp_config_file)
|
import unittest
import os
from programy.context import ClientContext
from programytest.aiml_tests.client import TestClient
class ThatSraiTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(ThatSraiTestClient, self).load_configuration(arguments)
self.configuration.client_configuration.configurations[0].configurations[0].files.aiml_files._files=[os.path.dirname(__file__)]
class ThatSraiAIMLTests(unittest.TestCase):
def setUp(self):
client = ThatSraiTestClient()
self._client_context = client.create_client_context("testid")
def test_that_srai_agreement(self):
response = self._client_context.bot.ask_question(self._client_context, "GROUPAGREEMENT")
self.assertTrue(response in ['Default AGREEMENT'])
response = self._client_context.bot.ask_question(self._client_context, "HI")
self.assertTrue(response in ['Hello. Do you know explore the website?', 'Good day. Do you know explore the website?'])
response = self._client_context.bot.ask_question(self._client_context, "YES")
self.assertIsNotNone(response)
self.assertEqual(response, 'The website was created in 2014.')
def test_that_srai_disagreement(self):
response = self._client_context.bot.ask_question(self._client_context, "GROUPDISAGREEMENT")
self.assertTrue(response in ['Default DISAGREEMENT'])
response = self._client_context.bot.ask_question(self._client_context, "HI")
self.assertTrue(response in ['Hello. Do you know explore the website?', 'Good day. Do you know explore the website?'])
response = self._client_context.bot.ask_question(self._client_context, "NO")
self.assertIsNotNone(response)
self.assertEqual(response, 'Disagreement with that pattern.')
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from scipy.constants import epsilon_0
from ipywidgets import IntSlider, FloatSlider, FloatText, ToggleButtons
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
from SimPEG import Mesh, Maps, Utils, SolverLU
from ..base import widgetify
rcParams["font.size"] = 16
# Mesh parameters
npad = 20
cs = 0.5
hx = [(cs, npad, -1.3), (cs, 200), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, 100)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
# bounds on electrical resistivity
rhomin = 1e2
rhomax = 1e3
eps = 1e-9 # to stabilize division
infinity = 100 # what is "far enough"
def r(xyz, src_loc):
"""
Distance from source to points on an xyz grid
"""
return (
np.sqrt(
(xyz[:, 0] - src_loc[0]) ** 2
+ (xyz[:, 1] - src_loc[1]) ** 2
+ (xyz[:, 2] - src_loc[2]) ** 2
)
+ eps
)
def sum_term(rho1, rho2, h, r):
m = Utils.mkvc(np.arange(1, infinity + 1))
k = (rho2 - rho1) / (rho2 + rho1)
return np.sum(
((k ** m.T) * np.ones_like(Utils.mkvc(r, 2)))
/ np.sqrt(1.0 + (2.0 * h * m.T / Utils.mkvc(r, 2)) ** 2),
1,
)
def sum_term_deriv(rho1, rho2, h, r):
m = Utils.mkvc(np.arange(1, infinity + 1))
k = (rho2 - rho1) / (rho2 + rho1)
return np.sum(
((k ** m.T) * np.ones_like(Utils.mkvc(r, 2)))
/ (1.0 + (2.0 * h * m.T / Utils.mkvc(r, 2)) ** 2) ** (3.0 / 2.0)
* ((2.0 * h * m.T) ** 2 / Utils.mkvc(r, 2) ** 3),
1,
)
def layer_potentials(rho1, rho2, h, A, B, xyz):
"""
Compute analytic solution of surface potential for 2-layered Earth
    (Ref Telford 1990, section 8.3.4)
"""
def V(I, src_loc):
return (I * rho1 / (2.0 * np.pi * r(xyz, src_loc))) * (
1 + 2 * sum_term(rho1, rho2, h, r(xyz, src_loc))
)
VA = V(1.0, A)
VB = V(-1.0, B)
return VA + VB
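# The analytic surface potential evaluated above (see the Telford reference in
# the docstring) has the form
#   V(r) = (I * rho1 / (2 * pi * r)) * (1 + 2 * sum_{m>=1} k**m / sqrt(1 + (2*m*h/r)**2))
# with reflection coefficient k = (rho2 - rho1) / (rho2 + rho1); sum_term above
# truncates the series at `infinity` terms.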
def layer_E(rho1, rho2, h, A, B, xyz):
def dr_dx(src_loc):
return (xyz[:, 0] - src_loc[0]) / r(xyz, src_loc)
def dr_dy(src_loc):
return (xyz[:, 1] - src_loc[1]) / r(xyz, src_loc)
def dr_dz(src_loc):
return (xyz[:, 2] - src_loc[2]) / r(xyz, src_loc)
# m = Utils.mkvc(np.arange(1, infinity + 1))
def deriv_1(r):
return (-1.0 / r) * (1.0 + 2.0 * sum_term(rho1, rho2, h, r))
def deriv_2(r):
return 2.0 * sum_term_deriv(rho1, rho2, h, r)
def Er(I, r):
return -(I * rho1 / (2.0 * np.pi * r)) * (deriv_1(r) + deriv_2(r))
def Ex(I, src_loc):
return Er(I, r(xyz, src_loc)) * dr_dx(src_loc)
def Ey(I, src_loc):
return Er(I, r(xyz, src_loc)) * dr_dy(src_loc)
def Ez(I, src_loc):
return Er(I, r(xyz, src_loc)) * dr_dz(src_loc)
ex = Ex(1.0, A) + Ex(-1.0, B)
ey = Ey(1.0, A) + Ey(-1.0, B)
ez = Ez(1.0, A) + Ez(-1.0, B)
return ex, ey, ez
def layer_J(rho1, rho2, h, A, B, xyz):
ex, ey, ez = layer_E(rho1, rho2, h, A, B, xyz)
sig = 1.0 / rho2 * np.ones_like(xyz[:, 0])
# print sig
sig[xyz[:, 1] >= -h] = 1.0 / rho1 # since the model is 2D
return sig * ex, sig * ey, sig * ez
def G(A, B, M, N):
"""
Geometric factor
"""
return 1.0 / (
1.0 / (np.abs(A - M) + eps)
- 1.0 / (np.abs(M - B) + eps)
- 1.0 / (np.abs(N - A) + eps)
+ 1.0 / (np.abs(N - B) + eps)
)
def rho_a(VM, VN, A, B, M, N):
"""
Apparent Resistivity
"""
return (VM - VN) * 2.0 * np.pi * G(A, B, M, N)
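# Worked example (sketch), using the default electrode positions from the
# __main__ block at the bottom of this file (A=-30, B=30, M=-10, N=10):
#   |A-M| = 20, |M-B| = 40, |N-A| = 40, |N-B| = 20
#   G = 1 / (1/20 - 1/40 - 1/40 + 1/20) = 1 / 0.05 = 20
# so rho_a = (VM - VN) * 2 * pi * 20 for that configuration (ignoring eps).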
def solve_2D_potentials(rho1, rho2, h, A, B):
"""
    Here we solve the 2D DC problem for potentials (using the SimPEG Mesh class)
"""
sigma = 1.0 / rho2 * np.ones(mesh.nC)
sigma[mesh.gridCC[:, 1] >= -h] = 1.0 / rho1 # since the model is 2D
q = np.zeros(mesh.nC)
a = Utils.closestPoints(mesh, A[:2])
b = Utils.closestPoints(mesh, B[:2])
q[a] = 1.0 / mesh.vol[a]
q[b] = -1.0 / mesh.vol[b]
# q = q * 1./mesh.vol
A = (
mesh.cellGrad.T
* Utils.sdiag(1.0 / (mesh.dim * mesh.aveF2CC.T * (1.0 / sigma)))
* mesh.cellGrad
)
Ainv = SolverLU(A)
V = Ainv * q
return V
def solve_2D_E(rho1, rho2, h, A, B):
"""
solve the 2D DC resistivity problem for electric fields
"""
V = solve_2D_potentials(rho1, rho2, h, A, B)
E = -mesh.cellGrad * V
E = mesh.aveF2CCV * E
ex = E[: mesh.nC]
ez = E[mesh.nC :]
return ex, ez, V
def solve_2D_J(rho1, rho2, h, A, B):
ex, ez, V = solve_2D_E(rho1, rho2, h, A, B)
sigma = 1.0 / rho2 * np.ones(mesh.nC)
sigma[mesh.gridCC[:, 1] >= -h] = 1.0 / rho1 # since the model is 2D
return Utils.sdiag(sigma) * ex, Utils.sdiag(sigma) * ez, V
def plot_layer_potentials(rho1, rho2, h, A, B, M, N, imgplt="Model"):
markersize = 8.0
fontsize = 16.0
ylim = np.r_[-1.0, 1.0] * rhomax / (5 * 2 * np.pi) * 1.5
fig, ax = plt.subplots(2, 1, figsize=(9, 7))
fig.subplots_adjust(right=0.8)
x = np.linspace(-40.0, 40.0, 200)
z = np.linspace(x.min(), 0, 100)
pltgrid = Utils.ndgrid(x, z)
xplt = pltgrid[:, 0].reshape(x.size, z.size, order="F")
zplt = pltgrid[:, 1].reshape(x.size, z.size, order="F")
V = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.ndgrid(x, np.r_[0.0], np.r_[0.0]),
)
VM = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.mkvc(np.r_[M, 0.0, 0], 2).T,
)
VN = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.mkvc(np.r_[N, 0.0, 0], 2).T,
)
ax[0].plot(x, V, color=[0.1, 0.5, 0.1], linewidth=2)
ax[0].grid(
which="both", linestyle="-", linewidth=0.5, color=[0.2, 0.2, 0.2], alpha=0.5
)
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
ax[0].plot(B, 0, "_", markersize=12, markeredgewidth=3, color=[0.0, 0.0, 1.0])
ax[0].set_ylabel("Potential, (V)")
ax[0].set_xlabel("x (m)")
ax[0].set_xlim([x.min(), x.max()])
ax[0].set_ylim(ylim)
ax[0].plot(M, VM, "o", color="k")
ax[0].plot(N, VN, "o", color="k")
props = dict(boxstyle="round", facecolor="grey", alpha=0.3)
txtsp = 1
xytextM = (M + 0.5, np.max([np.min([VM, ylim.max()]), ylim.min()]) + 0.5)
xytextN = (N + 0.5, np.max([np.min([VN, ylim.max()]), ylim.min()]) + 0.5)
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM)
ax[0].annotate("%2.1e" % (VN), xy=xytextN, xytext=xytextN)
# ax[0].plot(np.r_[M, N], np.ones(2)*VN, color='k')
# ax[0].plot(np.r_[M, M], np.r_[VM, VN], color='k')
# ax[0].annotate('%2.1e'%(VM-VN) , xy=(M, (VM+VN)/2), xytext=(M-9, (VM+VN)/2.))
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].text(
x.max() + 1,
ylim.max() - 0.1 * ylim.max(),
"$\\rho_a$ = %2.2f" % (rho_a(VM, VN, A, B, M, N)),
verticalalignment="bottom",
bbox=props,
)
if imgplt == "Model":
model = rho2 * np.ones(pltgrid.shape[0])
model[pltgrid[:, 1] >= -h] = rho1
model = model.reshape(x.size, z.size, order="F")
cb = ax[1].pcolor(xplt, zplt, model, norm=LogNorm())
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
clim = [rhomin, rhomax]
clabel = "Resistivity ($\Omega$m)"
# elif imgplt == 'potential':
# Vplt = layer_potentials(rho1, rho2, h, np.r_[A, 0., 0.], np.r_[B, 0., 0.], np.c_[pltgrid, np.zeros_like(pltgrid[:, 0])])
# Vplt = Vplt.reshape(x.size, z.size, order='F')
# cb = ax[1].pcolor(xplt, zplt, Vplt)
# ax[1].contour(xplt, zplt, np.abs(Vplt), np.logspace(-2., 1., 10), colors='k', alpha=0.5)
# ax[1].set_ylabel('z (m)', fontsize=16)
# clim = ylim
# clabel = 'Potential (V)'
elif imgplt == "Potential":
Pc = mesh.getInterpolationMat(pltgrid, "CC")
V = solve_2D_potentials(rho1, rho2, h, np.r_[A, 0.0, 0.0], np.r_[B, 0.0, 0.0])
Vplt = Pc * V
Vplt = Vplt.reshape(x.size, z.size, order="F")
        # since we are using a strictly 2D code, the potentials at the surface
        # do not match the analytic solution, so we scale them to match the
        # analytic 2.5D result at the surface.
fudgeFactor = (
layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
np.c_[x.min(), 0.0, 0.0],
)
/ Vplt[0, 0]
)
cb = ax[1].pcolor(xplt, zplt, Vplt * fudgeFactor, cmap="viridis")
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
ax[1].contour(xplt, zplt, np.abs(Vplt), colors="k", alpha=0.5)
ax[1].set_ylabel("z (m)", fontsize=16)
clim = np.r_[-15.0, 15.0]
clabel = "Potential (V)"
elif imgplt == "E":
Pc = mesh.getInterpolationMat(pltgrid, "CC")
ex, ez, V = solve_2D_E(rho1, rho2, h, np.r_[A, 0.0, 0.0], np.r_[B, 0.0, 0.0])
ex, ez = Pc * ex, Pc * ez
Vplt = (Pc * V).reshape(x.size, z.size, order="F")
fudgeFactor = (
layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
np.c_[x.min(), 0.0, 0.0],
)
/ Vplt[0, 0]
)
# ex, ez, _ = layer_E(rho1, rho2, h, np.r_[A, 0., 0.], np.r_[B, 0., 0.], np.c_[pltgrid, np.zeros_like(pltgrid[:, 0])])
ex = fudgeFactor * ex.reshape(x.size, z.size, order="F")
ez = fudgeFactor * ez.reshape(x.size, z.size, order="F")
e = np.sqrt(ex ** 2.0 + ez ** 2.0)
cb = ax[1].pcolor(xplt, zplt, e, cmap="viridis", norm=LogNorm())
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
clim = np.r_[3e-3, 1e1]
ax[1].streamplot(
x,
z,
ex.T,
ez.T,
color="k",
linewidth=2
* (np.log(e.T) - np.log(e).min())
/ (np.log(e).max() - np.log(e).min()),
)
clabel = "Electric Field (V/m)"
elif imgplt == "J":
Pc = mesh.getInterpolationMat(pltgrid, "CC")
Jx, Jz, V = solve_2D_J(rho1, rho2, h, np.r_[A, 0.0, 0.0], np.r_[B, 0.0, 0.0])
Jx, Jz = Pc * Jx, Pc * Jz
Vplt = (Pc * V).reshape(x.size, z.size, order="F")
fudgeFactor = (
layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
np.c_[x.min(), 0.0, 0.0],
)
/ Vplt[0, 0]
)
Jx = fudgeFactor * Jx.reshape(x.size, z.size, order="F")
Jz = fudgeFactor * Jz.reshape(x.size, z.size, order="F")
J = np.sqrt(Jx ** 2.0 + Jz ** 2.0)
cb = ax[1].pcolor(xplt, zplt, J, cmap="viridis", norm=LogNorm())
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
ax[1].streamplot(
x,
z,
Jx.T,
Jz.T,
color="k",
linewidth=2
* (np.log(J.T) - np.log(J).min())
/ (np.log(J).max() - np.log(J).min()),
)
ax[1].set_ylabel("z (m)", fontsize=16)
clim = np.r_[3e-5, 3e-2]
clabel = "Current Density (A/m$^2$)"
ax[1].set_xlim([x.min(), x.max()])
ax[1].set_ylim([z.min(), 6.0])
ax[1].set_ylabel("z (m)", fontsize=16)
cbar_ax = fig.add_axes([1.0, 0.08, 0.04, 0.4])
plt.colorbar(cb, cax=cbar_ax, label=clabel)
if "clim" in locals():
cb.set_clim(clim)
ax[1].set_xlabel("x(m)", fontsize=16)
xytextA1 = (A - 0.75, 2.5)
xytextB1 = (B - 0.75, 2.5)
xytextM1 = (M - 0.75, 2.5)
xytextN1 = (N - 0.75, 2.5)
ax[1].plot(A, 1.0, marker="v", color="red", markersize=markersize)
ax[1].plot(B, 1.0, marker="v", color="blue", markersize=markersize)
ax[1].plot(M, 1.0, marker="^", color="yellow", markersize=markersize)
ax[1].plot(N, 1.0, marker="^", color="green", markersize=markersize)
ax[1].annotate("A", xy=xytextA1, xytext=xytextA1, fontsize=fontsize)
ax[1].annotate("B", xy=xytextB1, xytext=xytextB1, fontsize=fontsize)
ax[1].annotate("M", xy=xytextM1, xytext=xytextM1, fontsize=fontsize)
ax[1].annotate("N", xy=xytextN1, xytext=xytextN1, fontsize=fontsize)
plt.tight_layout()
plt.show()
return fig, ax
def plot_layer_potentials_app():
def plot_layer_potentials_interact(A, B, M, N, rho1, rho2, h, Plot):
return plot_layer_potentials(rho1, rho2, h, A, B, M, N, Plot)
app = widgetify(
plot_layer_potentials_interact,
A=FloatSlider(
min=-40.0, max=40.0, step=1.0, value=-30.0, continuous_update=False
),
B=FloatSlider(
min=-40.0, max=40.0, step=1.0, value=30.0, continuous_update=False
),
M=FloatSlider(
min=-40.0, max=40.0, step=1.0, value=-10.0, continuous_update=False
),
N=FloatSlider(
min=-40.0, max=40.0, step=1.0, value=10.0, continuous_update=False
),
rho1=FloatText(
min=rhomin,
max=rhomax,
value=500.0,
continuous_update=False,
description="$\\rho_1$",
),
rho2=FloatText(
min=rhomin,
max=rhomax,
value=500.0,
continuous_update=False,
description="$\\rho_2$",
),
h=FloatSlider(min=0.0, max=40.0, step=1.0, value=5.0, continuous_update=False),
Plot=ToggleButtons(options=["Model", "Potential", "E", "J"], value="Model"),
)
return app
if __name__ == "__main__":
rho1, rho2 = rhomin, rhomax
h = 5.0
A, B = -30.0, 30.0
M, N = -10.0, 10.0
Plot = "e"
plot_layer_potentials(rho1, rho2, h, A, B, M, N, Plot)
|
secret = """
---
apiVersion: v1
kind: Secret
metadata:
name: {cfg[metadata][name]}
type: {cfg[secret_type]}
stringData: []
"""
|
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import numpy as np
import pytest
import torch
import pyro.distributions as dist
from pyro.nn import AutoRegressiveNN
pytestmark = pytest.mark.init(rng_seed=123)
class AutoregressiveFlowTests(TestCase):
def setUp(self):
# Epsilon is used to compare numerical gradient to analytical one
self.epsilon = 1e-3
# Delta is tolerance for testing f(f^{-1}(x)) = x
self.delta = 1e-6
def _test_jacobian(self, input_dim, make_flow):
jacobian = torch.zeros(input_dim, input_dim)
iaf = make_flow(input_dim)
def nonzero(x):
return torch.sign(torch.abs(x))
x = torch.randn(1, input_dim)
iaf_x = iaf(x)
analytic_ldt = iaf.log_abs_det_jacobian(x, iaf_x).data.sum()
for j in range(input_dim):
for k in range(input_dim):
epsilon_vector = torch.zeros(1, input_dim)
epsilon_vector[0, j] = self.epsilon
delta = (iaf(x + 0.5 * epsilon_vector) - iaf(x - 0.5 * epsilon_vector)) / self.epsilon
jacobian[j, k] = float(delta[0, k].data.sum())
permutation = iaf.arn.get_permutation()
permuted_jacobian = jacobian.clone()
for j in range(input_dim):
for k in range(input_dim):
permuted_jacobian[j, k] = jacobian[permutation[j], permutation[k]]
numeric_ldt = torch.sum(torch.log(torch.diag(permuted_jacobian)))
ldt_discrepancy = np.fabs(analytic_ldt - numeric_ldt)
diag_sum = torch.sum(torch.diag(nonzero(permuted_jacobian)))
lower_sum = torch.sum(torch.tril(nonzero(permuted_jacobian), diagonal=-1))
assert ldt_discrepancy < self.epsilon
assert diag_sum == float(input_dim)
assert lower_sum == float(0.0)
def _test_inverse(self, input_dim, make_flow):
base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
iaf = make_flow(input_dim)
x_true = base_dist.sample(torch.Size([10]))
y = iaf._call(x_true)
x_calculated = iaf._inverse(y)
assert torch.norm(x_true - x_calculated, dim=-1).max().item() < self.delta
def _test_shape(self, base_shape, make_flow):
base_dist = dist.Normal(torch.zeros(base_shape), torch.ones(base_shape))
last_dim = base_shape[-1] if isinstance(base_shape, tuple) else base_shape
iaf = make_flow(input_dim=last_dim)
sample = dist.TransformedDistribution(base_dist, [iaf]).sample()
assert sample.shape == base_shape
def _make_iaf(self, input_dim):
arn = AutoRegressiveNN(input_dim, [3 * input_dim + 1])
return dist.InverseAutoregressiveFlow(arn)
def _make_flipflow(self, input_dim):
permutation = torch.randperm(input_dim, device='cpu').to(torch.Tensor().device)
return dist.PermuteTransform(permutation)
def test_iaf_jacobians(self):
for input_dim in [2, 3, 5, 7, 9, 11]:
self._test_jacobian(input_dim, self._make_iaf)
def test_flipflow_inverses(self):
for input_dim in [2, 3, 5, 7, 9, 11]:
self._test_inverse(input_dim, self._make_flipflow)
def test_iaf_shapes(self):
for shape in [(3,), (3, 4), (3, 4, 2)]:
self._test_shape(shape, self._make_iaf)
def test_flipflow_shapes(self):
for shape in [(3,), (3, 4), (3, 4, 2)]:
self._test_shape(shape, self._make_flipflow)
|
from flask import render_template, url_for, session, redirect, request
from app import webapp
from datetime import datetime, timedelta
import collections
from pytz import timezone,utc
import boto3
from boto3.dynamodb.conditions import Key
from app import send_email
ZONE = 'Canada/Eastern'
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
global manger
manger = True
# get user name from 'user' table in our database
def get_username(email):
table = dynamodb.Table('user')
response = table.get_item(
Key={
'email': email
}
)
return response['Item']['user_name']
# scan the 'userdata' table from our database
def scan_userdata(email):
table = dynamodb.Table('userdata')
response = table.scan(
ExpressionAttributeValues={
':e': email
},
FilterExpression='email = :e',
        # TODO: check whether the following works:
#KeyConditionExpression: 'Time = :time',
#ScanIndexForward: True,
# true = ascending, false = descending
#end todo
)
return response
#TODO: CHECK THIS FUNCTION
# scan the 'user' table from our database
def getColumnItems():
table = dynamodb.Table('user')
resp = table.scan(AttributesToGet=['email'])
return resp['Items']
#background thread
def backgroundProcess():
    print("start bg")
    global manager
    manager = True
    while True:
        nowtime = datetime.now(timezone(ZONE))
        weekday = nowtime.strftime('%A')
        hour = nowtime.strftime('%H:%M:%S')
        #send email to the manager every Monday at 7 am
        if manager:
            if weekday == 'Monday':
            #if weekday == 'Thursday':
                if hour == '07:00:00':
                #if hour == '09:48:00':
                    manager = False
                    res = manager_email()
                    print("true", res)
        else:
            if hour != '07:00:00' and not manager:
            #if hour != '09:48:00' and not manager:
                manager = True
#send email to manager inbox
def manager_email():
    # The cursor should look like this (order doesn't matter):
    # cursor = [{'email':'[email protected]','user_name':'lll','password':'1231213'},{'time':'2020-09-19','user_name':'hhh','password':'1231213'}]
cursor = getColumnItems()
userlen = len(cursor)
#message body for email
output = ''
userstr = ''
Children = 0
Adolescence = 0
Adults = 0
Seniors = 0
underweight = 0
normalweight = 0
overweight = 0
obesity = 0
female = 0
male = 0
other_gender = 0
riskWHRlow = 0
riskWHRmed = 0
riskWHRHih = 0
avgstep = 0
avgcalorie = 0
    #iterate to get all users
for user in cursor:
# scan user data
username = user['email']
weight = 0
BMI = 0
WHR = 0
Daily_Steps = 0
Calorie = 0
age = 0
gender = ''
#iterate to get all data for certain user
userdata = scan_userdata(user['email'])
num_samples = len(userdata['Items'])
for item in userdata['Items']:
age = int(item['age'])
gender = str(item['gender'])
weight = weight + float(item['weight'])
BMI = BMI + float(item['BMI'])
WHR = WHR + float(item['WHR'])
Daily_Steps = Daily_Steps + int(item['step'])
Calorie = Calorie + float(item['calorie'])
#detail data analysis for each user
userstr = userstr + ' \u2022 '+str(username) + ': ' + str(age)+ 'years old; ' + gender + '. --Average Weight: ' + str(float(weight/num_samples)) + '; Average BMI: ' + str(float(BMI/num_samples)) + '; Average WHR: ' + str(float(WHR/num_samples)) + '; Average Daily steps: ' + str(float(Daily_Steps/num_samples)) + '; Average Daily Calories: ' + str(float(Calorie/num_samples)) + '\n'
        #categorize each user
#for BMI
avg = float(BMI/num_samples)
if avg < 18.5:
underweight = underweight + 1
elif avg < 25.0:
normalweight = normalweight + 1
elif avg < 29.9:
overweight = overweight + 1
else:
obesity = obesity + 1
#for age and WHR
avg = float(WHR/num_samples)
if gender == 'Female':
female += 1
if avg < 0.8:
riskWHRlow += 1
elif avg< 0.86:
riskWHRmed += 1
else:
riskWHRHih += 1
elif gender == 'Male':
male += 1
if avg < 0.95:
riskWHRlow += 1
elif avg< 1.0:
riskWHRmed += 1
else:
riskWHRHih += 1
else:
other_gender += 1
#for age
if age < 13:
Children += 1
elif age < 19:
Adolescence += 1
elif age < 60:
Adults += 1
else:
Seniors += 1
avgstep = avgstep + float(Daily_Steps/num_samples)
avgcalorie = avgcalorie + float(Calorie/num_samples)
#email body
output = 'The total number of users in our health monitor app: ' + str(userlen) + '\n'
output = output + 'Summary For All Users: \n' + ' \u2022 # of Children(0~12): ' + str(Children) + ' (%.2f)'%(Children/(Children+Adolescence+Adults+Seniors)*100) + '; Adolescence(13~18): ' + str(Adolescence) + ' (%.2f)'%(Adolescence/(Children+Adolescence+Adults+Seniors)*100) +'; Adult(19~59): ' + str(Adults) + ' (%.2f)'%(Adults/(Children+Adolescence+Adults+Seniors)*100) +'; Seniors(60+): ' + str(Seniors) + ' (%.2f)'%(Seniors/(Children+Adolescence+Adults+Seniors)*100) +'\n'
output = output + ' \u2022 # of Female: ' + str(female) + ' (%.2f)'%(female/(female+male+other_gender)*100) + '; # of Male: ' + str(male) + ' (%.2f)'%(male/(female+male+other_gender)*100) +'; # of Others: ' + str(other_gender) + ' (%.2f)'%(other_gender/(female+male+other_gender)*100) + '\n'
output = output + ' \u2022 Average Daily steps across all users: ' + str(float(avgstep/userlen)) + '\n'
output = output + ' \u2022 Average Daily Calorie consumed by all users: ' + str(float(avgcalorie/userlen)) + '\n'
    output = output + ' \u2022 # of Underweight: ' + str(underweight) + ' (%.2f)'%(underweight/(underweight+normalweight+overweight+obesity)*100) + '; # of Normal weight: ' + str(normalweight) + ' (%.2f)'%(normalweight/(underweight+normalweight+overweight+obesity)*100) + '; # of Overweight: ' + str(overweight) + ' (%.2f)'%(overweight/(underweight+normalweight+overweight+obesity)*100) + '; # of Obesity: ' + str(obesity) + ' (%.2f)'%(obesity/(underweight+normalweight+overweight+obesity)*100) + '\n'
    output = output + ' \u2022 Health Risk based on WHR: \n' + ' -- Low (0.80 or lower for women/0.95 or lower for men): ' + str(riskWHRlow) + ' (%.2f)'%(riskWHRlow/(riskWHRlow+riskWHRmed+riskWHRHih)*100) + '\n' + ' -- Moderate (0.81–0.85 for women/0.96–1.0 for men): ' + str(riskWHRmed) + ' (%.2f)'%(riskWHRmed/(riskWHRlow+riskWHRmed+riskWHRHih)*100) + '\n' + ' -- High (0.86 or higher for women/1.0 or higher for men): ' + str(riskWHRHih) + ' (%.2f)'%(riskWHRHih/(riskWHRlow+riskWHRmed+riskWHRHih)*100) + '\n'
output = output + '\n\n'
if userlen <= 10:
output = output + 'User Details for each one: \n'
output = output + userstr
output = output + '\n'
sent = send_email.send_manager(output)
if sent == True:
print("Background thread running")
return "Background thread running"
else:
print("Fail to run Background thread")
return "Fail to run Background thread"
|
# Generated by Django 3.1.2 on 2020-11-05 20:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_relationship'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='bio',
field=models.TextField(default='no bio...', max_length=300),
),
]
|
import sys
N=int(sys.stdin.readline())
d=[0]*101
d[1]=1
d[2]=1
d[3]=1
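# With d[1] = d[2] = d[3] = 1, the recurrence d[i] = d[i-2] + d[i-3] gives
# d[4] = 2, d[5] = 2, d[6] = 3, d[7] = 4, d[8] = 5, d[9] = 7, and so on.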
for _ in range(N):
num = int(sys.stdin.readline())
for i in range(4, num + 1):
d[i] = d[i - 2] + d[i - 3]
print(d[num]) |
x = 9 #make x equal to 9
y = 3 #make y equal to 3
#Arithmetic Operators
print(x+y) #Addition
print(x-y) #Subtraction
print(x*y) #Multiplication
print(x/y) #Division
print(x%y) #Modulus
print(x**y) #Exponentiation
x = 9.191823
print(x//y) #floor division
#assignment operators
x = 9 # set x = 9
x += 3 # set x = x + 3
print(x)
x = 9
x -= 3 # set x = x - 3
print(x)
x *= 3 # set x = x * 3
print(x)
x /= 3 # set x = x / 3
print(x)
x **= 3 # set x = x ** 3
print(x)
#comparison operators
x = 9
y = 3
print(x==y) #true if x equals y, false otherwise
print(x!=y) #true if x does not equal y, false otherwise
print(x>y) #true if x is greater than y, false otherwise
print(x<y) #true if x is less than y, false otherwise
print(x>=y) #true if x is greater or equal to y, false otherwise
print(x<=y) #true if x is less than or equal to y, false otherwise |
import csv
import io
from django.core.management import call_command
from mock import patch
from datetime import datetime, date
from decimal import Decimal
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework_simplejwt.tokens import RefreshToken
from billing.constants import USD, EUR, CAD, SUPPORTED_CURRENCIES
from billing.context import top_up_wallet, find_transactions
from billing.models import User, Wallet, Transaction, ExchangeRate, TransactionEntry
class TestAPI(TestCase):
fixtures = []
def setUp(self):
self.now = datetime.utcnow()
self.user = User.objects.create(
username="admin",
email="[email protected]",
first_name="Kirill",
last_name="Bakanov",
)
self.user2 = User.objects.create(
username="terminator",
email="[email protected]",
first_name="Arny",
last_name="Shwarts",
)
self.password = "asd123456"
self.user.set_password(self.password)
self.user2.set_password(self.password)
self.user.save()
self.user2.save()
self.user_wallet = Wallet.objects.create(currency=USD, user=self.user)
self.user2_wallet = Wallet.objects.create(currency=EUR, user=self.user2)
self.client = APIClient()
self.anon_client = APIClient()
refresh = RefreshToken.for_user(self.user)
self.client.credentials(
HTTP_AUTHORIZATION=f"Bearer {str(refresh.access_token)}"
)
def test_auth_permissions_work(self):
result = self.anon_client.post(
reverse("top-up-wallet"), dict(amount=100), format="json"
)
self.assertEqual(result.status_code, 401)
def test_top_up_wallet_validation(self):
for amount in [-100, 0, 0.001]:
result = self.client.post(
reverse("top-up-wallet"), dict(amount=amount), format="json"
)
self.assertEqual(result.status_code, 400)
self.assertEqual(Transaction.objects.count(), 0)
def test_top_up_wallet(self):
result = self.client.post(
reverse("top-up-wallet"), dict(amount=100), format="json"
)
self.assertEqual(result.status_code, 201)
transaction_data = result.data["transaction"]
self.assertEqual(transaction_data["description"], "Top up")
self.assertTrue(transaction_data["is_top_up"])
self.assertEqual(len(transaction_data["entries"]), 1)
self.assertEqual(transaction_data["entries"][0]["wallet"], self.user_wallet.id)
self.assertEqual(transaction_data["entries"][0]["amount"], "100.00")
self.assertEqual(Transaction.objects.count(), 1)
self.assertEqual(TransactionEntry.objects.count(), 1)
self.user_wallet.refresh_from_db()
self.assertEqual(self.user_wallet.balance, 100)
def test_get_exchange_rates_downloads_new_rates(self):
to_date = date.today().isoformat()
example_response = {
"rates": {
"CAD": 1.3259568293,
"EUR": 0.9069472157,
"CNY": 7.0967712679,
"USD": 1.0,
},
"base": "USD",
"date": to_date,
}
with patch(
"billing.context.download_exchange_rates", return_value=example_response
):
result = self.client.get(
f"{reverse('exchange-rates')}?from_currency=USD&date={to_date}"
)
self.assertEqual(len(result.data["results"]), 3) # excluding USD
for result in result.data["results"]:
self.assertEqual(result["from_currency"], USD)
self.assertIn(result["to_currency"], SUPPORTED_CURRENCIES)
self.assertIsNotNone(result["rate"])
self.assertIsNotNone(result["date"])
def test_get_existing_exchange_rates(self):
ExchangeRate.objects.create(
from_currency=USD, to_currency=EUR, rate=0.90, date=date.today()
)
ExchangeRate.objects.create(
from_currency=USD, to_currency=USD, rate=1, date=date.today()
)
ExchangeRate.objects.create(
from_currency=USD, to_currency=CAD, rate=1.33, date=date.today()
)
result = self.client.get(f"{reverse('exchange-rates')}?from_currency=EUR")
self.assertEqual(len(result.data["results"]), 2) # self rate excluded
self.assertEqual(result.data["results"][0]["from_currency"], EUR)
self.assertEqual(result.data["results"][0]["to_currency"], USD)
self.assertEqual(result.data["results"][1]["from_currency"], EUR)
self.assertEqual(result.data["results"][1]["to_currency"], CAD)
# Rates calculated according to from_currency in request
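        # With USD->EUR = 0.90 and USD->CAD = 1.33 stored above, the EUR-based rates are
        # derived as EUR->USD = 1 / 0.90 ~= 1.11 and EUR->CAD = 1.33 / 0.90 ~= 1.48.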
self.assertEqual(result.data["results"][0]["rate"], Decimal("1.11"))
self.assertEqual(result.data["results"][1]["rate"], Decimal("1.48"))
result = self.client.get(
f"{reverse('exchange-rates')}?from_currency=EUR&date={date.today()}"
)
self.assertEqual(len(result.data["results"]), 2) # self rate excluded
def test_signup(self):
result = self.anon_client.post(
reverse("signup"),
dict(
username="hellothere",
password="GeneralKenobi!",
email="[email protected]",
city="Jedi Temple",
country="Coruscant",
currency=CAD,
),
format="json",
)
self.assertEqual(result.status_code, 201)
result = result.json()
self.assertIsNotNone(result["id"])
self.assertIsNotNone(result["wallet"])
self.assertEqual(result["email"], "[email protected]")
self.assertEqual(result["username"], "hellothere")
self.assertTrue(User.objects.filter(username="hellothere").count(), 1)
self.assertEqual(
sorted(result["wallet"].keys()), sorted(["id", "balance", "currency"])
)
self.assertEqual(result["wallet"]["currency"], CAD)
def test_send_money(self):
today = date.today()
ExchangeRate.objects.create(
from_currency=USD, to_currency=USD, rate=1, date=today
)
ExchangeRate.objects.create(
from_currency=USD, to_currency=EUR, rate=0.90, date=today
)
self.assertEqual(self.user_wallet.balance, 0)
post_data = dict(
amount=100,
description="It's a trap!",
destination_wallet=self.user2_wallet.id,
)
result = self.client.post(reverse("transactions"), post_data, format="json")
# didn't validate because not enough funds
self.assertEqual(result.status_code, 400)
self.assertEqual(result.json(), ["More gold is needed."])
# Add 500 $ to user wallet
top_up_wallet(self.user_wallet, 500)
result = self.client.post(reverse("transactions"), post_data, format="json")
self.assertEqual(result.status_code, 201)
self.user_wallet.refresh_from_db()
self.user2_wallet.refresh_from_db()
self.assertEqual(self.user_wallet.balance, Decimal("400"))
self.assertEqual(self.user2_wallet.balance, Decimal("90"))
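        # 100 USD left the sender's wallet; the recipient's EUR wallet was credited 100 * 0.90 = 90 EUR.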
self.assertEqual(Transaction.objects.count(), 2) # 1 top up, 1 payment
self.assertEqual(
TransactionEntry.objects.count(), 3
) # 1 top up, 2 for payment
def test_report(self):
# Setup data before report testing
today = date.today()
ExchangeRate.objects.create(
from_currency=USD, to_currency=USD, rate=1, date=today
)
ExchangeRate.objects.create(
from_currency=USD, to_currency=EUR, rate=0.90, date=today
)
call_command("add_transactions")
transactions = Transaction.objects.count()
self.assertEqual(transactions, 102)
# Check report query of another user doesn't work if you are not staff
result = self.client.get(
f"{reverse('generate-report')}?username={self.user2.username}"
)
self.assertEqual(result.status_code, 403)
# Check report works for self
result = self.client.get(
f"{reverse('generate-report')}?username={self.user.username}"
)
self.assertEqual(result.status_code, 200)
self.assertEqual(len(result.data), 101) # 102 - 1 for top up from another user
self.assertEqual(
sorted(result.data[0].keys()),
sorted(["id", "username", "created", "currency", "amount"]),
)
# Check report works with date_from filter
result = self.client.get(
f"{reverse('generate-report')}?username={self.user.username}&date_from={result.data[50]['created']}"
)
self.assertEqual(result.status_code, 200)
self.assertEqual(len(result.data), 51)
# Check report works with date_from and date_to
result = self.client.get(
f"{reverse('generate-report')}?"
f"username={self.user.username}&date_from={result.data[10]['created']}&date_to={result.data[5]['created']}"
)
self.assertEqual(result.status_code, 200)
self.assertEqual(len(result.data), 6)
# Check CSV
result = self.client.get(
f"{reverse('generate-report')}?username={self.user.username}&format=csv"
)
content = result.content.decode("utf-8")
cvs_reader = csv.reader(io.StringIO(content))
body = list(cvs_reader)
headers = body.pop(0)
self.assertEqual(headers, ["amount", "created", "currency", "id", "username"])
self.assertEqual(len(body), 101)
|
#!/usr/bin/env python
# coding=UTF-8
"""
Desc:
Author:TavisD
Time:2019/10/15 15:31
"""
import pytest
from test.utils import HOST, http_get, http_post, get_db_session
apply_url = HOST + "device/apply/{device_id}"
return_url = HOST + "device/return/{apply_id}"
audit_url = HOST + "device/audit/{apply_id}"
cancel_url = HOST + "device/cancel/{apply_id}"
@pytest.fixture
def empty_device_apply_record():
def _empty(device_id, apply_id=None):
session = get_db_session()
session.execute("delete from device_apply_record where device_id={device_id}".format(device_id=device_id))
session.commit()
if apply_id:
session.execute("delete from device_apply_record where id={id}".format(id=apply_id))
session.commit()
return _empty
def test_apply_device_successful(admin_token, empty_device_apply_record):
"""
    Applying for a device succeeds
:param admin_token:
:param empty_device_apply_record:
:return:
"""
empty_device_apply_record(1)
data = {
"start_time": "2019-10-15 09:28:55", "end_time": "2019-10-30 00:55:55", "application_desc": "测试需要"
}
result = http_post(apply_url.format(device_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_apply_device_failed_not_available(admin_token):
"""
    Applying for a device fails: the device is unavailable
:param admin_token:
:return:
"""
data = {
"start_time": "2019-10-15 09:28:55", "end_time": "2019-10-30 00:55:55", "application_desc": "测试需要"
}
result = http_post(apply_url.format(device_id=9999), data=data, token=admin_token)
assert result.json()['code'] == 3004
assert result.json()['msg'] == "设备不可用"
def test_apply_device_failed_duplicate_apply(admin_token, empty_device_apply_record):
"""
    Applying for a device fails: duplicate application
:param admin_token:
:param empty_device_apply_record:
:return:
"""
empty_device_apply_record(1)
data = {
"start_time": "2019-10-15 09:28:55", "end_time": "2019-10-30 00:55:55", "application_desc": "测试需要"
}
http_post(apply_url.format(device_id=1), data=data, token=admin_token)
result = http_post(apply_url.format(device_id=1), data=data, token=admin_token)
assert result.json()['code'] == 3005
assert result.json()['msg'] == "重复申请"
def test_modify_apply_device_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Modifying a device application succeeds
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (5, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 3, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {
"apply_id": 5, "start_time": "2019-10-15 00:00:00", "end_time": "2019-10-30 23:59:59",
"application_desc": "修改申请"
}
result = http_post(apply_url.format(device_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_return_back_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Returning a device succeeds
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 6, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
result = http_get(return_url.format(apply_id=1), token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_return_back_failed(admin_token, empty_device_apply_record):
"""
    Returning a device fails
:param admin_token:
:param empty_device_apply_record:
:return:
"""
empty_device_apply_record(1, 999)
result = http_get(return_url.format(apply_id=999), token=admin_token)
assert result.json()['code'] == 3006
assert result.json()['msg'] == "归还失败"
def test_return_back_failed_wrong_status(admin_token, empty_device_apply_record, execute_sql):
"""
    Returning a device fails: wrong device status
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 3, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
result = http_get(return_url.format(apply_id=1), token=admin_token)
assert result.json()['code'] == 3006
assert result.json()['msg'] == "归还失败"
def test_admin_approval_apply_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Admin approves an apply record
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 1, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 1, "reason": "admin审批apply记录,通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_admin_not_approval_apply_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Admin rejects an apply record
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 1, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 0, "reason": "admin审批apply记录,不通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_admin_approval_return_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Admin approves a return record
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 4, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 1, "reason": "admin审批return记录,通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_admin_not_approval_return_successful(admin_token, empty_device_apply_record, execute_sql):
"""
    Admin rejects a return record
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 4, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 0, "reason": "admin审批return记录,不通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=admin_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_admin_approval_return_failed(admin_token, empty_device_apply_record, execute_sql):
"""
    Auditing the record fails
:param admin_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """
INSERT INTO device_apply_record VALUES (1, 1, 1, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 3, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 0, "reason": "admin审批return记录,不通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=admin_token)
assert result.json()['code'] == 3007
assert result.json()['msg'] == "审批失败"
def test_owner_approval_apply_successful(test1_token, empty_device_apply_record, execute_sql):
"""
    Owner approves the application
:param test1_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """INSERT INTO device_apply_record VALUES (1, 4, 2, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 1, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 1, "reason": "owner审批apply记录,通过"}
result = http_post(audit_url.format(apply_id=1), data=data, token=test1_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
def test_owner_approval_apply_failed(test1_token, empty_device_apply_record, execute_sql):
"""
    Owner audit fails: an owner cannot audit another owner's record
:param test1_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """INSERT INTO device_apply_record VALUES (1, 1, 2, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 1, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
data = {"approval": 1, "reason": "owner审批apply记录,失败"}
result = http_post(audit_url.format(apply_id=1), data=data, token=test1_token)
assert result.json()['code'] == 3007
assert result.json()['msg'] == "审批失败"
def test_cancel_apply_successful(test1_token, empty_device_apply_record, execute_sql):
"""
    Cancelling an application succeeds
:param test1_token:
:param empty_device_apply_record:
:param execute_sql:
:return:
"""
empty_device_apply_record(1, 1)
sql = """INSERT INTO device_apply_record VALUES (1, 4, 2, '2019-10-15 09:28:55', '2019-10-30 00:55:55', '测试需要', 1, NULL, NULL, NULL, NULL, 0, 0, '2019-10-15 16:16:48.399755', '2019-10-15 16:16:48.399755');
"""
execute_sql(sql)
result = http_get(cancel_url.format(apply_id=1), token=test1_token)
assert result.json()['code'] == 0
assert result.json()['msg'] == "ok"
|
import os
from flask import Flask, render_template, g
from gifs import get_giphy_results
from helpers import divide_chunks
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def index():
gifs = get_giphy_results()
gifs = divide_chunks(gifs, int(len(gifs)/4))
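    # divide_chunks (from helpers) is assumed to split the list into pieces of the given
    # size, producing the four column lists passed to the template below.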
return render_template(
'index.html',
gif_set_0=gifs[0],
gif_set_1=gifs[1],
gif_set_2=gifs[2],
gif_set_3=gifs[3],
nav_link="https://media.giphy.com/media/xUOwG3nVH6Of928xJm/giphy.gif",
nav_path="./map",
)
@app.errorhandler(404)
def page_not_found(error):
return index()
@app.route('/map', strict_slashes=False)
def map():
return render_template(
'map.html',
nav_link="https://media.giphy.com/media/TFedSWdWdQnoOJ3YWL/giphy.gif",
nav_path="./",
)
if __name__ == '__main__':
app.run(debug=os.environ.get('DEBUG', False))
|
from turtle import Turtle
ALIGNMENT = "center"
FONT = ("courier", 24, "normal")
class scoreBoard(Turtle):
def __init__(self):
super().__init__()
self.score = 0
with open("my_score.txt", mode = 'r') as file:
self.high_score = int(file.read())
self.color("white")
self.penup()
self.goto(0, 250)
self.hideturtle()
self.update()
def update(self):
self.clear()
self.write(f"Score: {self.score} High Score {self.high_score}", align = ALIGNMENT, font= FONT)
def increase_score(self):
self.score +=1
self.update()
def reset_score(self):
if self.score > self.high_score:
self.high_score = self.score
with open("my_score.txt", mode='w') as file:
                file.write(f" {self.high_score}")
self.score = 0
self.update()
|
from flask import Flask, render_template, jsonify
from quotes import QUOTES
import random
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('index.html')
@app.route("/quote")
def generateQuote():
return jsonify({'quote': str(randomQuote(QUOTES))})
def randomQuote(_list_):
return random.choice(_list_)
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
|
from xv_leak_tools.log import L
from xv_leak_tools.test_components.cleanup.cleanup import Cleanup
class LinuxCleanup(Cleanup):
def cleanup(self):
L.warning("No cleanup implemented for Linux yet!")
|
# -*- coding: utf-8 -*-
import os
import pytest
from severus.translator import Translator
@pytest.fixture(scope='session')
def T():
return Translator(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'lang1'
)
)
@pytest.fixture(scope='session')
def Tpre():
return Translator(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'lang2'
)
)
|
'''
tree_constants.py holds constants used in defining decision trees,
such as column definitions.
Column definitions:
0) Split feature, -1 if leaf
1) Split threshold
2) Node number of this node (nodes are numbered in pre-order).
3) Node number of left child, -1 if leaf
4) Node number of right child, -1 if leaf
5) Number of data points in this node
6) Value of this node
'''
# Position constants for the fields in the tree
FEATURE_COL = 0
THR_COL = 1
NODE_NUM_COL = 2
CHILD_LEFT_COL = 3
CHILD_RIGHT_COL = 4
CT_COL = 5
VAL_COL = 6
# FEATURE_COL value if no split/leaf
NO_FEATURE = -2
# THR_COL value if no split/leaf
NO_THR = -2.0
# CHILD_LEFT_COL and CHILD_RIGHT_COL values if leaf (no children)
NO_CHILD = -1
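# A hypothetical example row (illustrative values only): a leaf node numbered 4 that
# holds 17 samples and predicts 0.82 would be stored as
#   [NO_FEATURE, NO_THR, 4, NO_CHILD, NO_CHILD, 17, 0.82]
# and row[FEATURE_COL] == NO_FEATURE identifies it as a leaf.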
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import requests
import csv
from datetime import datetime
from datetime import timedelta
from datetime import date
from geopy.distance import vincenty
import json
from lib import csv_io
from lib import json_io
if __name__ == '__main__':
m8 = 'http://data.tainan.gov.tw/dataset/4c260d97-e268-4b4a-8b15-c0fc92a25120/resource/316034ad-f2ae-4a8e-bafd-d6d98e388aaa/download/10408.csv'
m9 = 'http://data.tainan.gov.tw/dataset/4c260d97-e268-4b4a-8b15-c0fc92a25120/resource/2cdd3bbe-6a8c-438e-b85a-1bde14382944/download/10409.csv'
m10 = 'http://data.tainan.gov.tw/dataset/4c260d97-e268-4b4a-8b15-c0fc92a25120/resource/fde0f38c-ba91-40e1-a69b-406f061c1a3b/download/10410.csv'
m11 = 'http://data.tainan.gov.tw/dataset/4c260d97-e268-4b4a-8b15-c0fc92a25120/resource/ede84d86-ffdf-4233-aaa1-b31b329fcaec/download/z10410410411.csv'
m8 = csv_io.req_csv(m8, 'utf-8')
m9 = csv_io.req_csv(m9, 'utf-8')
m10 = csv_io.req_csv(m10, 'utf-8')
m11 = csv_io.req_csv(m11, 'utf-8')
print (m9[0])
print (m10[0])
for row in m10:
row.insert(8, row[-2])
row.insert(9, row[-3])
del row[-2]
del row[-2]
for row in m11:
row.insert(8, row[-2])
row.insert(9, row[-3])
del row[-2]
del row[-2]
data = m8 + m9[1:] + m10[1:] + m11[1:]
print (m10[0])
print (data[-1])
csv_io.write_csv('../data/drug_all.csv', data)
|
# Exercise number 4 - Python WorkOut
# Author: Barrios Ramirez Luis Fernando
# Language: Python3 3.8.2 64-bit
# Main function with no arguments
def hex_output():
decimal_num = 0
hex_num = input('Enter a hex number to convert: ')
for power, digit in enumerate(reversed(hex_num)):
decimal_num += int(digit, 16) * (16 ** power)
print(decimal_num)
hex_output()
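# For comparison (not part of the original exercise): Python's built-in int() accepts a
# base argument and performs the same conversion in one call, e.g. int('ff', 16) == 255.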
|
#
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from types import FunctionType
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.utils.translation import ungettext_lazy
from bridge.vars import SCHEDULER_TYPE, USER_ROLES, JOB_ROLES
from bridge.utils import logger, file_get_or_create, unique_id, BridgeException
import marks.SafeUtils as SafeUtils
import marks.UnsafeUtils as UnsafeUtils
import marks.UnknownUtils as UnknownUtils
from users.models import Extended
from jobs.models import Job, JobFile
from marks.models import MarkUnsafeCompare, MarkUnsafeConvert, ErrorTraceConvertionCache
from service.models import Scheduler
from jobs.jobForm import JobForm
from marks.ConvertTrace import ConvertTrace
from marks.CompareTrace import CompareTrace, CONVERSION
from marks.tags import CreateTagsFromFile
JOB_SETTINGS_FILE = 'settings.json'
def extend_user(user, role=USER_ROLES[1][0]):
try:
user.extended.role = role
user.extended.save()
except ObjectDoesNotExist:
Extended.objects.create(role=role, user=user)
user.first_name = 'Firstname'
user.last_name = 'Lastname'
user.save()
class PopulationError(Exception):
pass
class Population:
jobs_dir = os.path.join(settings.BASE_DIR, 'jobs', 'presets')
def __init__(self, user=None, manager=None, service=None):
self.changes = {'marks': {}}
self.user = user
if manager is None:
self.manager = self.__get_manager(None, None)
if service is not None:
self.__add_service_user(service[0], service[1])
else:
self.manager = self.__get_manager(manager[0], manager[1])
if service is not None and manager[0] != service[0]:
self.__add_service_user(service[0], service[1])
self.__population()
def __population(self):
if self.user is not None:
try:
Extended.objects.get(user=self.user)
except ObjectDoesNotExist:
extend_user(self.user)
self.__populate_functions()
self.changes['jobs'] = self.__populate_jobs()
self.changes['tags'] = self.__populate_tags()
self.__populate_unknown_marks()
self.__populate_unsafe_marks()
self.__populate_safe_marks()
sch_crtd1 = Scheduler.objects.get_or_create(type=SCHEDULER_TYPE[0][0])[1]
sch_crtd2 = Scheduler.objects.get_or_create(type=SCHEDULER_TYPE[1][0])[1]
self.changes['schedulers'] = (sch_crtd1 or sch_crtd2)
def __populate_functions(self):
conversions = {}
for func_name in [x for x, y in ConvertTrace.__dict__.items()
if type(y) == FunctionType and not x.startswith('_')]:
description = self.__correct_description(getattr(ConvertTrace, func_name).__doc__)
func, crtd = MarkUnsafeConvert.objects.get_or_create(name=func_name)
if crtd or description != func.description:
self.changes['functions'] = True
func.description = description
func.save()
conversions[func_name] = func
MarkUnsafeConvert.objects.filter(~Q(name__in=list(conversions))).delete()
comparisons = []
for func_name in [x for x, y in CompareTrace.__dict__.items()
if type(y) == FunctionType and not x.startswith('_')]:
comparisons.append(func_name)
description = self.__correct_description(getattr(CompareTrace, func_name).__doc__)
conversion = CONVERSION.get(func_name, func_name)
if conversion not in conversions:
raise BridgeException('Convert function "%s" for comparison "%s" does not exist' %
(conversion, func_name))
func, crtd = MarkUnsafeCompare.objects.get_or_create(name=func_name, convert=conversions[conversion])
if crtd or description != func.description:
self.changes['functions'] = True
func.description = description
func.save()
MarkUnsafeCompare.objects.filter(~Q(name__in=comparisons)).delete()
ErrorTraceConvertionCache.objects.all().delete()
def __correct_description(self, descr):
self.__is_not_used()
descr_strs = descr.split('\n')
new_descr_strs = []
for s in descr_strs:
if len(s) > 0 and len(s.split()) > 0:
new_descr_strs.append(s)
return '\n'.join(new_descr_strs)
def __get_manager(self, manager_username, manager_password):
if manager_username is None:
try:
return Extended.objects.filter(role=USER_ROLES[2][0])[0].user
except IndexError:
raise BridgeException('There are no managers in the system')
try:
manager = User.objects.get(username=manager_username)
except ObjectDoesNotExist:
manager = User.objects.create(username=manager_username, first_name='Firstname', last_name='Lastname')
self.changes['manager'] = {
'username': manager.username,
'password': self.__add_password(manager, manager_password)
}
extend_user(manager, USER_ROLES[2][0])
return manager
def __add_service_user(self, service_username, service_password):
if service_username is None:
return
try:
extend_user(User.objects.get(username=service_username), USER_ROLES[4][0])
except ObjectDoesNotExist:
service = User.objects.create(username=service_username, first_name='Firstname', last_name='Lastname')
extend_user(service, USER_ROLES[4][0])
self.changes['service'] = {
'username': service.username,
'password': self.__add_password(service, service_password)
}
def __add_password(self, user, password):
self.__is_not_used()
if isinstance(password, str):
password = password.strip()
if not isinstance(password, str) or len(password) == 0:
password = unique_id()[:8]
user.set_password(password)
user.save()
return password
def __check_job_name(self, name):
if not isinstance(name, str) or len(name) == 0:
raise BridgeException('Default job name is required')
job_name = name
cnt = 1
while True:
try:
Job.objects.get(name=job_name)
except ObjectDoesNotExist:
break
cnt += 1
job_name = "%s #%s" % (name, cnt)
return job_name
def __populate_jobs(self):
created_jobs = []
# Directory "specifications" and files "program fragmentation.json" and "verifier profiles.json" should be added
# for all preset jobs.
specs_children = self.__get_dir(os.path.join(self.jobs_dir, 'specifications'), 'specifications')
program_fragmentation = self.__get_file(os.path.join(self.jobs_dir, 'program fragmentation.json'),
'program fragmentation.json')
verifier_profiles = self.__get_file(os.path.join(self.jobs_dir, 'verifier profiles.json'),
'verifier profiles.json')
for dirpath, dirnames, filenames in os.walk(self.jobs_dir):
# Do not traverse within specific directories. Directory "specifications" should be placed within the root
# preset jobs directory, directory "staging" can be placed anywhere.
if os.path.basename(dirpath) == 'specifications' or os.path.basename(dirpath) == 'staging':
dirnames[:] = []
filenames[:] = []
continue
# Directories without preset job settings file serve to keep ones with that file and specific ones.
job_settings_file = os.path.join(dirpath, JOB_SETTINGS_FILE)
if not os.path.exists(job_settings_file):
continue
# Do not traverse within directories with preset job settings file.
dirnames[:] = []
with open(job_settings_file, encoding='utf8') as fp:
try:
job_settings = json.load(fp)
except Exception as e:
logger.exception(e)
raise BridgeException('Settings file of preset job "{0}" is not valid JSON file'.format(dirpath))
if settings.POPULATE_JUST_PRODUCTION_PRESETS and not job_settings.get('production'):
# Do not populate non-production jobs
continue
if 'description' not in job_settings:
raise BridgeException('Preset job "{0}" does not have description'.format(dirpath))
try:
job_name = self.__check_job_name(job_settings.get('name'))
except BridgeException as e:
                raise BridgeException('{0} (preset job "{1}")'.format(str(e), dirpath))
job = JobForm(self.manager, None, 'copy').save({
'identifier': job_settings.get('identifier'),
'name': job_name,
'description': job_settings['description'],
'global_role': JOB_ROLES[1][0],
'file_data': json.dumps([{
'type': 'root',
'text': 'Root',
'children':
[specs_children, program_fragmentation, verifier_profiles] + self.__get_children(dirpath)
}], ensure_ascii=False)
})
created_jobs.append([job.name, job.identifier])
return created_jobs
def __get_file(self, path, fname):
with open(path, mode='rb') as fp:
hashsum = file_get_or_create(fp, fname, JobFile, True)[1]
return {'type': 'file', 'text': fname, 'data': {'hashsum': hashsum}}
def __get_dir(self, path, fname):
return {'type': 'folder', 'text': fname, 'children': self.__get_children(path)}
def __get_children(self, root):
children = []
for fname in os.listdir(root):
if fname == JOB_SETTINGS_FILE:
continue
path = os.path.join(root, fname)
if os.path.isfile(path):
children.append(self.__get_file(path, fname))
elif os.path.isdir(path):
children.append(self.__get_dir(path, fname))
return children
def __populate_unknown_marks(self):
res = UnknownUtils.PopulateMarks(self.manager)
if res.created > 0:
self.changes['marks']['unknown'] = (res.created, res.total)
def __populate_safe_marks(self):
res = SafeUtils.PopulateMarks(self.manager)
new_num = len(res.created)
if new_num > 0:
self.changes['marks']['safe'] = (new_num, res.total)
def __populate_unsafe_marks(self):
res = UnsafeUtils.PopulateMarks(self.manager)
new_num = len(res.created)
if new_num > 0:
self.changes['marks']['unsafe'] = (new_num, res.total)
def __populate_tags(self):
created_tags = []
num_of_new = self.__create_tags('unsafe')
if num_of_new > 0:
created_tags.append(ungettext_lazy(
'%(count)d new unsafe tag uploaded.', '%(count)d new unsafe tags uploaded.', num_of_new
) % {'count': num_of_new})
num_of_new = self.__create_tags('safe')
if num_of_new > 0:
created_tags.append(ungettext_lazy(
'%(count)d new safe tag uploaded.', '%(count)d new safe tags uploaded.', num_of_new
) % {'count': num_of_new})
return created_tags
def __create_tags(self, tag_type):
self.__is_not_used()
preset_tags = os.path.join(settings.BASE_DIR, 'marks', 'tags_presets', "%s.json" % tag_type)
if not os.path.isfile(preset_tags):
return 0
with open(preset_tags, mode='rb') as fp:
try:
res = CreateTagsFromFile(self.manager, fp, tag_type, True)
except Exception as e:
raise BridgeException("Error while creating tags: %s" % str(e))
return res.number_of_created
def __is_not_used(self):
pass
# Example argument: {'username': 'myname', 'password': '12345', 'last_name': 'Mylastname', 'first_name': 'Myfirstname'}
# last_name and first_name are not required; username and password are required. email can be set for admin.
# Returns None if everything is OK, str (error text) in other cases.
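# A hypothetical call (the usernames and passwords below are illustrative only):
# populate_users(
#     admin={'username': 'admin', 'password': 'admin-pass'},
#     manager={'username': 'manager', 'password': 'manager-pass'},
#     service={'username': 'service', 'password': 'service-pass'},
#     exist_ok=True,
# )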
def populate_users(admin=None, manager=None, service=None, exist_ok=False):
def check_user_data(userdata):
if not isinstance(userdata, dict):
raise PopulationError('{0} data has wrong format')
if 'username' not in userdata or not isinstance(userdata['username'], str) or len(userdata['username']) == 0:
raise PopulationError('{0} username is required')
if 'password' not in userdata or not isinstance(userdata['password'], str) or len(userdata['password']) == 0:
raise PopulationError('{0} password is required')
if 'last_name' not in userdata:
userdata['last_name'] = 'Lastname'
if 'first_name' not in userdata:
userdata['first_name'] = 'Firstname'
try:
User.objects.get(username=userdata['username'])
userdata['exists'] = True
raise PopulationError('{0} with specified username already exists')
except ObjectDoesNotExist:
pass
if admin is not None:
try:
check_user_data(admin)
except PopulationError as e:
if not admin.get('exists') or not exist_ok:
return str(e).format('Administrator')
else:
user = User.objects.create_superuser(
username=admin['username'], email=admin.get('email', ''), password=admin['password'],
first_name=admin['first_name'], last_name=admin['last_name']
)
Extended.objects.create(user=user, role=USER_ROLES[1][0])
if manager is not None:
try:
check_user_data(manager)
except PopulationError as e:
if not manager.get('exists') or not exist_ok:
return str(e).format('Manager')
else:
user = User.objects.create_user(
username=manager['username'], password=manager['password'],
first_name=manager['first_name'], last_name=manager['last_name']
)
Extended.objects.create(user=user, role=USER_ROLES[2][0])
if service is not None:
try:
check_user_data(service)
except PopulationError as e:
if not service.get('exists') or not exist_ok:
return str(e).format('Service user')
else:
user = User.objects.create_user(
username=service['username'], password=service['password'],
first_name=service['first_name'], last_name=service['last_name']
)
Extended.objects.create(user=user, role=USER_ROLES[4][0])
return None
|
'''
See the namespace for available functions and the corresponding docstrings
for more information.
'''
from .npv import NPV
from .histogram import histogram
from .params import sample_params, load_params_from_file
__version__ = "0.4.1"
|
import math
from typing import List
import numpy
from nobos_commons.data_structures.geometry import Triangle
from nobos_commons.data_structures.skeletons.joint_3d import Joint3D
from nobos_commons.data_structures.skeletons.joint_visibility import JointVisibility
def get_euclidean_distance_joint3D(joint_a: Joint3D, joint_b: Joint3D) -> float:
return numpy.linalg.norm(joint_a.to_numpy_position() - joint_b.to_numpy_position())
def get_euclidean_distance_joint_lists_3D(joints_a: List[Joint3D], joints_b: List[Joint3D],
min_joint_score: float = 0.0) -> List[float]:
"""
    Returns the distance of the corresponding joints of two lists. The lists must have the same length.
:param min_joint_score: The minimum score for both joints to be included in the distance check
:param joints_a:
:param joints_b:
:return: List of floats for each joint_id in the lists with the euclidean distance
"""
assert len(joints_a) == len(joints_b)
joint_distances = []
for joint_id, joint_tuple in enumerate(zip(joints_a, joints_b)):
joint_a, joint_b = joint_tuple
if joint_a.score >= min_joint_score and joint_b.score >= min_joint_score:
joint_distances.append(get_euclidean_distance_joint3D(joint_a, joint_b))
return joint_distances
def get_distances_3D(joint_a: Joint3D, joint_b: Joint3D) -> (float, float, float, float):
"""
    Calculates the distances between the x, y and z coordinates as well as the euclidean distance between the joints.
    :param joint_a: 3D joint from
    :param joint_b: 3D joint to
    :return: (
    distance between the joints' x coordinates,
    distance between the joints' y coordinates,
    distance between the joints' z coordinates,
    euclidean distance between the joints
    )
"""
distance_x = abs(joint_a.x - joint_b.x)
distance_y = abs(joint_a.y - joint_b.y)
distance_z = abs(joint_a.z - joint_b.z)
euclidean_distance = get_euclidean_distance_joint3D(joint_a, joint_b)
return distance_x, distance_y, distance_z, euclidean_distance
def get_triangle_from_joints_3D(joint_a: Joint3D, joint_b: Joint3D, joint_c: Joint3D) -> Triangle:
"""
Returns alpha, beta and gamma in a triangle formed by three joints (in radians).
length_a = length_line c->b
length_b = length_line c->a
length_c = length_line a->b
alpha = angle between joint_b and joint_c
beta = angle between joint_a and joint_c
gamma = angle between joint_a and joint_b
cos alpha = (b^2 + c^2 - a^2) / (2 * b * c)
cos beta = (a^2 + c^2 - b^2) / (2 * a * c)
gamma = pi - alpha - beta
    :param joint_a: 3D joint
    :param joint_b: 3D joint
    :param joint_c: 3D joint
:return: (alpha_rad, beta_rad, gamma_rad)
"""
length_a = get_euclidean_distance_joint3D(joint_c, joint_b)
length_b = get_euclidean_distance_joint3D(joint_c, joint_a)
length_c = get_euclidean_distance_joint3D(joint_a, joint_b)
# Note: Round to prevent round errors on later decimals on extremes (1.0, -1.0)
# TODO: How to handle 0 distance correctly?
if length_a == 0 or length_b == 0 or length_c == 0:
return Triangle(0, 0, 0, 0, 0, 0)
cos_alpha = round((((length_b ** 2) + (length_c ** 2) - (length_a ** 2)) / (2 * length_b * length_c)), 2)
alpha_rad = math.acos(cos_alpha)
cos_beta = round((((length_a ** 2) + (length_c ** 2) - (length_b ** 2)) / (2 * length_a * length_c)), 2)
beta_rad = math.acos(cos_beta)
gamma_rad = math.pi - alpha_rad - beta_rad
return Triangle(length_a, length_b, length_c, alpha_rad, beta_rad, gamma_rad)
def get_middle_joint_3D(joint_a: Joint3D, joint_b: Joint3D) -> Joint3D:
"""
    Returns a joint which is in the middle of the two input joints. The visibility and score are estimated from the
    visibility and score of the two surrounding joints.
:param joint_a: Surrounding joint one
:param joint_b: Surrounding joint two
:return: Joint in the middle of joint_a and joint_b
"""
if not joint_a.is_set or not joint_b.is_set:
return None
visibility: JointVisibility
if joint_a.visibility == JointVisibility.VISIBLE and joint_b.visibility == JointVisibility.VISIBLE:
visibility = JointVisibility.VISIBLE
elif joint_a.visibility == JointVisibility.INVISIBLE or joint_b.visibility == JointVisibility.INVISIBLE:
visibility = JointVisibility.INVISIBLE
elif joint_a.visibility == JointVisibility.ABSENT or joint_b.visibility == JointVisibility.ABSENT:
visibility = JointVisibility.ABSENT
return Joint3D(
x=((joint_a.x + joint_b.x) / 2),
y=((joint_a.y + joint_b.y) / 2),
z=((joint_a.z + joint_b.z) / 2),
# TODO: Rotation
score=(joint_a.score + joint_b.score) / 2,
visibility=visibility
)
|
print ("Hola Mundo!!!")
m = [5, 'old', 'new', 8, 'time', 2]
print (m[0])
print (m[-1])
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import pytest
from russian_tagsets import converters, ud
#from .opencorpora_aot_data import PARSE_RESULTS
class TestInternalConversion(object):
TEST_DATA = [
#Noun, Verb, ADJF, ADVB, PRTF, PRTS, NUMB, COMP, NPRO, GRND, CONJ, PRCL, PREP
#GNdr, Pltm, Sgtm, Ms-f, Inmx, Name, Surn, Patr, Fixd
#inan, anim - Noun, anim, inan - ADJF/PRTF
#femn, masc, neut; sing plur; nomn, gent, datv, accs, ablt, loct; voct, loc2, gen2, gen1
#Anum, Qual, Apro
#perf, impr; intr, tran; pres, past, futr; indc, impr; actv pssv; 1per, 2per, 3per; excl, incl
['власть', 'NOUN,inan,femn sing,nomn', 'NOUN Animacy=Inan|Case=Nom|Gender=Fem|Number=Sing'],
        # in UD the gender of Pltm nouns is filled in, but within the shared task we do not ask to determine it (the same goes for plural adjectives)
['суток', 'NOUN,inan,GNdr,Pltm,plur,gent', 'NOUN Animacy=Inan|Case=Gen|Number=Ptan'],
['лесу', 'NOUN,inan,masc,sing,loc2', 'NOUN Animacy=Inan|Case=Loc|Gender=Masc|Number=Sing'],
['боку', 'NOUN,inan,masc,sing,gen2', 'NOUN Animacy=Inan|Case=Gen|Gender=Masc|Number=Sing'],
['бока', 'NOUN,inan,masc,sing,gen1', 'NOUN Animacy=Inan|Case=Gen|Gender=Masc|Number=Sing'],
['ань', 'NOUN,anim,femn,sing,voct', 'NOUN Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing'],
        # МИКРОБ-type case: we take animacy from the lemma and assign a different animacy to the accusative form; they apparently use two lemmas
['персонаж', 'NOUN,anim,masc,Inmx,sing,inan,accs', 'NOUN Animacy=Inan|Case=Acc|Gender=Masc|Number=Sing'],
['персонажа', 'NOUN,anim,masc,Inmx,sing,accs', 'NOUN Animacy=Anim|Case=Acc|Gender=Masc|Number=Sing'],
['персонаж', 'NOUN,anim,masc,Inmx,sing,nomn', 'NOUN Animacy=Anim|Case=Nom|Gender=Masc|Number=Sing'],
#['персонаж', 'NOUN,anim,masc,Inmx,sing,nomn', 'NOUN Animacy=Inan|Case=Nom|Gender=Masc|Number=Sing'],
['ивана', 'NOUN,anim,masc,Name sing,gent', 'PROPN Animacy=Anim|Case=Gen|Gender=Masc|Number=Sing'],
        # unclear which number surnames should get on their side: sing or Coll
['иванова', 'NOUN,anim,masc,Sgtm,Surn,sing,gent', 'PROPN Animacy=Anim|Case=Gen|Gender=Masc|Number=Sing'],
['гиппиус', 'NOUN,anim,femn,Sgtm,Fixd,Surn,sing,gent', 'PROPN Animacy=Anim|Case=Gen|Gender=Fem|Number=Sing'],
        # it seems that plural surnames occur only in the masculine (needs checking)
#['ивановы', 'NOUN,anim,GNdr,Ms-f,Pltm,Surn,plur,nomn', 'PROPN Animacy=Anim|Case=Nom|Gender=Masc|Number=Plur'],
['ивановичем', 'NOUN,anim,masc,Patr,sing,ablt', 'PROPN Animacy=Anim|Case=Ins|Gender=Masc|Number=Sing'],
        # if a surname is indeclinable for both genders, we keep one Sgtm lemma; they apparently have two lemmas (verify)
        # odd that on our side there is only singular
#['винчи', 'NOUN,anim,GNdr,Ms-f,Sgtm,Fixd,Surn,sing,gent', 'PROPN Animacy=Anim|Case=Gen|Gender=Masc|Number=Sing'],
#['винчи', 'NOUN,anim,GNdr,Ms-f,Sgtm,Fixd,Surn,sing,gent', 'PROPN Animacy=Anim|Case=Gen|Gender=Fem|Number=Sing'],
['москвы', 'NOUN,inan,femn,Sgtm,Geox,sing,gent', 'PROPN Animacy=Inan|Case=Gen|Gender=Fem|Number=Sing'],
['оон', 'NOUN,inan,femn,Sgtm,Fixd,Abbr,Orgn,sing,gent', 'PROPN Animacy=Inan|Case=Gen|Gender=Fem|Number=Sing'],
#['а', 'NOUN,anim,GNdr,Ms-f,Sgtm,Fixd,Abbr,Name,Init,sing,nomn', 'PROPN Animacy=Anim|Case=Nom|Gender=Masc|Number=Sing'], # инициал
['я', 'NPRO,1per,sing,nomn', 'PRON Case=Nom|Number=Sing|Person=1'],
['вами', 'NPRO,2per,plur,ablt', 'PRON Case=Ins|Number=Plur|Person=2'],
['поменяться', 'INFN,perf,intr', 'VERB Aspect=Perf|VerbForm=Inf'],
        ['было', 'VERB,impf,intr neut,sing,past,indc,Auxt', 'AUX Aspect=Imp|Gender=Neut|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin'], # "было сделано" (auxiliary) vs. "было уязвимо" (verb); in OpenCorpora the tag is on the token
        ['было', 'VERB,impf,intr neut,sing,past,indc', 'VERB Aspect=Imp|Gender=Neut|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin'], # "было дано" (auxiliary) vs. "в классе было 20 человек" (verb)
['смогут', 'VERB,perf,intr,plur,3per,futr,indc', 'VERB Aspect=Perf|Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin'],
#['подойди', 'VERB,perf,intr,sing,impr,excl', 'VERB Aspect=Perf|Mood=Imp|Number=Sing|Person=2|VerbForm=Fin|Voice=Act'],
#['пройдемте', 'VERB,perf,intr,plur,impr,incl', 'VERB Aspect=Imp|Mood=Imp|Number=Plur|Person=1|VerbForm=Fin|Voice=Act'],
#['отражая', 'GRND,impf,tran,pres', 'VERB Aspect=Imp|Tense=Pres|VerbForm=Trans|Voice=Act'],
#['выстрадав', 'GRND,perf,tran,past', 'VERB Aspect=Perf|Tense=Past|VerbForm=Trans|Voice=Act'],
['голодающими', 'PRTF,impf,intr,pres,actv,plur,ablt', 'VERB Aspect=Imp|Case=Ins|Number=Plur|Tense=Pres|VerbForm=Part|Voice=Act'],
        # according to the guidelines this should be an adjective
['сделанный', 'PRTF,perf,tran,past,pssv,inan,masc,sing,accs', 'VERB Animacy=Inan|Aspect=Perf|Case=Acc|Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part|Voice=Pass'],
['написана', 'PRTS,perf,past,pssv,femn,sing', 'VERB Aspect=Perf|Gender=Fem|Number=Sing|Tense=Past|Variant=Brev|VerbForm=Part|Voice=Pass'],
['первом', 'ADJF,Anum,masc,sing,loct', 'ADJ Case=Loc|Gender=Masc|Number=Sing'],
['первого', 'ADJF,Anum,anim,masc,sing,accs', 'ADJ Animacy=Anim|Case=Acc|Gender=Masc|Number=Sing'],
['большая', 'ADJF,Qual,femn,sing,nomn', 'ADJ Case=Nom|Gender=Fem|Number=Sing'],
['студенческих', 'ADJF,plur,loct', 'ADJ Case=Loc|Number=Plur'],
['лучшим', 'ADJF,Supr,Qual,masc,sing,ablt', 'ADJ Case=Ins|Degree=Sup|Gender=Masc|Number=Sing'],
        # according to the guidelines this should be an adjective
['неприкосновенны', 'ADJS,plur', 'ADJ Number=Plur|Variant=Brev'],
['бела', 'ADJS,sing,masc,gent', 'ADJ Case=Gen|Gender=Masc|Number=Sing|Variant=Brev'],
        # incorrect on our side
#['1', 'NUMB', 'ADJ Animacy=Inan|Case=Nom|Gender=Neut|Number=Sing'], # 1 декабря
#['XX', 'ROMN', 'ADJ Animacy=Inan|Case=Nom|Gender=Masc|Number=Sing'], # XX век
#['выше', 'COMP', 'ADJ Degree=Cmp'], # он выше меня vs подняться выше
#['выше', 'COMP', 'ADV Degree=Cmp'], # есть идея создать пул и ставить помету на токен
['мой', 'ADJF,Apro,sing,masc,nomn', 'DET Case=Nom|Gender=Masc|Number=Sing'],
#['2', 'NUMB', 'NUM Case=Nom|Gender=Masc'], # два мальчика
['2', 'NUMB,intg', 'NUM NumForm=Digit'], # pymorphy2 result
['двум', 'NUMR,femn,datv', 'NUM Case=Dat|Gender=Fem'],
['пяти', 'NUMR,loct', 'NUM Case=Loc'],
['три', 'NUMR,inan,accs', 'NUM Animacy=Inan|Case=Acc'],
['громко', 'ADVB', 'ADV _'],
['ах', 'INTJ', 'INTJ _'],
['у', 'PREP', 'ADP _'],
['и', 'CONJ', 'CONJ _'],
#['если', 'CONJ', 'SCONJ'], # нам нужны списки
['же', 'PRCL', 'PART _'],
['.', 'PNCT', 'PUNCT _'],
#['%', 'PNCT', 'SYM _'],
]
@pytest.mark.parametrize(("word", "internal", "ud14"), TEST_DATA)
def test_from_internal(self, word, internal, ud14):
converted = converters.convert(internal, 'opencorpora-int', 'ud14')
assert converted == ud14
|
import pickle
root_folder="/home/guillefix/code/inria/captionRLenv/"
types = pickle.load(open(root_folder+"object_types.pkl","rb"))
geometric_solid = ('cube', 'block', 'cylinder')
kitchen_ware = ('bottle', 'bowl', 'plate', 'cup', 'spoon')
animal_model = ('bear', 'bird', 'dog', 'fish', 'elephant')
food_model = ('apple', 'banana', 'cookie', 'donut', 'sandwich')
vehicles_model = ('train', 'plane', 'car', 'bike', 'bus')
categories = dict(solid = geometric_solid,
kitchenware = kitchen_ware,
animal = animal_model,
food = food_model,
vehicle = vehicles_model,
)
types
colors = list(('red', 'green', 'blue', 'yellow', 'magenta', 'cyan', 'white', 'black'))
colors = tuple(colors)
positions = ('on the left side of the table', 'on the right side of the table', 'on the shelf', 'behind the door', 'in the drawer')
drawer_door = ('drawer', 'door')
any_all = ('any', 'all')
rgbb = ('red', 'green', 'blue')
attributes = dict(types=types,
categories=tuple(categories.keys()),
colors=colors,
positions=positions,
drawer_door=drawer_door,
any_all=any_all,
rgbb=rgbb)
admissible_actions=('Open', 'Close', 'Grasp', 'Put', 'Hide', 'Turn on', 'Turn off', 'Make', 'Paint', 'Move', 'Throw')
extra_words = ['Throw objects on the floor', 'Open the', 'Close the', 'Grasp any object', 'Grasp', 'Move any object', 'Move', 'Put object', 'Put', 'Hide object', 'Hide', 'Turn on the light', 'Turn off the light', 'Make the panel', 'Paint object', 'Paint', 'Paint']
words = sum([sum([v2.split(" ") for v2 in v], []) for k,v in attributes.items()], []) + list(admissible_actions) + sum([w.split(" ") for w in extra_words], []) + ["0","1","2","3"]
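# "words" collects the space-split tokens of every attribute value, the admissible action
# names as-is, the space-split tokens of the extra phrases, and the digit strings "0"-"3".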
import numpy as np
unique_words = np.unique(words)
unique_words
vocab_dict = {x:str(i) for i,x in enumerate(unique_words)}
vocab_dict_reverse = {str(i):x for i,x in enumerate(unique_words)}
len(vocab_dict)
#%%
# from constants import *
root_dir="/home/guillefix/code/inria/"
import json
with open(root_dir+"UR5_processed/npz.annotation.txt.annotation.class_index.json", "w") as f:
f.write(json.dumps(vocab_dict))
with open(root_dir+"UR5_processed/npz.annotation.txt.annotation.class_index_reverse.json", "w") as f:
f.write(json.dumps(vocab_dict_reverse))
|
import math
import copy
import random
import numpy as np
from collections import OrderedDict
import torch
from torch.distributions.kl import kl_divergence
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from maml_trpo.utils import DifferentiableSGD
import maml_trpo.utils.pytorch_utils as ptu
class MetaLearner(object):
def __init__(self,
policy,
value_fn,
inner_lr,
num_adapt_steps=1,
episodes_per_task=10,
                 gamma=0.99,
                 gae_lam=0.95,
                 max_kl=0.01,
                 max_backtracks=10,
                 cg_steps=10,
                 cg_damping=0.01,
device='cpu',
restore=False,
ckpt_path=None):
self.policy = policy
self.old_policy = copy.deepcopy(policy)
self.value_fn = value_fn
self.value_fn_optimizer = torch.optim.Adam(self.value_fn.parameters(), lr=inner_lr)
self.inner_optimizer = DifferentiableSGD(self.policy, lr=inner_lr)
self.gamma = gamma
self.num_adapt_steps = num_adapt_steps
self.episodes_per_task = episodes_per_task
self.gamma = gamma
self.gae_lam = gae_lam
# trpo and conjugate gradient params
self.max_kl = max_kl
self.max_backtracks = max_backtracks
self.cg_steps = cg_steps
self.cg_damping = cg_damping
self.to(device)
self.itr = 0
if restore:
self.restore(ckpt_path)
def sample(self, tasks, sampler):
"""
"""
old_params = dict(self.old_policy.named_parameters())
self.all_episodes = [[] for _ in range(len(tasks))]
self.all_params = []
logs = OrderedDict()
for i, (env_name, env_cls, task) in enumerate(tasks):
sampler.update_task(env_cls, task)
for j in range(self.num_adapt_steps):
train_episodes = sampler.sample(self.policy, eps_per_task = self.episodes_per_task,
gamma=self.gamma, device=self.device)
self.train_value_function(train_episodes)
require_grad = j < self.num_adapt_steps -1
self.adapt(train_episodes, set_grad=require_grad)
self.all_episodes[i].append(train_episodes)
if j==0:
logs.update(self.log_performance(train_episodes, metric_prefix=f"mt_tr_{env_name}_pre_adapt"))
self.all_params.append(dict(self.policy.named_parameters()))
valid_episodes = sampler.sample(self.policy, eps_per_task = self.episodes_per_task,
gamma=self.gamma, device=self.device)
logs.update(self.log_performance(valid_episodes, metric_prefix=f'mt_tr_{env_name}_post_adapt'))
self.train_value_function(valid_episodes)
self.all_episodes[i].append(valid_episodes)
ptu.update_module_params(self.policy, old_params)
return logs
def train_value_function(self, episodes):
value_loss = self.value_fn.value_loss(episodes.observations, episodes.returns)
self.value_fn_optimizer.zero_grad(set_to_none=True)
value_loss.backward()
self.value_fn_optimizer.step()
def inner_loss(self, episodes):
values = self.value_fn(episodes.observations)
advantages = episodes.gae(values, gae_lam=self.gae_lam)
advantages = ptu.weighted_normalize(advantages, weights=episodes.mask)
logprobs = self.policy.logprobs(episodes.observations, episodes.actions)
if logprobs.dim() > 2:
logprobs = torch.sum(logprobs, dim=2)
loss = -ptu.weighted_mean(logprobs * advantages, dim=0,
weights=episodes.mask)
return loss.mean()
def adapt(self, episodes, set_grad=True):
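        """One differentiable SGD step on the task's policy-gradient loss; with
        set_grad=True the graph is retained so the meta-update can backprop through it."""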
inner_loss = self.inner_loss(episodes)
self.inner_optimizer.set_grads_none()
inner_loss.backward(create_graph=set_grad)
with torch.set_grad_enabled(set_grad):
self.inner_optimizer.step()
def train_step(self, tasks, sampler):
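        """TRPO-style meta-update: sample per-task episodes, compute the natural
        gradient step with conjugate gradient, and backtrack to stay inside the
        KL trust region."""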
metric_logs = OrderedDict()
metric_logs.update(self.sample(tasks, sampler))
kl_before = self.compute_kl_divergence(set_grad=False)
meta_loss = self.meta_loss()
grads = torch.autograd.grad(meta_loss, self.policy.parameters())
policy_grad = torch.cat([grad.view(-1) for grad in grads]).detach()
step_dir = self.conjugate_gradient(-policy_grad)
        max_step = torch.sqrt(2 * self.max_kl / torch.dot(step_dir, self.fisher_vector_product(step_dir)))
full_step = max_step * step_dir
expected_improve = torch.dot(-policy_grad, full_step)
prev_params = parameters_to_vector(self.policy.parameters()).clone()
success, new_params = self.line_search(prev_params, full_step, expected_improve)
vector_to_parameters(new_params, self.policy.parameters())
meta_loss_after = self.meta_loss(set_grad=False)
kl_after = self.compute_kl_divergence(set_grad=False)
metric_logs.update({
'pre_adapt_kl':kl_before,
'pre_adapt_meta_loss':meta_loss,
'post_adapt_kl':kl_after,
'post_adapt_meta_loss':meta_loss_after
})
        self.itr += 1
return metric_logs
def evaluate_step(self, eval_tasks, eval_sampler, log_videofn=None, prefix='mt_ts', render=False, video_itr=None):
# eval meta batch size == benchmark test classes
#eval_policy = copy.deepcopy(self.policy)
theta = dict(self.policy.named_parameters())
value_theta = dict(self.value_fn.named_parameters())
logs = OrderedDict()
for (env_name, env_cls, task) in eval_tasks:
eval_sampler.update_task(env_cls, task)
for j in range(self.num_adapt_steps):
adapt_episodes = eval_sampler.sample(self.policy, eps_per_task=self.episodes_per_task,
gamma=self.gamma, device=self.device, render=render)
if j==0:
logs.update(self.log_performance(adapt_episodes, f'{prefix}_{env_name}_pre_adapt'))
if render:
pre_imageobs = adapt_episodes.image_obses
log_videofn(pre_imageobs, video_itr, video_title=f'{env_name}_pre_adapt')
self.train_value_function(adapt_episodes)
require_grad = j < self.num_adapt_steps - 1
self.adapt(adapt_episodes, set_grad=require_grad)
            valid_episodes = eval_sampler.sample(self.policy, eps_per_task=self.episodes_per_task, gamma=self.gamma, device=self.device, render=render)
if render:
post_imageobs = valid_episodes.image_obses
log_videofn(post_imageobs, video_itr, video_title=f'{env_name}_post_adapt')
logs.update(self.log_performance(valid_episodes, f'{prefix}_{env_name}_post_adapt'))
ptu.update_module_params(self.policy, theta)
ptu.update_module_params(self.value_fn, value_theta)
return logs
def meta_loss(self, set_grad=True):
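        """TRPO surrogate objective (importance-weighted advantages) of the adapted
        policy on each task's validation episodes, averaged over tasks."""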
old_params = dict(self.old_policy.named_parameters())
params = dict(self.policy.named_parameters())
task_losses = []
for task_episodes, task_params in zip(self.all_episodes, self.all_params):
train_episodes = task_episodes[:-1]
valid_episodes = task_episodes[-1]
for i in range(self.num_adapt_steps):
require_grad = i < self.num_adapt_steps-1 or set_grad
self.adapt(train_episodes[i], set_grad = require_grad)
ptu.update_module_params(self.old_policy, task_params)
with torch.set_grad_enabled(set_grad):
oldlogprobs = self.old_policy.logprobs(valid_episodes.observations, valid_episodes.actions).detach()
                logprobs = self.policy.logprobs(valid_episodes.observations, valid_episodes.actions)
values = self.value_fn(valid_episodes.observations)
advantages = valid_episodes.gae(values, gae_lam=self.gae_lam)
advantages = ptu.weighted_normalize(advantages,
weights=valid_episodes.mask)
log_ratio = logprobs-oldlogprobs
if log_ratio.dim() > 2:
log_ratio = torch.sum(log_ratio, dim=2)
ratio = torch.exp(log_ratio)
loss = -ptu.weighted_mean(ratio * advantages, dim=0,
weights=valid_episodes.mask)
task_losses.append(loss)
ptu.update_module_params(self.policy, params)
ptu.update_module_params(self.old_policy, old_params)
return torch.mean(torch.stack(task_losses, dim=0))
def compute_kl_divergence(self, set_grad=True):
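        """Mean KL(adapted policy || pre-update policy) on each task's validation
        episodes, averaged over tasks."""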
#old_pi = copy.deepcopy(self.policy)
old_params = dict(self.old_policy.named_parameters())
params = dict(self.policy.named_parameters())
kls = []
for task_episodes, task_params in zip(self.all_episodes, self.all_params):
train_episodes = task_episodes[:-1]
valid_episodes = task_episodes[-1]
for i in range(self.num_adapt_steps):
require_grad = i < self.num_adapt_steps-1 or set_grad
self.adapt(train_episodes[i], set_grad=require_grad)
ptu.update_module_params(self.old_policy, task_params)
with torch.set_grad_enabled(set_grad):
old_pi = ptu.detach_distribution(self.old_policy(valid_episodes.observations))
new_pi = self.policy(valid_episodes.observations)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl_loss = ptu.weighted_mean(kl_divergence(new_pi, old_pi), dim=0,
weights=mask)
kls.append(kl_loss)
ptu.update_module_params(self.policy, params)
ptu.update_module_params(self.old_policy, old_params)
return torch.mean(torch.stack(kls, dim=0))
def conjugate_gradient(self, b, residual_tol=1e-10):
"""
        Conjugate gradient solver for F x = b, where F is the (damped) Fisher
        information matrix applied implicitly via `fisher_vector_product`.
        For the algorithm and its derivation, see:
        http://www.cs.cmu.edu/~pradeepr/convexopt/Lecture_Slides/conjugate_direction_methods.pdf
"""
        x_k = torch.zeros_like(b)  # keep the iterate on the same device/dtype as b
d_k = b.clone().detach()
g_k = b.clone().detach()
g_dot_g = torch.dot(g_k, g_k)
for _ in range(self.cg_steps):
fvp = self.fisher_vector_product(d_k)
alpha = g_dot_g / torch.dot(d_k, fvp)
x_k += alpha * d_k
g_k -= alpha * fvp
new_g_dot_g = torch.dot(g_k, g_k)
beta = new_g_dot_g / g_dot_g
d_k = g_k + beta * d_k
g_dot_g = new_g_dot_g
if g_dot_g < residual_tol:
break
return x_k.detach()
def line_search(self, prev_params, fullstep, expected_improve, accept_ratio=0.1):
"""
        Backtracking line search: accept the largest step that improves the
        meta-loss while keeping the KL divergence inside the trust region.
"""
prev_loss = self.meta_loss()
for stepfrac in [.5**x for x in range(self.max_backtracks)]:
new_params = prev_params + stepfrac * fullstep
vector_to_parameters(new_params, self.policy.parameters())
loss = self.meta_loss(set_grad=False)
kl = self.compute_kl_divergence(set_grad=False)
improved = prev_loss - loss
#expected_improve = expected_improve * stepfrac
#ratio = improved/expected_improve
if improved.item() > 0.0 and kl.item() < self.max_kl:
return True, new_params
return False, prev_params
def fisher_vector_product(self, vector):
"""
        Hessian-vector product of the KL (i.e. Fisher-vector product), with
        damping added, used by the conjugate gradient solver.
"""
kl_loss = self.compute_kl_divergence(set_grad=True)
grads = torch.autograd.grad(kl_loss, self.policy.parameters(), create_graph=True)
grad_vector = torch.cat([grad.view(-1) for grad in grads])
grad_vector_product = torch.sum(grad_vector * vector)
grad_grads = torch.autograd.grad(grad_vector_product, self.policy.parameters())
fisher_vector_product = torch.cat([grad.contiguous().view(-1) for grad in grad_grads]).detach()
return fisher_vector_product + self.cg_damping * vector
def to(self, device, **kwargs):
self.policy.to(device, **kwargs)
self.old_policy.to(device, **kwargs)
self.value_fn.to(device, **kwargs)
self.device = device
def save(self, path):
torch.save({
"meta_policy" : self.policy.state_dict(),
"value_function" : self.value_fn.state_dict(),
"value_fn_optimizer": self.value_fn_optimizer.state_dict(),
"iteration": self.itr,
}, path)
def restore(self, path):
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint['meta_policy'])
self.value_fn.load_state_dict(checkpoint['value_function'])
self.value_fn_optimizer.load_state_dict(checkpoint['value_fn_optimizer'])
self.itr = checkpoint['iteration']
def log_performance(self, batch_episodes, metric_prefix='mt_tr'):
rewards = batch_episodes.rewards.to('cpu').detach().numpy()
success_scores = batch_episodes.success_scores
return_per_episode = np.sum(rewards, axis=0)
avg_return = np.mean(return_per_episode)
std_return = np.std(return_per_episode)
max_return = np.max(return_per_episode)
min_return = np.min(return_per_episode)
avg_success = np.mean(success_scores)
metric_log = {
f"{metric_prefix}_AvgReturn": avg_return,
f"{metric_prefix}_StdReturn" : std_return,
f"{metric_prefix}_MaxReturn" : max_return,
f"{metric_prefix}_MinReturn" : min_return,
f"{metric_prefix}_AvgSuccess" : avg_success,
}
return metric_log
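# Hedged usage sketch (not part of this module); the policy, value function and
# sampler constructors are repository-specific, so they are left abstract here:
#
#     learner = MetaLearner(policy, value_fn, inner_lr=0.1, device='cuda')
#     for it in range(num_iterations):
#         logs = learner.train_step(train_tasks, sampler)
#         if it % eval_every == 0:
#             logs.update(learner.evaluate_step(eval_tasks, eval_sampler))
#             learner.save(f'checkpoints/maml_trpo_{it}.pt')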
|
import math
import pprint
import os
import shutil
from multiprocessing import Pool
from functools import partial
import numpy as np
import dlib
import cv2
def generate_face(img):
    # Extra margin (in pixels) used when cropping around the detected face
N = 60
    # Image shape, used for a few calculations; bail out if the input is not a valid array
if not isinstance(img, np.ndarray):
return None
wg, hg = img.shape
    # Crop precisely around the detected face position
for face_cascade in face_cascades:
faces = face_cascade.detectMultiScale(img, 1.1, 1, minSize=(15, 15), flags=cv2.CASCADE_SCALE_IMAGE)
for face in faces:
(x, y, w, h) = face
yN, xN = np.abs(y-N), np.abs(x-N)
yNh, xNw = np.abs((y+N)+h), np.abs((x+N)+w)
crop = img[yN:yNh, xN:xNw]
            # Equalize contrast (CLAHE) and confirm the crop with the dlib face detector
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
clahe_image = clahe.apply(crop)
detections = detector(clahe_image, 1) #Detect the faces in the image
if len(list(detections)):
img = crop
break
    # Resize so that all output images have the same size
return cv2.resize(img, (225, 225))
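# Hedged note (not in the original script): generate_face returns a 225x225
# crop around the first region where a Haar cascade and the dlib detector both
# fire; if no detection succeeds it resizes the full frame instead. Example,
# with a placeholder path:
#   face = generate_face(cv2.imread('path/to/frame.png', 0))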
def save_image(emotional_img, i, directory, emt):
img = cv2.imread(emotional_img, 0)
if isinstance(img, np.ndarray):
emot = generate_face(img)
cv2.imwrite(f'{directory}/{emt}.{i}.png', emot)
return True
# Face detector
detector = dlib.get_frontal_face_detector()
# Landmark identifier. Set the filename to whatever you named the downloaded file
predictor = dlib.shape_predictor('classifier/shape_predictor_68_face_landmarks.dat')
face_cascades = [
cv2.CascadeClassifier('classifier/haarcascade_frontalface_default.xml'),
cv2.CascadeClassifier('classifier/haarcascade_frontalface_alt.xml'),
cv2.CascadeClassifier('classifier/haarcascade_frontalface_alt2.xml'),
cv2.CascadeClassifier('classifier/haarcascade_frontalface_alt_tree.xml'),
]
emotions_mapper = {
'0': 'neutral', '1': 'anger', '2':'contempt',
'3': 'disgust', '4': 'fear', '5': 'happy',
'6': 'sadness', '7': 'surprise'
}
pp = pprint.PrettyPrinter(indent=4)
if __name__ == '__main__':
emotion = {}
# Emotions
for em_dr in os.listdir('emotion'):
for em in os.listdir(f'emotion/{em_dr}'):
for fl in os.listdir(f'emotion/{em_dr}/{em}'):
with open(f'emotion/{em_dr}/{em}/{fl}', 'r') as hem:
emotion[(em_dr, em)] = {
'emotion': int(float(hem.read())),
'images': {
'neutral': None,
'emotional': []
}
}
# Images
for k in emotion.keys():
dr = f'images/{k[0]}/{k[1]}'
if os.path.isdir(dr):
imgs = sorted(os.listdir(dr))
# for img in [imgs[0], imgs[-2]]:
            if len(imgs) >= 6:  # need at least 6 frames to index imgs[-6], imgs[-4], imgs[-2] below
emotion[(k[0], k[1])]['images']['neutral'] = f'{dr}/{imgs[0]}'
emotion[(k[0], k[1])]['images']['emotional'].append(f'{dr}/{imgs[-6]}')
emotion[(k[0], k[1])]['images']['emotional'].append(f'{dr}/{imgs[-4]}')
emotion[(k[0], k[1])]['images']['emotional'].append(f'{dr}/{imgs[-2]}')
# pp.pprint(emotion)
    print('Starting to write the dataset...')
for k in emotion.items():
emt = emotions_mapper[str(k[1]['emotion'])]
directory = f'dataset/{k[0][0]}/{k[0][1]}'
if not os.path.exists(directory):
os.makedirs(directory)
neutral = generate_face(cv2.imread(k[1]['images']['neutral'], 0))
if isinstance(neutral, np.ndarray):
cv2.imwrite(f'{directory}/neutral.png', neutral)
with Pool(3) as pool:
imgs = k[1]['images']['emotional']
f = partial(save_image, directory=directory, emt=emt)
r = pool.starmap(f, zip(imgs, range(len(imgs))))
    print('Finished...')
|
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_min_length_3_xsd.nistschema_sv_iv_list_nmtoken_min_length_3 import NistschemaSvIvListNmtokenMinLength3
__all__ = [
"NistschemaSvIvListNmtokenMinLength3",
]
|