def read_data_line(inputfile,num_entries=1,type='float'):
#---------------------------------------------------------
    r"""
    Read a single line of data from an input file.

    Reads one line from an input file and returns an array of values.

    inputfile: a file pointer to an open file object
    num_entries: number of entries that should be read, defaults to only 1
    type: type of the values to be read in; they must all be the same type

    This function returns either a single value or an array of values,
    depending on whether num_entries > 1.
    """
    l = []
    while l == []:  # skip over blank lines
        line = inputfile.readline()
        l = line.split()
    val = np.empty(num_entries,type)
    if num_entries > len(l):
        print 'Error in read_data_line: num_entries = ', num_entries
        print '    is larger than length of l = ', l
    try:
        for i in range(num_entries):
            exec("val[i] = %s(l[i])" % type)
        if num_entries == 1:  # This is a convenience for calling functions
            return val[0]
        return val
    except ValueError:
        print "Invalid type for the %s value in %s" % (i,l)
        print "    type = ", type
        return None
    except:
        raise

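A minimal usage sketch (the file name and the values read are hypothetical;
note that read_data_line assumes numpy has been imported as np in its module):

    # Read a single int, then a pair of floats, from an input file.
    f = open('claw.data')                               # hypothetical file
    mx = read_data_line(f, num_entries=1, type='int')   # returns a scalar
    xlower, xupper = read_data_line(f, num_entries=2)   # returns array of 2
    f.close()
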
def verify_classic_acoustics(controller):
    """ Verifies 2d variable-coefficient acoustics from a previously verified classic run """
    import os
    from clawpack.pyclaw.util import check_diff
    import numpy as np

    state = controller.frames[controller.num_output_times].state
    dx, dy = controller.solution.domain.grid.delta
    test_q = state.get_q_global()

    if test_q is not None:
        thisdir = os.path.dirname(__file__)
        expected_pressure = np.loadtxt(os.path.join(thisdir,'pressure_classic.txt'))
        test_pressure = test_q[0,:,:]
        #test_err = dx*dy*np.linalg.norm(expected_pressure-test_pressure)
        test_err = np.max(np.abs(expected_pressure[:]-test_pressure[:]))
        return check_diff(0, test_err, abstol=1e-1)

def teardown(self):
    r"""
    Delete Fortran objects, which otherwise tend to persist in Python sessions.
    """
    if self.kernel_language == 'Fortran':
        del self.fmod

r"""
Delete Fortran objects, which otherwise tend to persist in Python sessions.
"""
if(self.kernel_language == 'Fortran'):
del self.fmod |
def step_hyperbolic(self,solution):
    r"""
    Take one time step on the homogeneous hyperbolic system.

    :Input:
     - *solution* - (:class:`~pyclaw.solution.Solution`) Solution that
       will be evolved
    """
    import numpy as np

    state = solution.states[0]
    grid = state.grid

    self.apply_q_bcs(state)
    if state.num_aux > 0:
        self.apply_aux_bcs(state)
    num_eqn,num_ghost = state.num_eqn,self.num_ghost

    if self.kernel_language == 'Fortran':
        mx = grid.num_cells[0]
        dx,dt = grid.delta[0],self.dt
        dtdx = np.zeros( (mx+2*num_ghost) ) + dt/dx
        rp1 = self.rp.rp1._cpointer

        self.qbc,cfl = self.fmod.step1(num_ghost,mx,self.qbc,self.auxbc,dx,dt,self._method,self._mthlim,self.fwave,rp1)

    elif self.kernel_language == 'Python':
        q   = self.qbc
        aux = self.auxbc

        # Limiter to use in the pth family
        limiter = np.array(self._mthlim,ndmin=1)

        dtdx = np.zeros( (2*self.num_ghost+grid.num_cells[0]) )

        # Find local value for dt/dx
        if state.index_capa >= 0:
            dtdx = self.dt / (grid.delta[0] * state.aux[state.index_capa,:])
        else:
            dtdx += self.dt/grid.delta[0]

        # Solve Riemann problem at each interface
        q_l = q[:,:-1]
        q_r = q[:,1:]
        if state.aux is not None:
            aux_l = aux[:,:-1]
            aux_r = aux[:,1:]
        else:
            aux_l = None
            aux_r = None
        wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)

        # Update loop limits; these are the limits for the Riemann solver
        # locations, which then update a grid cell value.
        # We include the Riemann problem just outside of the grid so we can
        # do proper limiting at the grid edges:
        #        LL    |                       |     UL
        #  |  LL |     |     |   ...   |     |  UL   |     |
        LL = self.num_ghost - 1
        UL = self.num_ghost + grid.num_cells[0] + 1

        # Update q for Godunov update
        for m in xrange(num_eqn):
            q[m,LL:UL]     -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
            q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]

        # Compute maximum wave speed
        cfl = 0.0
        for mw in xrange(wave.shape[1]):
            smax1 = np.max(dtdx[LL:UL]*s[mw,LL-1:UL-1])
            smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
            cfl = max(cfl,smax1,smax2)

        # If we are doing slope limiting we have more work to do
        if self.order == 2:
            # Initialize flux corrections
            f = np.zeros( (num_eqn,grid.num_cells[0] + 2*self.num_ghost) )

            # Apply limiters to waves
            if (limiter > 0).any():
                wave = tvd.limit(state.num_eqn,wave,s,limiter,dtdx)

            # Compute correction fluxes for second order q_{xx} terms
            dtdxave = 0.5 * (dtdx[LL-1:UL-1] + dtdx[LL:UL])
            if self.fwave:
                for mw in xrange(wave.shape[1]):
                    sabs = np.abs(s[mw,LL-1:UL-1])
                    om = 1.0 - sabs*dtdxave[:UL-LL]
                    ssign = np.sign(s[mw,LL-1:UL-1])
                    for m in xrange(num_eqn):
                        f[m,LL:UL] += 0.5 * ssign * om * wave[m,mw,LL-1:UL-1]
            else:
                for mw in xrange(wave.shape[1]):
                    sabs = np.abs(s[mw,LL-1:UL-1])
                    om = 1.0 - sabs*dtdxave[:UL-LL]
                    for m in xrange(num_eqn):
                        f[m,LL:UL] += 0.5 * sabs * om * wave[m,mw,LL-1:UL-1]

            # Update q by differencing correction fluxes
            for m in xrange(num_eqn):
                q[m,LL:UL-1] -= dtdx[LL:UL-1] * (f[m,LL+1:UL] - f[m,LL:UL-1])

    else:
        raise Exception("Unrecognized kernel_language; choose 'Fortran' or 'Python'")

    self.cfl.update_global_max(cfl)
    state.set_q_from_qbc(num_ghost,self.qbc)
    if state.num_aux > 0:
        state.set_aux_from_auxbc(num_ghost,self.auxbc)

r"""
Take one time step on the homogeneous hyperbolic system.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) Solution that
will be evolved
"""
import numpy as np
state = solution.states[0]
grid = state.grid
self.apply_q_bcs(state)
if state.num_aux > 0:
self.apply_aux_bcs(state)
num_eqn,num_ghost = state.num_eqn,self.num_ghost
if(self.kernel_language == 'Fortran'):
mx = grid.num_cells[0]
dx,dt = grid.delta[0],self.dt
dtdx = np.zeros( (mx+2*num_ghost) ) + dt/dx
rp1 = self.rp.rp1._cpointer
self.qbc,cfl = self.fmod.step1(num_ghost,mx,self.qbc,self.auxbc,dx,dt,self._method,self._mthlim,self.fwave,rp1)
elif(self.kernel_language == 'Python'):
q = self.qbc
aux = self.auxbc
# Limiter to use in the pth family
limiter = np.array(self._mthlim,ndmin=1)
dtdx = np.zeros( (2*self.num_ghost+grid.num_cells[0]) )
# Find local value for dt/dx
if state.index_capa>=0:
dtdx = self.dt / (grid.delta[0] * state.aux[state.index_capa,:])
else:
dtdx += self.dt/grid.delta[0]
# Solve Riemann problem at each interface
q_l=q[:,:-1]
q_r=q[:,1:]
if state.aux is not None:
aux_l=aux[:,:-1]
aux_r=aux[:,1:]
else:
aux_l = None
aux_r = None
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
# Update loop limits, these are the limits for the Riemann solver
# locations, which then update a grid cell value
# We include the Riemann problem just outside of the grid so we can
# do proper limiting at the grid edges
# LL | | UL
# | LL | | | | ... | | | UL | |
# | |
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]
# Compute maximum wave speed
cfl = 0.0
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
# If we are doing slope limiting we have more work to do
if self.order == 2:
# Initialize flux corrections
f = np.zeros( (num_eqn,grid.num_cells[0] + 2*self.num_ghost) )
# Apply Limiters to waves
if (limiter > 0).any():
wave = tvd.limit(state.num_eqn,wave,s,limiter,dtdx)
# Compute correction fluxes for second order q_{xx} terms
dtdxave = 0.5 * (dtdx[LL-1:UL-1] + dtdx[LL:UL])
if self.fwave:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
ssign = np.sign(s[mw,LL-1:UL-1])
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * ssign * om * wave[m,mw,LL-1:UL-1]
else:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * sabs * om * wave[m,mw,LL-1:UL-1]
# Update q by differencing correction fluxes
for m in xrange(num_eqn):
q[m,LL:UL-1] -= dtdx[LL:UL-1] * (f[m,LL+1:UL] - f[m,LL:UL-1])
else: raise Exception("Unrecognized kernel_language; choose 'Fortran' or 'Python'")
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(num_ghost,self.auxbc) |
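For reference, the Python branch above is LeVeque's wave-propagation form of
Godunov's method with second-order correction fluxes. In the notation of the
code (waves \mathcal{W}^p, speeds s^p, fluctuations \mathcal{A}^\pm\Delta Q,
limited waves \widetilde{\mathcal{W}}^p):

    Q_i^{n+1} = Q_i^n
        - \frac{\Delta t}{\Delta x}\left(\mathcal{A}^+\Delta Q_{i-1/2} + \mathcal{A}^-\Delta Q_{i+1/2}\right)
        - \frac{\Delta t}{\Delta x}\left(\tilde{F}_{i+1/2} - \tilde{F}_{i-1/2}\right),

    \tilde{F}_{i-1/2} = \frac{1}{2}\sum_p |s_{i-1/2}^p|
        \left(1 - \frac{\Delta t}{\Delta x}|s_{i-1/2}^p|\right)\widetilde{\mathcal{W}}_{i-1/2}^p .

In the fwave branch, |s^p| in the correction flux is replaced by
\mathrm{sgn}(s^p), since the waves then already carry the flux difference.
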
def step_hyperbolic(self,solution):
    r"""
    Take a step on the homogeneous hyperbolic system using the Clawpack
    algorithm.

    Clawpack is based on the Lax-Wendroff method, combined with Riemann
    solvers and TVD limiters applied to waves.
    """
    if self.kernel_language == 'Fortran':
        state = solution.states[0]
        grid = state.grid
        dx,dy = grid.delta
        mx,my = grid.num_cells
        maxm = max(mx,my)

        self.apply_q_bcs(state)
        if state.num_aux > 0:
            self.apply_aux_bcs(state)
        qold = self.qbc.copy('F')

        rpn2 = self.rp.rpn2._cpointer
        rpt2 = self.rp.rpt2._cpointer

        if self.dimensional_split:
            # Right now only Godunov dimensional splitting is implemented.
            # Strang dimensional splitting could be added following dimsp2.f in Clawpack.
            self.qbc, cfl_x = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
                    qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
                    self.aux1,self.aux2,self.aux3,self.work,1,self.fwave,rpn2,rpt2)
            #self.qbc[:, :, my + self.num_ghost:my + 2*self.num_ghost] = qold[:, :, my + self.num_ghost:my + 2*self.num_ghost]
            #self.qbc[:, :, 1:self.num_ghost] = qold[:, :, 1:self.num_ghost]
            self.qbc, cfl_y = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
                    self.qbc,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
                    self.aux1,self.aux2,self.aux3,self.work,2,self.fwave,rpn2,rpt2)
            cfl = max(cfl_x,cfl_y)
        else:
            self.qbc, cfl = self.fmod.step2(maxm,self.num_ghost,mx,my, \
                    qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
                    self.aux1,self.aux2,self.aux3,self.work,self.fwave,rpn2,rpt2)

        self.cfl.update_global_max(cfl)
        state.set_q_from_qbc(self.num_ghost,self.qbc)
        if state.num_aux > 0:
            state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
    else:
        raise NotImplementedError("No python implementation for step_hyperbolic in 2D.")

r"""
Take a step on the homogeneous hyperbolic system using the Clawpack
algorithm.
Clawpack is based on the Lax-Wendroff method, combined with Riemann
solvers and TVD limiters applied to waves.
"""
if(self.kernel_language == 'Fortran'):
state = solution.states[0]
grid = state.grid
dx,dy = grid.delta
mx,my = grid.num_cells
maxm = max(mx,my)
self.apply_q_bcs(state)
if state.num_aux > 0:
self.apply_aux_bcs(state)
qold = self.qbc.copy('F')
rpn2 = self.rp.rpn2._cpointer
rpt2 = self.rp.rpt2._cpointer
if self.dimensional_split:
#Right now only Godunov-dimensional-splitting is implemented.
#Strang-dimensional-splitting could be added following dimsp2.f in Clawpack.
self.qbc, cfl_x = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,1,self.fwave,rpn2,rpt2)
#self.qbc[:, :, my + self.num_ghost:my + 2*self.num_ghost] = qold[:, :, my + self.num_ghost:my + 2*self.num_ghost]
#self.qbc[:, :, 1:self.num_ghost] = qold[:, :, 1:self.num_ghost]
self.qbc, cfl_y = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
self.qbc,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,2,self.fwave,rpn2,rpt2)
cfl = max(cfl_x,cfl_y)
else:
self.qbc, cfl = self.fmod.step2(maxm,self.num_ghost,mx,my, \
qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,self.fwave,rpn2,rpt2)
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(self.num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
else:
raise NotImplementedError("No python implementation for step_hyperbolic in 2D.") |
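The two step2ds calls implement first-order (Godunov) dimensional splitting:
for q_t + f(q)_x + g(q)_y = 0, each full step applies the one-dimensional
wave-propagation operators in sequence,

    Q^* = Q^n + \Delta t\,\mathcal{L}_x(Q^n), \qquad
    Q^{n+1} = Q^* + \Delta t\,\mathcal{L}_y(Q^*),

where \mathcal{L}_x and \mathcal{L}_y are the 1D sweep operators in x and y.
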
def allocate_workspace(self,solution):
    r"""
    Allocate auxN and work arrays for use in Fortran subroutines.
    """
    import numpy as np

    state = solution.states[0]

    num_eqn,num_aux,num_waves,num_ghost,aux = state.num_eqn,state.num_aux,self.num_waves,self.num_ghost,state.aux

    # The following is a hack to work around an issue with f2py.
    # It involves wastefully allocating three arrays.
    # f2py seems not able to handle multiple zero-size arrays being passed.
    # It appears the bug is related to f2py/src/fortranobject.c line 841.
    if aux is None: num_aux = 1

    grid = state.grid
    maxmx,maxmy = grid.num_cells[0],grid.num_cells[1]
    maxm = max(maxmx, maxmy)

    # These work arrays really ought to live inside a Fortran module,
    # as is done for SharpClaw.
    self.aux1 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
    self.aux2 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
    self.aux3 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
    mwork = (maxm+2*num_ghost) * (31*num_eqn + num_waves + num_eqn*num_waves)
    self.work = np.empty((mwork),order='F')

r"""
Allocate auxN and work arrays for use in Fortran subroutines.
"""
import numpy as np
state = solution.states[0]
num_eqn,num_aux,num_waves,num_ghost,aux = state.num_eqn,state.num_aux,self.num_waves,self.num_ghost,state.aux
#The following is a hack to work around an issue
#with f2py. It involves wastefully allocating three arrays.
#f2py seems not able to handle multiple zero-size arrays being passed.
# it appears the bug is related to f2py/src/fortranobject.c line 841.
if(aux == None): num_aux=1
grid = state.grid
maxmx,maxmy = grid.num_cells[0],grid.num_cells[1]
maxm = max(maxmx, maxmy)
# These work arrays really ought to live inside a fortran module
# as is done for sharpclaw
self.aux1 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
self.aux2 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
self.aux3 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
mwork = (maxm+2*num_ghost) * (31*num_eqn + num_waves + num_eqn*num_waves)
self.work = np.empty((mwork),order='F') |
def step_hyperbolic(self,solution):
    r"""
    Take a step on the homogeneous hyperbolic system using the Clawpack
    algorithm.

    Clawpack is based on the Lax-Wendroff method, combined with Riemann
    solvers and TVD limiters applied to waves.
    """
    if self.kernel_language == 'Fortran':
        state = solution.states[0]
        grid = state.grid
        dx,dy,dz = grid.delta
        mx,my,mz = grid.num_cells
        maxm = max(mx,my,mz)

        self.apply_q_bcs(state)
        if state.num_aux > 0:
            self.apply_aux_bcs(state)
        qnew = self.qbc
        qold = qnew.copy('F')

        rpn3  = self.rp.rpn3._cpointer
        rpt3  = self.rp.rpt3._cpointer
        rptt3 = self.rp.rptt3._cpointer

        if self.dimensional_split:
            # Right now only Godunov dimensional splitting is implemented.
            # Strang dimensional splitting could be added following dimsp3.f in Clawpack.
            self.qbc, cfl_x = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
                  qold,self.qbc,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
                  self.aux1,self.aux2,self.aux3,self.work,1,rpn3,rpt3,rptt3)
            # self.qbc[:, :, my + self.num_ghost:my + 2*self.num_ghost, :] = qold[:, :, my + self.num_ghost:my + 2*self.num_ghost, :]
            # self.qbc[:, :, 1:self.num_ghost, :] = qold[:, :, 1:self.num_ghost, :]
            self.qbc, cfl_y = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
                  self.qbc,self.qbc,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
                  self.aux1,self.aux2,self.aux3,self.work,2,rpn3,rpt3,rptt3)
            # self.qbc[:, :, :, mz + self.num_ghost:mz + 2*self.num_ghost] = qold[:, :, :, mz + self.num_ghost:mz + 2*self.num_ghost]
            # self.qbc[:, :, :, 1:self.num_ghost] = qold[:, :, :, 1:self.num_ghost]
            self.qbc, cfl_z = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
                  self.qbc,self.qbc,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
                  self.aux1,self.aux2,self.aux3,self.work,3,rpn3,rpt3,rptt3)
            cfl = max(cfl_x,cfl_y,cfl_z)
        else:
            q, cfl = self.fmod.step3(maxm,self.num_ghost,mx,my,mz, \
                  qold,qnew,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
                  self.aux1,self.aux2,self.aux3,self.work,rpn3,rpt3,rptt3)

        self.cfl.update_global_max(cfl)
        state.set_q_from_qbc(self.num_ghost,self.qbc)
        if state.num_aux > 0:
            state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
    else:
        raise NotImplementedError("No python implementation for step_hyperbolic in 3D.")

r"""
Take a step on the homogeneous hyperbolic system using the Clawpack
algorithm.
Clawpack is based on the Lax-Wendroff method, combined with Riemann
solvers and TVD limiters applied to waves.
"""
if(self.kernel_language == 'Fortran'):
state = solution.states[0]
grid = state.grid
dx,dy,dz = grid.delta
mx,my,mz = grid.num_cells
maxm = max(mx,my,mz)
self.apply_q_bcs(state)
if state.num_aux > 0:
self.apply_aux_bcs(state)
qnew = self.qbc
qold = qnew.copy('F')
rpn3 = self.rp.rpn3._cpointer
rpt3 = self.rp.rpt3._cpointer
rptt3 = self.rp.rptt3._cpointer
if self.dimensional_split:
#Right now only Godunov-dimensional-splitting is implemented.
#Strang-dimensional-splitting could be added following dimsp2.f in Clawpack.
self.qbc, cfl_x = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
qold, self.qbc, self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,1,rpn3,rpt3,rptt3)
# self.qbc[:, :, my + self.num_ghost:my + 2*self.num_ghost, :] = qold[:, :, my + self.num_ghost:my + 2*self.num_ghost, :]
# self.qbc[:, :, 1:self.num_ghost, :] = qold[:, :, 1:self.num_ghost, :]
self.qbc, cfl_y = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
self.qbc, self.qbc, self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,2,rpn3,rpt3,rptt3)
# self.qbc[:, :, :, mz + self.num_ghost:mz + 2*self.num_ghost] = qold[:, :, :, mz + self.num_ghost:mz + 2*self.num_ghost]
# self.qbc[:, :, :, 1:self.num_ghost] = qold[:, :, :, 1:self.num_ghost]
self.qbc, cfl_z = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
self.qbc, self.qbc, self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,3,rpn3,rpt3,rptt3)
cfl = max(cfl_x,cfl_y,cfl_z)
else:
q, cfl = self.fmod.step3(maxm,self.num_ghost,mx,my,mz, \
qold,qnew,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,rpn3,rpt3,rptt3)
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(self.num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
else:
raise NotImplementedError("No python implementation for step_hyperbolic in 3D.") |
def p(self):
    r"""
    Array containing values of derived quantities for output.
    """
    if self._p_da is None: return 0
    shape = self.grid.num_cells
    shape.insert(0,self.mp)
    p = self.gpVec.getArray().reshape(shape, order='F')
    return p

r"""
Array containing values of derived quantities for output.
"""
if self._p_da is None: return 0
shape = self.grid.num_cells
shape.insert(0,self.mp)
p=self.gpVec.getArray().reshape(shape, order = 'F')
return p |
def F(self):
    r"""
    Array containing pointwise values (densities) of output functionals.
    This is just used as temporary workspace before summing.
    """
    if self._F_da is None: return 0
    shape = self.grid.num_cells
    shape.insert(0,self.mF)
    F = self.gFVec.getArray().reshape(shape, order='F')
    return F

r"""
Array containing pointwise values (densities) of output functionals.
This is just used as temporary workspace before summing.
"""
if self._F_da is None: return 0
shape = self.grid.num_cells
shape.insert(0,self.mF)
F=self.gFVec.getArray().reshape(shape, order = 'F')
return F |
def aux(self):
    """
    We never communicate aux values; every processor should set its own ghost cell
    values for the aux array. The global aux vector is used only for outputting
    the aux values to file; everywhere else we use the local vector.
    """
    if self.aux_da is None: return None
    shape = self.grid.num_cells
    shape.insert(0,self.num_aux)
    aux = self.gauxVec.getArray().reshape(shape, order='F')
    return aux

"""
We never communicate aux values; every processor should set its own ghost cell
values for the aux array. The global aux vector is used only for outputting
the aux values to file; everywhere else we use the local vector.
"""
if self.aux_da is None: return None
shape = self.grid.num_cells
shape.insert(0,self.num_aux)
aux=self.gauxVec.getArray().reshape(shape, order = 'F')
return aux |
def _create_DA(self,dof,num_ghost=0):
    r"""Returns a PETSc DA and associated global Vec.
    Note that no local vector is returned.
    """
    from petsc4py import PETSc

    # Due to the way PETSc works, we just make the patch always periodic,
    # regardless of the boundary conditions actually selected.
    # This works because in solver.qbc() we first call globalToLocal()
    # and then impose the real boundary conditions (if non-periodic).
    if hasattr(PETSc.DA, 'PeriodicType'):
        if self.num_dim == 1:
            periodic_type = PETSc.DA.PeriodicType.X
        elif self.num_dim == 2:
            periodic_type = PETSc.DA.PeriodicType.XY
        elif self.num_dim == 3:
            periodic_type = PETSc.DA.PeriodicType.XYZ
        else:
            raise Exception("Invalid number of dimensions")
        DA = PETSc.DA().create(dim=self.num_dim,
                               dof=dof,
                               sizes=self.patch.num_cells_global,
                               periodic_type=periodic_type,
                               stencil_width=num_ghost,
                               comm=PETSc.COMM_WORLD)
    else:
        DA = PETSc.DA().create(dim=self.num_dim,
                               dof=dof,
                               sizes=self.patch.num_cells_global,
                               boundary_type=PETSc.DA.BoundaryType.PERIODIC,
                               stencil_width=num_ghost,
                               comm=PETSc.COMM_WORLD)
    return DA

r"""Returns a PETSc DA and associated global Vec.
Note that no local vector is returned.
"""
from petsc4py import PETSc
#Due to the way PETSc works, we just make the patch always periodic,
#regardless of the boundary conditions actually selected.
#This works because in solver.qbc() we first call globalToLocal()
#and then impose the real boundary conditions (if non-periodic).
if hasattr(PETSc.DA, 'PeriodicType'):
if self.num_dim == 1:
periodic_type = PETSc.DA.PeriodicType.X
elif self.num_dim == 2:
periodic_type = PETSc.DA.PeriodicType.XY
elif self.num_dim == 3:
periodic_type = PETSc.DA.PeriodicType.XYZ
else:
raise Exception("Invalid number of dimensions")
DA = PETSc.DA().create(dim=self.num_dim,
dof=dof,
sizes=self.patch.num_cells_global,
periodic_type = periodic_type,
stencil_width=num_ghost,
comm=PETSc.COMM_WORLD)
else:
DA = PETSc.DA().create(dim=self.num_dim,
dof=dof,
sizes=self.patch.num_cells_global,
boundary_type = PETSc.DA.BoundaryType.PERIODIC,
stencil_width=num_ghost,
comm=PETSc.COMM_WORLD)
return DA |
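A minimal usage sketch (hypothetical values; assumes petsc4py is available and
the surrounding object provides num_dim and patch; the hasattr check above
exists because later petsc4py releases replaced PeriodicType with
BoundaryType):

    # Distributed array with 3 degrees of freedom per cell and 1 ghost cell,
    # plus a global vector created from it (method name as used by the
    # petsc4py version contemporary with this code).
    q_da = self._create_DA(dof=3, num_ghost=1)
    gqVec = q_da.createGlobalVector()
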
def acoustics2D(iplot=False,kernel_language='Fortran',htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic'):
    """
    Example python script for solving the 2d acoustics equations.
    """
    import numpy as np

    if use_petsc:
        from clawpack import petclaw as pyclaw
    else:
        from clawpack import pyclaw

    if solver_type == 'classic':
        solver = pyclaw.ClawSolver2D()
        solver.dimensional_split = True
    elif solver_type == 'sharpclaw':
        solver = pyclaw.SharpClawSolver2D()

    if kernel_language != 'Fortran':
        raise Exception('Unrecognized value of kernel_language for 2D acoustics')

    from clawpack.riemann import rp2_acoustics
    solver.rp = rp2_acoustics

    solver.cfl_max = 0.5
    solver.cfl_desired = 0.45
    solver.num_waves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap

    # Initialize domain
    mx = 100; my = 100
    x = pyclaw.Dimension('x',-1.0,1.0,mx)
    y = pyclaw.Dimension('y',-1.0,1.0,my)
    domain = pyclaw.Domain([x,y])

    num_eqn = 3
    state = pyclaw.State(domain,num_eqn)

    rho  = 1.0            # density
    bulk = 4.0            # bulk modulus
    cc = np.sqrt(bulk/rho)  # sound speed
    zz = rho*cc             # acoustic impedance
    state.problem_data['rho'] = rho
    state.problem_data['bulk'] = bulk
    state.problem_data['zz'] = zz
    state.problem_data['cc'] = cc

    qinit(state)

    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.solution = pyclaw.Solution(state,domain)
    solver.dt_initial = np.min(domain.grid.delta)/state.problem_data['cc']*solver.cfl_desired

    claw.solver = solver
    claw.outdir = outdir

    num_output_times = 10
    claw.num_output_times = num_output_times

    # Solve
    claw.tfinal = 0.12
    status = claw.run()

    if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
    if iplot:    pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)

    return claw.frames[-1].state

"""
Example python script for solving the 2d acoustics equations.
"""
if use_petsc:
from clawpack import petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver=pyclaw.ClawSolver2D()
solver.dimensional_split=True
elif solver_type=='sharpclaw':
solver=pyclaw.SharpClawSolver2D()
if kernel_language != 'Fortran':
raise Exception('Unrecognized value of kernel_language for 2D acoustics')
from clawpack.riemann import rp2_acoustics
solver.rp = rp2_acoustics
solver.cfl_max = 0.5
solver.cfl_desired = 0.45
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
# Initialize domain
mx=100; my=100
x = pyclaw.Dimension('x',-1.0,1.0,mx)
y = pyclaw.Dimension('y',-1.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 3
state = pyclaw.State(domain,num_eqn)
rho = 1.0
bulk = 4.0
cc = np.sqrt(bulk/rho)
zz = rho*cc
state.problem_data['rho']= rho
state.problem_data['bulk']=bulk
state.problem_data['zz']= zz
state.problem_data['cc']=cc
qinit(state)
claw = pyclaw.Controller()
claw.keep_copy = True
claw.solution = pyclaw.Solution(state,domain)
solver.dt_initial=np.min(domain.grid.delta)/state.problem_data['cc']*solver.cfl_desired
claw.solver = solver
claw.outdir = outdir
num_output_times = 10
claw.num_output_times = num_output_times
# Solve
claw.tfinal = 0.12
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)
return claw.frames[-1].state |
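For context, the system solved here is 2D linear acoustics. With the
parameters above (\rho = 1, bulk modulus K = 4, hence sound speed
c = \sqrt{K/\rho} = 2 and impedance Z = \rho c = 2):

    p_t + K\,(u_x + v_y) = 0, \qquad
    u_t + p_x/\rho = 0, \qquad
    v_t + p_y/\rho = 0 .
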
def before_step(solver,solution):
    r"""
    Dummy routine called before each step.

    Replace this routine if you want to do something before each time step.
    """
    pass

r"""
Dummy routine called before each step
Replace this routine if you want to do something before each time step.
"""
pass |
def step(self,solution):
    """Evolve q over one time step.

    Take one Runge-Kutta time step using the method specified by
    self.time_integrator. Currently implemented methods:

    'Euler'  : 1st-order forward Euler integration
    'SSP33'  : 3rd-order strong stability preserving method of Shu & Osher
    'SSP104' : 4th-order strong stability preserving method of Ketcheson
    """
    state = solution.states[0]

    self.before_step(self,solution)

    try:
        if self.time_integrator == 'Euler':
            deltaq = self.dq(state)
            state.q += deltaq

        elif self.time_integrator == 'SSP33':
            deltaq = self.dq(state)
            self._rk_stages[0].q = state.q + deltaq
            self._rk_stages[0].t = state.t + self.dt

            deltaq = self.dq(self._rk_stages[0])
            self._rk_stages[0].q = 0.75*state.q + 0.25*(self._rk_stages[0].q + deltaq)
            self._rk_stages[0].t = state.t + 0.5*self.dt

            deltaq = self.dq(self._rk_stages[0])
            state.q = 1./3.*state.q + 2./3.*(self._rk_stages[0].q + deltaq)

        elif self.time_integrator == 'SSP104':
            s1 = self._rk_stages[0]
            s2 = self._rk_stages[1]
            s1.q = state.q.copy()

            deltaq = self.dq(state)
            s1.q = state.q + deltaq/6.
            s1.t = state.t + self.dt/6.

            for i in xrange(4):
                deltaq = self.dq(s1)
                s1.q = s1.q + deltaq/6.
                s1.t = s1.t + self.dt/6.

            s2.q = state.q/25. + 9./25 * s1.q
            s1.q = 15. * s2.q - 5. * s1.q
            s1.t = state.t + self.dt/3.

            for i in xrange(4):
                deltaq = self.dq(s1)
                s1.q = s1.q + deltaq/6.
                s1.t = s1.t + self.dt/6.

            deltaq = self.dq(s1)
            state.q = s2.q + 0.6 * s1.q + 0.1 * deltaq

        else:
            raise Exception('Unrecognized time integrator')
    except CFLError:
        return False

"""Evolve q over one time step.
Take on Runge-Kutta time step using the method specified by
self..time_integrator. Currently implemented methods:
'Euler' : 1st-order Forward Euler integration
'SSP33' : 3rd-order strong stability preserving method of Shu & Osher
'SSP104' : 4th-order strong stability preserving method Ketcheson
"""
state = solution.states[0]
self.before_step(self,solution)
try:
if self.time_integrator=='Euler':
deltaq=self.dq(state)
state.q+=deltaq
elif self.time_integrator=='SSP33':
deltaq=self.dq(state)
self._rk_stages[0].q=state.q+deltaq
self._rk_stages[0].t =state.t+self.dt
deltaq=self.dq(self._rk_stages[0])
self._rk_stages[0].q= 0.75*state.q + 0.25*(self._rk_stages[0].q+deltaq)
self._rk_stages[0].t = state.t+0.5*self.dt
deltaq=self.dq(self._rk_stages[0])
state.q = 1./3.*state.q + 2./3.*(self._rk_stages[0].q+deltaq)
elif self.time_integrator=='SSP104':
s1=self._rk_stages[0]
s2=self._rk_stages[1]
s1.q = state.q.copy()
deltaq=self.dq(state)
s1.q = state.q + deltaq/6.
s1.t = state.t + self.dt/6.
for i in xrange(4):
deltaq=self.dq(s1)
s1.q=s1.q + deltaq/6.
s1.t =s1.t + self.dt/6.
s2.q = state.q/25. + 9./25 * s1.q
s1.q = 15. * s2.q - 5. * s1.q
s1.t = state.t + self.dt/3.
for i in xrange(4):
deltaq=self.dq(s1)
s1.q=s1.q + deltaq/6.
s1.t =s1.t + self.dt/6.
deltaq = self.dq(s1)
state.q = s2.q + 0.6 * s1.q + 0.1 * deltaq
else:
raise Exception('Unrecognized time integrator')
except CFLError:
return False |
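Written out in Shu-Osher form, with L(q) the spatial operator so that
deltaq = \Delta t\,L(q), the SSP33 branch above computes

    q^{(1)} = q^n + \Delta t\,L(q^n),
    q^{(2)} = \tfrac{3}{4} q^n + \tfrac{1}{4}\left(q^{(1)} + \Delta t\,L(q^{(1)})\right),
    q^{n+1} = \tfrac{1}{3} q^n + \tfrac{2}{3}\left(q^{(2)} + \Delta t\,L(q^{(2)})\right).

The SSP104 branch is the low-storage ten-stage, fourth-order method of
Ketcheson, built from the same kind of convex combinations of forward Euler
steps.
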
def dqdt(self,state):
    """
    Evaluate dq/dt. This routine is used for implicit time stepping.
    """
    self.dt = 1  # dq_hyperbolic returns dq/dt * dt, so dt = 1 yields dq/dt itself
    deltaq = self.dq_hyperbolic(state)

    if self.dq_src is not None:
        deltaq += self.dq_src(self,state,self.dt)

    return deltaq.flatten('f')

"""
Evaluate dq/dt. This routine is used for implicit time stepping.
"""
self.dt = 1
deltaq = self.dq_hyperbolic(state)
if self.dq_src is not None:
deltaq+=self.dq_src(self,state,self.dt)
return deltaq.flatten('f') |
def allocate_rk_stages(self,solution):
    r"""
    Instantiate State objects for Runge-Kutta stages.

    This routine is only used by method-of-lines solvers (SharpClaw),
    not by the Classic solvers. It allocates additional State objects
    to store the intermediate stages used by Runge-Kutta time integrators.

    If we create a MethodOfLinesSolver subclass, this should be moved there.
    """
    if   self.time_integrator == 'Euler':  nregisters = 1
    elif self.time_integrator == 'SSP33':  nregisters = 2
    elif self.time_integrator == 'SSP104': nregisters = 3

    state = solution.states[0]
    # Use the same class constructor as the solution for the Runge-Kutta stages
    State = type(state)
    self._rk_stages = []
    for i in xrange(nregisters-1):
        # Maybe we should use State.copy() here?
        self._rk_stages.append(State(state.patch,state.num_eqn,state.num_aux))
        self._rk_stages[-1].problem_data = state.problem_data
        self._rk_stages[-1].set_num_ghost(self.num_ghost)
        self._rk_stages[-1].t = state.t
        if state.num_aux > 0:
            self._rk_stages[-1].aux = state.aux

r"""
Instantiate State objects for Runge--Kutta stages.
This routine is only used by method-of-lines solvers (SharpClaw),
not by the Classic solvers. It allocates additional State objects
to store the intermediate stages used by Runge--Kutta time integrators.
If we create a MethodOfLinesSolver subclass, this should be moved there.
"""
if self.time_integrator == 'Euler': nregisters=1
elif self.time_integrator == 'SSP33': nregisters=2
elif self.time_integrator == 'SSP104': nregisters=3
state = solution.states[0]
# use the same class constructor as the solution for the Runge Kutta stages
State = type(state)
self._rk_stages = []
for i in xrange(nregisters-1):
#Maybe should use State.copy() here?
self._rk_stages.append(State(state.patch,state.num_eqn,state.num_aux))
self._rk_stages[-1].problem_data = state.problem_data
self._rk_stages[-1].set_num_ghost(self.num_ghost)
self._rk_stages[-1].t = state.t
if state.num_aux > 0:
self._rk_stages[-1].aux = state.aux |
def teardown(self):
    r"""
    Deallocate F90 module arrays.
    Also delete Fortran objects, which otherwise tend to persist in Python sessions.
    """
    if self.kernel_language == 'Fortran':
        self.fmod.clawparams.dealloc_clawparams()
        self.fmod.workspace.dealloc_workspace(self.char_decomp)
        self.fmod.reconstruct.dealloc_recon_workspace(self.fmod.clawparams.lim_type,self.fmod.clawparams.char_decomp)
        del self.fmod

r"""
Deallocate F90 module arrays.
Also delete Fortran objects, which otherwise tend to persist in Python sessions.
"""
if self.kernel_language=='Fortran':
self.fmod.clawparams.dealloc_clawparams()
self.fmod.workspace.dealloc_workspace(self.char_decomp)
self.fmod.reconstruct.dealloc_recon_workspace(self.fmod.clawparams.lim_type,self.fmod.clawparams.char_decomp)
del self.fmod |
def dq_hyperbolic(self,state):
    r"""
    Compute dq/dt * (delta t) for the hyperbolic system.

    Note that the capa array, if present, should be located in the aux
    variable.

    Indexing works like this (here num_ghost=2 as an example)::

       0     1     2     3     4     mx+num_ghost-2     mx+num_ghost      mx+num_ghost+2
                    |                      mx+num_ghost-1 |  mx+num_ghost+1
       |     |     |     |     |   ...   |     |     |     |     |
          0     1  |  2     3            mx+num_ghost-2  |mx+num_ghost
                            mx+num_ghost-1   mx+num_ghost+1

    The top indices represent the values that are located on the grid
    cell boundaries, such as waves, s and other Riemann problem values;
    the bottom, the cell-centered values such as q. In particular
    the ith grid cell boundary has the following related information::

                i-1         i         i+1
                 |          |          |
                 |   i-1    |    i     |
                 |          |          |

    Again, grid cell boundary quantities are at the top, cell-centered
    values are in the cell.
    """
    import numpy as np

    self.apply_q_bcs(state)
    if state.num_aux > 0:
        self.apply_aux_bcs(state)
    q = self.qbc

    grid = state.grid
    mx = grid.num_cells[0]

    ixy = 1

    if self.kernel_language == 'Fortran':
        rp1 = self.rp.rp1._cpointer
        dq,cfl = self.fmod.flux1(q,self.auxbc,self.dt,state.t,ixy,mx,self.num_ghost,mx,rp1)

    elif self.kernel_language == 'Python':
        dtdx = np.zeros( (mx+2*self.num_ghost) ,order='F')
        dq   = np.zeros( (state.num_eqn,mx+2*self.num_ghost) ,order='F')

        # Find local value for dt/dx
        if state.index_capa >= 0:
            dtdx = self.dt / (grid.delta[0] * state.aux[state.index_capa,:])
        else:
            dtdx += self.dt/grid.delta[0]

        aux = self.auxbc
        if aux.shape[0] > 0:
            aux_l = aux[:,:-1]
            aux_r = aux[:,1: ]
        else:
            aux_l = None
            aux_r = None

        # Reconstruct (wave reconstruction uses a Riemann solve)
        if self.lim_type == -1:     # 1st-order Godunov
            ql = q; qr = q
        elif self.lim_type == 0:    # Unlimited reconstruction
            raise NotImplementedError('Unlimited reconstruction not implemented')
        elif self.lim_type == 1:    # TVD reconstruction
            raise NotImplementedError('TVD reconstruction not implemented')
        elif self.lim_type == 2:    # WENO reconstruction
            if self.char_decomp == 0:    # No characteristic decomposition
                ql,qr = recon.weno(5,q)
            elif self.char_decomp == 1:  # Wave-based reconstruction
                q_l = q[:,:-1]
                q_r = q[:,1: ]
                wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
                ql,qr = recon.weno5_wave(q,wave,s)
            elif self.char_decomp == 2:  # Characteristic-wise reconstruction
                raise NotImplementedError

        # Solve Riemann problem at each interface
        q_l = qr[:,:-1]
        q_r = ql[:,1: ]
        wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)

        # Loop limits for local portion of grid
        # THIS WON'T WORK IN PARALLEL!
        LL = self.num_ghost - 1
        UL = grid.num_cells[0] + self.num_ghost + 1

        # Compute maximum wave speed
        cfl = 0.0
        for mw in xrange(self.num_waves):
            smax1 = np.max( dtdx[LL  :UL]  *s[mw,LL-1:UL-1])
            smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
            cfl = max(cfl,smax1,smax2)

        # Find total fluctuation within each cell
        wave,s,amdq2,apdq2 = self.rp(ql,qr,aux,aux,state.problem_data)

        # Compute dq
        for m in xrange(state.num_eqn):
            dq[m,LL:UL] = -dtdx[LL:UL]*(amdq[m,LL:UL] + apdq[m,LL-1:UL-1] \
                            + apdq2[m,LL:UL] + amdq2[m,LL:UL])

    else:
        raise Exception('Unrecognized value of solver.kernel_language.')

    self.cfl.update_global_max(cfl)
    return dq[:,self.num_ghost:-self.num_ghost]

r"""
Compute dq/dt * (delta t) for the hyperbolic hyperbolic system.
Note that the capa array, if present, should be located in the aux
variable.
Indexing works like this (here num_ghost=2 as an example)::
0 1 2 3 4 mx+num_ghost-2 mx+num_ghost mx+num_ghost+2
| mx+num_ghost-1 | mx+num_ghost+1
| | | | | ... | | | | |
0 1 | 2 3 mx+num_ghost-2 |mx+num_ghost
mx+num_ghost-1 mx+num_ghost+1
The top indices represent the values that are located on the grid
cell boundaries such as waves, s and other Riemann problem values,
the bottom for the cell centered values such as q. In particular
the ith grid cell boundary has the following related information::
i-1 i i+1
| | |
| i-1 | i |
| | |
Again, grid cell boundary quantities are at the top, cell centered
values are in the cell.
"""
import numpy as np
self.apply_q_bcs(state)
if state.num_aux > 0:
self.apply_aux_bcs(state)
q = self.qbc
grid = state.grid
mx = grid.num_cells[0]
ixy=1
if self.kernel_language=='Fortran':
rp1 = self.rp.rp1._cpointer
dq,cfl=self.fmod.flux1(q,self.auxbc,self.dt,state.t,ixy,mx,self.num_ghost,mx,rp1)
elif self.kernel_language=='Python':
dtdx = np.zeros( (mx+2*self.num_ghost) ,order='F')
dq = np.zeros( (state.num_eqn,mx+2*self.num_ghost) ,order='F')
# Find local value for dt/dx
if state.index_capa>=0:
dtdx = self.dt / (grid.delta[0] * state.aux[state.index_capa,:])
else:
dtdx += self.dt/grid.delta[0]
aux=self.auxbc
if aux.shape[0]>0:
aux_l=aux[:,:-1]
aux_r=aux[:,1: ]
else:
aux_l = None
aux_r = None
#Reconstruct (wave reconstruction uses a Riemann solve)
if self.lim_type==-1: #1st-order Godunov
ql=q; qr=q
elif self.lim_type==0: #Unlimited reconstruction
raise NotImplementedError('Unlimited reconstruction not implemented')
elif self.lim_type==1: #TVD Reconstruction
raise NotImplementedError('TVD reconstruction not implemented')
elif self.lim_type==2: #WENO Reconstruction
if self.char_decomp==0: #No characteristic decomposition
ql,qr=recon.weno(5,q)
elif self.char_decomp==1: #Wave-based reconstruction
q_l=q[:,:-1]
q_r=q[:,1: ]
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
ql,qr=recon.weno5_wave(q,wave,s)
elif self.char_decomp==2: #Characteristic-wise reconstruction
raise NotImplementedError
# Solve Riemann problem at each interface
q_l=qr[:,:-1]
q_r=ql[:,1: ]
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
# Loop limits for local portion of grid
# THIS WON'T WORK IN PARALLEL!
LL = self.num_ghost - 1
UL = grid.num_cells[0] + self.num_ghost + 1
# Compute maximum wave speed
cfl = 0.0
for mw in xrange(self.num_waves):
smax1 = np.max( dtdx[LL :UL] *s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
#Find total fluctuation within each cell
wave,s,amdq2,apdq2 = self.rp(ql,qr,aux,aux,state.problem_data)
# Compute dq
for m in xrange(state.num_eqn):
dq[m,LL:UL] = -dtdx[LL:UL]*(amdq[m,LL:UL] + apdq[m,LL-1:UL-1] \
+ apdq2[m,LL:UL] + amdq2[m,LL:UL])
else:
raise Exception('Unrecognized value of solver.kernel_language.')
self.cfl.update_global_max(cfl)
return dq[:,self.num_ghost:-self.num_ghost] |
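The Python branch assembles the SharpClaw semi-discretization: after WENO
reconstruction of the interface values ql and qr, cell i is updated from the
fluctuations at its two interfaces plus the total fluctuation inside the cell,

    \frac{dQ_i}{dt} = -\frac{1}{\Delta x}\left(
        \mathcal{A}^+\Delta Q_{i-1/2} + \mathcal{A}^-\Delta Q_{i+1/2}
        + \mathcal{A}\Delta Q_i \right),

where \mathcal{A}\Delta Q_i (amdq2 + apdq2 in the code) comes from a Riemann
problem between the reconstructed edge values within cell i.
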
def dq_hyperbolic(self,state):
    """Compute dq/dt * (delta t) for the hyperbolic system.

    Note that the capa array, if present, should be located in the aux
    variable.

    Indexing works like this (here num_ghost=2 as an example)::

       0     1     2     3     4     mx+num_ghost-2     mx+num_ghost      mx+num_ghost+2
                    |                      mx+num_ghost-1 |  mx+num_ghost+1
       |     |     |     |     |   ...   |     |     |     |     |
          0     1  |  2     3            mx+num_ghost-2  |mx+num_ghost
                            mx+num_ghost-1   mx+num_ghost+1

    The top indices represent the values that are located on the grid
    cell boundaries, such as waves, s and other Riemann problem values;
    the bottom, the cell-centered values such as q. In particular
    the ith grid cell boundary has the following related information::

                i-1         i         i+1
                 |          |          |
                 |   i-1    |    i     |
                 |          |          |

    Again, grid cell boundary quantities are at the top, cell-centered
    values are in the cell.
    """
    self.apply_q_bcs(state)
    if state.num_aux > 0:
        self.apply_aux_bcs(state)
    q = self.qbc

    grid = state.grid

    num_ghost = self.num_ghost
    mx = grid.num_cells[0]
    my = grid.num_cells[1]
    maxm = max(mx,my)

    if self.kernel_language == 'Fortran':
        rpn2 = self.rp.rpn2._cpointer
        dq,cfl = self.fmod.flux2(q,self.auxbc,self.dt,state.t,num_ghost,maxm,mx,my,rpn2)
    else:
        raise Exception('Only Fortran kernels are supported in 2D.')

    self.cfl.update_global_max(cfl)
    return dq[:,num_ghost:-num_ghost,num_ghost:-num_ghost]

"""Compute dq/dt * (delta t) for the hyperbolic hyperbolic system
Note that the capa array, if present, should be located in the aux
variable.
Indexing works like this (here num_ghost=2 as an example)::
0 1 2 3 4 mx+num_ghost-2 mx+num_ghost mx+num_ghost+2
| mx+num_ghost-1 | mx+num_ghost+1
| | | | | ... | | | | |
0 1 | 2 3 mx+num_ghost-2 |mx+num_ghost
mx+num_ghost-1 mx+num_ghost+1
The top indices represent the values that are located on the grid
cell boundaries such as waves, s and other Riemann problem values,
the bottom for the cell centered values such as q. In particular
the ith grid cell boundary has the following related information::
i-1 i i+1
| | |
| i-1 | i |
| | |
Again, grid cell boundary quantities are at the top, cell centered
values are in the cell.
"""
self.apply_q_bcs(state)
if state.num_aux > 0:
self.apply_aux_bcs(state)
q = self.qbc
grid = state.grid
num_ghost=self.num_ghost
mx=grid.num_cells[0]
my=grid.num_cells[1]
maxm = max(mx,my)
if self.kernel_language=='Fortran':
rpn2 = self.rp.rpn2._cpointer
dq,cfl=self.fmod.flux2(q,self.auxbc,self.dt,state.t,num_ghost,maxm,mx,my,rpn2)
else: raise Exception('Only Fortran kernels are supported in 2D.')
self.cfl.update_global_max(cfl)
return dq[:,num_ghost:-num_ghost,num_ghost:-num_ghost] |
def acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',**kwargs):
    """
    Example python script for solving the 3d acoustics equations.
    """
    #===========================================================================
    # Import libraries
    #===========================================================================
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw
    import clawpack.peanoclaw as peanoclaw  # needed for the Peano AMR solver below

    #===========================================================================
    # Setup solver and solver parameters
    #===========================================================================
    subdivisionFactor = 6
    if solver_type == 'classic':
        solver = pyclaw.ClawSolver3D()
    else:
        raise Exception('Unrecognized solver_type.')

    from clawpack import riemann

    # Peano solver (init is defined elsewhere in this example module)
    peanoSolver = peanoclaw.Solver(solver, (2./3.)/subdivisionFactor, init)

    solver.rp = riemann.rp3_vc_acoustics
    solver.num_waves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap
    solver.bc_lower[2] = pyclaw.BC.extrap
    solver.bc_upper[2] = pyclaw.BC.extrap

    solver.aux_bc_lower[0] = pyclaw.BC.extrap
    solver.aux_bc_upper[0] = pyclaw.BC.extrap
    solver.aux_bc_lower[1] = pyclaw.BC.extrap
    solver.aux_bc_upper[1] = pyclaw.BC.extrap
    solver.aux_bc_lower[2] = pyclaw.BC.extrap
    solver.aux_bc_upper[2] = pyclaw.BC.extrap

    solver.dimensional_split = True
    solver.limiters = pyclaw.limiters.tvd.MC

    #===========================================================================
    # Initialize domain and state, then initialize the solution associated to the
    # state and finally initialize aux array
    #===========================================================================
    # Initialize domain
    mx = subdivisionFactor
    my = subdivisionFactor
    mz = subdivisionFactor
    x = pyclaw.Dimension('x', -1.0, 1.0, mx)
    y = pyclaw.Dimension('y', -1.0, 1.0, my)
    z = pyclaw.Dimension('z', -1.0, 1.0, mz)
    domain = pyclaw.Domain([x,y,z])

    num_eqn = 4
    num_aux = 2  # density, sound speed
    state = pyclaw.State(domain,num_eqn,num_aux)

    #===========================================================================
    # Set up controller and controller parameters
    #===========================================================================
    claw = pyclaw.Controller()
    claw.tfinal = 2.0
    claw.keep_copy = True
    claw.solution = peanoclaw.solution.Solution(state,domain)  # pyclaw.Solution(state,domain)
    claw.solver = peanoSolver  # solver
    claw.outdir = outdir
    claw.num_output_times = 20

    #===========================================================================
    # Solve the problem
    #===========================================================================
    def _probe(solver, solution):
        dim_x = solution.states[0].patch.dimensions[0]
        dim_y = solution.states[0].patch.dimensions[1]
        dim_z = solution.states[0].patch.dimensions[2]
        if abs(dim_x.lower + 1./3.) < 1e-8 and abs(dim_y.lower + 1./3.) < 1e-8 and abs(dim_z.lower - 1./3.) < 1e-8:
            print "PROBE"
            print solver.qbc[0,:,3,:]
        if abs(dim_x.lower - 1./3.) < 1e-8 and abs(dim_y.lower + 1./3.) < 1e-8 and abs(dim_z.lower + 1./3.) < 1e-8:
            print "PROBE1"
            print solver.qbc[0,:,3,:]
    # solver.before_step = _probe

    status = claw.run()

    #===========================================================================
    # Plot results
    #===========================================================================
    if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
    if iplot:    pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)

"""
Example python script for solving the 3d acoustics equations.
"""
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
subdivisionFactor = 6
if solver_type=='classic':
solver=pyclaw.ClawSolver3D()
else:
raise Exception('Unrecognized solver_type.')
from clawpack import riemann
# Peano Solver
peanoSolver = peanoclaw.Solver(solver, (2./3.)/subdivisionFactor, init)
solver.rp = riemann.rp3_vc_acoustics
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
solver.bc_lower[2]=pyclaw.BC.extrap
solver.bc_upper[2]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.extrap
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.extrap
solver.aux_bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[2]=pyclaw.BC.extrap
solver.aux_bc_upper[2]=pyclaw.BC.extrap
solver.dimensional_split=True
solver.limiters = pyclaw.limiters.tvd.MC
#===========================================================================
# Initialize domain and state, then initialize the solution associated to the
# state and finally initialize aux array
#===========================================================================
# Initialize domain
mx = subdivisionFactor
my = subdivisionFactor
mz = subdivisionFactor
x = pyclaw.Dimension('x', -1.0, 1.0, mx)
y = pyclaw.Dimension('y', -1.0, 1.0, my)
z = pyclaw.Dimension('z', -1.0, 1.0, mz)
domain = pyclaw.Domain([x,y,z])
num_eqn = 4
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = 2.0
claw.keep_copy = True
claw.solution = peanoclaw.solution.Solution(state,domain) #pyclaw.Solution(state,domain)
claw.solver = peanoSolver #solver
claw.outdir=outdir
claw.num_output_times = 20
#===========================================================================
# Solve the problem
#===========================================================================
def _probe( solver , solution):
dim_x = solution.states[0].patch.dimensions[0]
dim_y = solution.states[0].patch.dimensions[1]
dim_z = solution.states[0].patch.dimensions[2]
if abs( dim_x.lower + 1./3. ) < 1e-8 and abs(dim_y.lower + 1./3. ) < 1e-8 and abs( dim_z.lower - 1./3. ) < 1e-8:
print "PROBE"
print solver.qbc[0,:,3,:]
if abs( dim_x.lower - 1./3. ) < 1e-8 and abs(dim_y.lower + 1./3. ) < 1e-8 and abs( dim_z.lower + 1./3. ) < 1e-8:
print "PROBE1"
print solver.qbc[0,:,3,:]
# solver.before_step = _probe
status = claw.run()
#===========================================================================
# Plot results
#===========================================================================
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format) |
def auxinit(state):
    """
    aux[0,i,j] = y-coordinate of cell centers, for cylindrical source terms
    """
    x = state.grid.x.centers
    y = state.grid.y.centers
    for j,ycoord in enumerate(y):
        state.aux[0,:,j] = ycoord

"""
aux[1,i,j] = y-coordinate of cell centers for cylindrical source terms
"""
x=state.grid.x.centers
y=state.grid.y.centers
for j,ycoord in enumerate(y):
state.aux[0,:,j] = ycoord |
def shockbc(state,dim,t,qbc,num_ghost):
    """
    Incoming shock at left boundary.
    """
    for (i,state_dim) in enumerate(state.patch.dimensions):
        if state_dim.name == dim.name:
            dim_index = i
            break

    if (state.patch.dimensions[dim_index].lower ==
        state.grid.dimensions[dim_index].lower):
        pinf = 5.
        rinf = (gamma1 + pinf*(gamma+1.)) / ((gamma+1.) + gamma1*pinf)
        vinf = 1./np.sqrt(gamma) * (pinf - 1.) / np.sqrt(0.5*((gamma+1.)/gamma)*pinf + 0.5*gamma1/gamma)
        einf = 0.5*rinf*vinf**2 + pinf/gamma1

        for i in xrange(num_ghost):
            qbc[0,i,...] = rinf
            qbc[1,i,...] = rinf*vinf
            qbc[2,i,...] = 0.
            qbc[3,i,...] = einf
            qbc[4,i,...] = 0.

"""
Incoming shock at left boundary.
"""
for (i,state_dim) in enumerate(state.patch.dimensions):
if state_dim.name == dim.name:
dim_index = i
break
if (state.patch.dimensions[dim_index].lower ==
state.grid.dimensions[dim_index].lower):
pinf=5.
rinf = (gamma1 + pinf*(gamma+1.))/ ((gamma+1.) + gamma1*pinf)
vinf = 1./np.sqrt(gamma) * (pinf - 1.) / np.sqrt(0.5*((gamma+1.)/gamma) * pinf+0.5*gamma1/gamma)
einf = 0.5*rinf*vinf**2 + pinf/gamma1
for i in xrange(num_ghost):
qbc[0,i,...] = rinf
qbc[1,i,...] = rinf*vinf
qbc[2,i,...] = 0.
qbc[3,i,...] = einf
qbc[4,i,...] = 0. |
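The ghost-cell values above are the post-shock state for a shock of pressure
ratio p_\infty = 5 relative to an upstream state normalized to \rho = p = 1;
with gamma1 = \gamma - 1, the Rankine-Hugoniot conditions give

    \rho_\infty = \frac{(\gamma-1) + (\gamma+1)\,p_\infty}{(\gamma+1) + (\gamma-1)\,p_\infty}, \qquad
    v_\infty = \frac{p_\infty - 1}{\sqrt{\gamma}\,\sqrt{\frac{\gamma+1}{2\gamma}\,p_\infty + \frac{\gamma-1}{2\gamma}}}, \qquad
    E_\infty = \tfrac{1}{2}\rho_\infty v_\infty^2 + \frac{p_\infty}{\gamma-1}.
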
def shockbubble(use_petsc=False,kernel_language='Fortran',solver_type='classic',iplot=False,htmlplot=False,amr_type=None):
    """
    Solve the Euler equations of compressible fluid dynamics.
    This example involves a bubble of dense gas that is impacted by a shock.
    """
    #===========================================================================
    # Import libraries
    #===========================================================================
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    if kernel_language != 'Fortran':
        raise Exception('Unrecognized value of kernel_language for Euler Shockbubble')

    if solver_type == 'sharpclaw':
        solver = pyclaw.SharpClawSolver2D()
        solver.dq_src = dq_Euler_radial
        solver.weno_order = 5
        solver.lim_type = 2
    else:
        solver = pyclaw.ClawSolver2D()
        solver.limiters = [4,4,4,4,2]
        solver.step_source = step_Euler_radial

    #===========================================================================
    # Initialize domain
    #===========================================================================
    # Resolution of each grid
    mx = 160; my = 40
    # Number of initial AMR grids in each dimension
    msubgrid = 1

    if amr_type is None:
        # Number of Domain grid cells expressed as the product of
        # grid resolution and the number of AMR sub-grids, for
        # easy comparison between the two methods
        mx = mx*msubgrid
        my = my*msubgrid

    x = pyclaw.Dimension('x',0.0,2.0,mx)
    y = pyclaw.Dimension('y',0.0,0.5,my)
    domain = pyclaw.Domain([x,y])
    num_eqn = 5
    num_aux = 1
    state = pyclaw.State(domain,num_eqn,num_aux)

    state.problem_data['gamma']  = gamma
    state.problem_data['gamma1'] = gamma1

    qinit(state)
    auxinit(state)
    initial_solution = pyclaw.Solution(state,domain)

    from clawpack import riemann
    solver.rp = riemann.rp2_euler_5wave

    solver.cfl_max = 0.5
    solver.cfl_desired = 0.45
    solver.num_waves = 5
    solver.dt_initial = 0.005
    solver.user_bc_lower = shockbc
    solver.source_split = 1
    solver.bc_lower[0] = pyclaw.BC.custom
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.wall
    solver.bc_upper[1] = pyclaw.BC.extrap
    # Aux variable in ghost cells doesn't matter
    solver.aux_bc_lower[0] = pyclaw.BC.extrap
    solver.aux_bc_upper[0] = pyclaw.BC.extrap
    solver.aux_bc_lower[1] = pyclaw.BC.extrap
    solver.aux_bc_upper[1] = pyclaw.BC.extrap

    #===========================================================================
    # Set up controller and controller parameters
    #===========================================================================
    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.tfinal = 0.2
    claw.solution = initial_solution
    claw.num_output_times = 1

    if amr_type is not None:
        if amr_type == 'peano':
            import clawpack.peanoclaw as amrclaw
            claw.solver = amrclaw.Solver(solver,
                                         (x.upper-x.lower)/(mx*msubgrid),
                                         qinit, auxinit)
            claw.solution = amrclaw.Solution(state, domain)
        else:
            raise Exception('unsupported amr_type %s' % amr_type)
    else:
        claw.solver = solver
        claw.solution = pyclaw.Solution(state,domain)

    # Solve
    status = claw.run()

    if htmlplot: pyclaw.plot.html_plot(file_format=claw.output_format)
    if iplot:    pyclaw.plot.interactive_plot(file_format=claw.output_format)

    return claw.frames[claw.num_output_times].state

"""
Solve the Euler equations of compressible fluid dynamics.
This example involves a bubble of dense gas that is impacted by a shock.
"""
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if kernel_language != 'Fortran':
raise Exception('Unrecognized value of kernel_language for Euler Shockbubble')
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver2D()
solver.dq_src=dq_Euler_radial
solver.weno_order=5
solver.lim_type=2
else:
solver = pyclaw.ClawSolver2D()
solver.limiters = [4,4,4,4,2]
solver.step_source=step_Euler_radial
#===========================================================================
# Initialize domain
#===========================================================================
# resolution of each grid
mx=160; my=40
# number of initial AMR grids in each dimension
msubgrid = 1
if amr_type is None:
# number of Domain grid cells expressed as the product of
# grid resolution and the number of AMR sub-grids for
# easy comparison between the two methods
mx = mx*msubgrid
my = my*msubgrid
x = pyclaw.Dimension('x',0.0,2.0,mx)
y = pyclaw.Dimension('y',0.0,0.5,my)
domain = pyclaw.Domain([x,y])
num_eqn = 5
num_aux = 1
state = pyclaw.State(domain,num_eqn,num_aux)
state.problem_data['gamma']= gamma
state.problem_data['gamma1']= gamma1
qinit(state)
auxinit(state)
initial_solution = pyclaw.Solution(state,domain)
from clawpack import riemann
solver.rp = riemann.rp2_euler_5wave
solver.cfl_max = 0.5
solver.cfl_desired = 0.45
solver.num_waves = 5
solver.dt_initial=0.005
solver.user_bc_lower=shockbc
solver.source_split = 1
solver.bc_lower[0]=pyclaw.BC.custom
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.wall
solver.bc_upper[1]=pyclaw.BC.extrap
#Aux variable in ghost cells doesn't matter
solver.aux_bc_lower[0]=pyclaw.BC.extrap
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.extrap
solver.aux_bc_upper[1]=pyclaw.BC.extrap
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.keep_copy = True
claw.tfinal = 0.2
claw.solution = initial_solution
claw.num_output_times = 1
if amr_type is not None:
if amr_type == 'peano':
import clawpack.peanoclaw as amrclaw
claw.solver = amrclaw.Solver(solver,
(x.upper-x.lower)/(mx*msubgrid),
qinit, auxinit)
claw.solution = amrclaw.Solution(state, domain)
else:
raise Exception('unsupported amr_type %s' % amr_type)
else:
claw.solver = solver
claw.solution = pyclaw.Solution(state,domain)
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(file_format=claw.output_format)
return claw.frames[claw.num_output_times].state |
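A minimal driver sketch for the routine above; the module name "shock_bubble" is an assumption for illustration, not part of the source.
# Hedged usage sketch; the module name "shock_bubble" is an assumption.
from shock_bubble import shockbubble
# Run the shock-bubble problem with the classic solver and produce HTML plots.
final_state = shockbubble(solver_type='classic', htmlplot=True)
print(final_state.t)  # equals claw.tfinal (0.2) on success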
def verify(test_state):
""" verifies 2d homogeneous acoustics from a previously verified run """
import os
import numpy as np
from clawpack.pyclaw.util import check_diff
#grabs parallel results to process 0, None to other processes
test_q=test_state.get_q_global()
if test_q is not None:
test_pressure = test_q[0,:,:]
thisdir = os.path.dirname(__file__)
expected_pressure = np.loadtxt(os.path.join(thisdir,data_filename))
test_err = np.linalg.norm(expected_pressure-test_pressure)
expected_err = 0
return check_diff(expected_err, test_err, abstol=1e-1)
else:
return
def write_ascii(solution,frame,path,file_prefix='fort',write_aux=False,
options={},write_p=False):
r"""
Write out ascii data file
Write out an ascii file formatted identically to the fortran clawpack files,
including writing out fort.t, fort.q, and fort.aux if necessary. Note
that some parameters are assumed to be the same for every patch
in this format, which is not necessarily true for the actual data objects.
Make sure that if you use this output format, all of your patches share
the appropriate values of num_dim, num_eqn, num_aux, and t. Only supports up to
3 dimensions.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) Pyclaw object to be
output.
- *frame* - (int) Frame number
- *path* - (string) Root path
- *file_prefix* - (string) Prefix for the file name. ``default = 'fort'``
- *write_aux* - (bool) Boolean controlling whether the associated
auxiliary array should be written out. ``default = False``
- *options* - (dict) Dictionary of optional arguments dependent on
the format being written. ``default = {}``
"""
try:
# Create file name
file_name = '%s.t%s' % (file_prefix,str(frame).zfill(4))
f = open(os.path.join(path,file_name),'w')
# Header for fort.txxxx file
f.write("%18.8e time\n" % solution.t)
f.write("%5i num_eqn\n" % solution.num_eqn)
f.write("%5i nstates\n" % len(solution.states))
f.write("%5i num_aux\n" % solution.num_aux)
f.write("%5i num_dim\n" % solution.domain.num_dim)
f.close()
# Open fort.qxxxx for writing
file_name = 'fort.q%s' % str(frame).zfill(4)
q_file = open(os.path.join(path,file_name),'w')
# If num_aux != 0 then we open up a file to write it out as well
if solution.num_aux > 0 and write_aux:
file_name = 'fort.a%s' % str(frame).zfill(4)
aux_file = open(os.path.join(path,file_name),'w')
# for i in range(0,len(solution.patchs)):
for state in solution.states:
patch = state.patch
# Header for fort.qxxxx file
q_file.write("%5i patch_number\n" % patch.patch_index)
q_file.write("%5i AMR_level\n" % patch.level)
for dim in patch.dimensions:
q_file.write("%5i m%s\n" % (dim.num_cells,dim.name))
for dim in patch.dimensions:
q_file.write("%18.8e %slow\n" % (dim.lower,dim.name))
for dim in patch.dimensions:
q_file.write("%18.8e d%s\n" % (dim.delta,dim.name))
q_file.write("\n")
# Write data from q
if write_p:
q = state.p
else:
q = state.q
dims = patch.dimensions
if patch.num_dim == 1:
for k in xrange(dims[0].num_cells):
for m in xrange(state.num_eqn):
q_file.write("%18.8e" % q[m,k])
q_file.write('\n')
elif patch.num_dim == 2:
for j in xrange(dims[1].num_cells):
for k in xrange(dims[0].num_cells):
for m in xrange(state.num_eqn):
q_file.write("%18.8e" % q[m,k,j])
q_file.write('\n')
q_file.write('\n')
elif patch.num_dim == 3:
for l in xrange(dims[2].num_cells):
for j in xrange(dims[1].num_cells):
for k in xrange(dims[0].num_cells):
for m in range(state.num_eqn):
q_file.write("%18.8e" % q[m,k,j,l])
q_file.write('\n')
q_file.write('\n')
q_file.write('\n')
else:
raise Exception("Dimension Exception in writing fort file.")
if state.num_aux > 0 and write_aux:
aux = state.aux
aux_file.write("%5i patch_number\n" % patch.patch_index)
aux_file.write("%5i AMR_level\n" % patch.level)
for dim in patch.dimensions:
aux_file.write("%5i m%s\n" % (dim.num_cells,dim.name))
for dim in patch.dimensions:
aux_file.write("%18.8e %slow\n" % (dim.lower,dim.name))
for dim in patch.dimensions:
aux_file.write("%18.8e d%s\n" % (dim.delta,dim.name))
aux_file.write("\n")
dims = patch.dimensions
if patch.num_dim == 1:
for k in xrange(dims[0].num_cells):
for m in xrange(state.num_aux):
aux_file.write("%18.8e" % aux[m,k])
aux_file.write('\n')
elif patch.num_dim == 2:
for j in xrange(dims[1].num_cells):
for k in xrange(dims[0].num_cells):
for m in xrange(state.num_aux):
aux_file.write("%18.8e" % aux[m,k,j])
aux_file.write('\n')
aux_file.write('\n')
elif patch.num_dim == 3:
for l in xrange(dims[2].num_cells):
for j in xrange(dims[1].num_cells):
for k in xrange(dims[0].num_cells):
for m in xrange(state.num_aux):
aux_file.write("%18.8e" % aux[m,k,j,l])
aux_file.write('\n')
aux_file.write('\n')
aux_file.write('\n')
q_file.close()
if state.num_aux > 0 and write_aux:
aux_file.close()
except IOError, (errno, strerror):
logger.error("Error writing file: %s" % os.path.join(path,file_name))
logger.error("I/O error(%s): %s" % (errno, strerror))
raise
except:
logger.error("Unexpected error:", sys.exc_info()[0])
raise
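A short usage sketch for the writer above; it assumes a pyclaw Solution named `sol` already exists (e.g. from one of the drivers in this file), which is an assumption for illustration.
# Hedged usage sketch for the ASCII writer above.
import os
outdir = './_output'
if not os.path.exists(outdir):
    os.makedirs(outdir)
# Writes fort.t0000 and fort.q0000 (and fort.a0000 when write_aux is True);
# `sol` is assumed to be an existing pyclaw Solution.
write_ascii(sol, 0, outdir, file_prefix='fort', write_aux=False)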
def read_ascii_t(frame,path='./',file_prefix='fort'):
r"""Read only the fort.t file and return the data
:Input:
- *frame* - (int) Frame number to be read in
- *path* - (string) Path to the current directory of the file
- *file_prefix* - (string) Prefix of the files to be read in.
``default = 'fort'``
:Output:
- (list) List of output variables
- *t* - (float) Time of frame
- *num_eqn* - (int) Number of equations in the frame
- *nstates* - (int) Number of states
- *num_aux* - (int) Number of auxiliary variables in the frame
- *num_dim* - (int) Number of dimensions in q and aux
"""
base_path = os.path.join(path,)
path = os.path.join(base_path, '%s.t' % file_prefix) + str(frame).zfill(4)
try:
logger.debug("Opening %s file." % path)
f = open(path,'r')
t = read_data_line(f)
num_eqn = read_data_line(f,type='int')
nstates = read_data_line(f,type='int')
num_aux = read_data_line(f,type='int')
num_dim = read_data_line(f,type='int')
f.close()
except(IOError):
raise
except:
logger.error("File " + path + " should contain t, num_eqn, nstates, num_aux, num_dim")
print "File " + path + " should contain t, num_eqn, nstates, num_aux, num_dim"
raise
return t,num_eqn,nstates,num_aux,num_dim
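A one-line usage sketch: read back the header written as fort.t0000 (the output directory is an assumption).
# Hedged usage sketch for the header reader above.
t, num_eqn, nstates, num_aux, num_dim = read_ascii_t(0, path='./_output')
print(t)  # simulation time of frame 0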
def compute_p_centers(self, recompute=False):
r"""Calculates the :attr:`p_centers` array, which contains the physical
coordinates of the cell centers when a mapping is used.
grid._p_centers is a list of numpy arrays. Each array has shape equal
to the shape of the grid; the number of arrays is equal to the
dimension of the embedding space for the mapping.
This array is computed only when requested and then stored for later
use unless the recompute flag is set to True (you may want to do this
for time-dependent mappings).
Access the resulting physical coordinate array via the corresponding
dimensions or via the computational grid properties :attr:`p_centers`.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or not len(self._p_centers) == len(self._dimensions):
# Initialize array
self._p_centers = [None]*self.num_dim
# Special case
if self.num_dim == 1:
self._p_centers[0] = self.mapc2p(self,self.dimensions[0].centers)
# Higher dimensions: calculate center arrays
else:
index = np.indices(self.num_cells)
array_list = []
for i,center_array in enumerate(self.get_dim_attribute('centers')):
#We could just use indices directly and deal with
#numpy arrays instead of lists of numpy arrays
array_list.append(center_array[index[i,...]])
self._p_centers = self.mapc2p(self,array_list)
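For context, a sketch of a grid mapping that compute_p_centers would consume; the polar map below is an illustrative assumption, not part of the library.
import numpy as np

def mapc2p_polar(grid, centers):
    # Illustrative 2d mapping: computational (r, theta) -> physical (x, y).
    # `centers` is the list of coordinate arrays that compute_p_centers passes in.
    r, theta = centers
    return [r * np.cos(theta), r * np.sin(theta)]

# Assigning the map and requesting p_centers would trigger the code above:
# grid.mapc2p = mapc2p_polar
# x_phys, y_phys = grid.p_centers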
def compute_p_edges(self, recompute=False):
r"""Calculates the :attr:`p_edges` array
This array is computed only when requested and then stored for later
use unless the recompute flag is set to True (you may want to do this
for time dependent mappings).
Access the resulting physical coordinate array via the corresponding
dimensions or via the computational grid properties :attr:`p_edges`.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or not len(self._p_edges) == len(self._dimensions):
# Initialize array
self._p_edges = [None for i in xrange(self.num_dim)]
if self.num_dim == 1:
self._p_edges[0] = self.mapc2p(self,self.dimensions[0].edges)
else:
index = np.indices([n+1 for n in self.num_cells])
array_list = []
for i,edge_array in enumerate(self.get_dim_attribute('edges')):
#We could just use indices directly and deal with
#numpy arrays instead of lists of numpy arrays
array_list.append(edge_array[index[i,...]])
self._p_edges = self.mapc2p(self,array_list)
def compute_c_centers(self, recompute=False):
r"""
Calculate the :attr:`c_centers` array
This array is computed only when requested and then stored for later
use unless the recompute flag is set to True.
Access the resulting computational coordinate array via the
corresponding dimensions or via the computational grid properties
:attr:`c_centers`.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._c_centers is None):
self._c_centers = [None]*self.num_dim
# For one dimension, the center and edge arrays are equivalent
if self.num_dim == 1:
self._c_centers[0] = self.dimensions[0].centers
else:
index = np.indices(self.num_cells)
self._c_centers = []
for i,center_array in enumerate(self.get_dim_attribute('centers')):
#We could just use indices directly and deal with
#numpy arrays instead of lists of numpy arrays
self._c_centers.append(center_array[index[i,...]])
def compute_c_edges(self, recompute=False):
r"""
Calculate the :attr:`c_edges` array
This array is computed only when requested and then stored for later
use unless the recompute flag is set to True.
Access the resulting computational coordinate array via the
corresponding dimensions or via the computational grid properties
:attr:`c_edges`.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or not len(self._c_edges) == len(self._dimensions):
self._c_edges = [None for i in xrange(self.num_dim)]
if self.num_dim == 1:
self._c_edges[0] = self.dimensions[0].edges
else:
index = np.indices([n+1 for n in self.num_cells])
self._c_edges = []
for i,edge_array in enumerate(self.get_dim_attribute('edges')):
#We could just use indices directly and deal with
#numpy arrays instead of lists of numpy arrays
self._c_edges.append(edge_array[index[i,...]])
def add_gauges(self,gauge_coords):
r"""
Determine the cell indices of each gauge and make a list of all gauges
with their cell indices.
For PetClaw, first check whether each gauge lies in the portion of the
domain that belongs to this grid.
THIS SHOULD BE MOVED TO GRID
"""
import os
from numpy import floor
if not os.path.exists(self.gauge_path):
try:
os.makedirs(self.gauge_path)
except OSError:
print "gauge directory already exists, ignoring"
for gauge in gauge_coords:
# Determine gauge locations in units of mesh spacing
if all(self.lower[n]<=gauge[n]<self.upper[n] for n in range(self.num_dim)):
# Set indices relative to this grid
gauge_index = [int(floor(gauge[n]/self.delta[n])) for n in xrange(self.num_dim)]
gauge_path = self.gauge_path+'gauge'+'_'.join(str(coord) for coord in gauge)+'.txt'
if os.path.isfile(gauge_path):
os.remove(gauge_path)
self.gauges.append(gauge_index)
self.gauge_files.append(open(gauge_path,'a'))
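A usage sketch for the method above; `grid`, the output path, and the gauge coordinates are illustrative assumptions.
# Hedged usage sketch (assumes `grid` is an object defining add_gauges above).
grid.gauge_path = './_output/_gauges/'
grid.gauges = []
grid.gauge_files = []
# Place two gauges inside a 2d domain; each gets its own output file.
grid.add_gauges([(0.25, 0.1), (1.5, 0.4)])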
def burgers(iplot=1,htmlplot=0,outdir='./_output'):
"""
Example from Chapter 11 of LeVeque, Figure 11.8.
Shows decay of an initial wave packet to an N-wave with Burgers' equation.
"""
import numpy as np
from clawpack import pyclaw
solver = pyclaw.ClawSolver1D()
solver.num_waves = 1
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
#===========================================================================
# Initialize grids and then initialize the solution associated to the grid
#===========================================================================
x = pyclaw.Dimension('x',-8.0,8.0,1000)
grid = pyclaw.Grid(x)
num_eqn = 1
state = pyclaw.State(grid,num_eqn)
xc=grid.x.center
state.q[0,:] = (xc>-np.pi)*(xc<np.pi)*(2.*np.sin(3.*xc)+np.cos(2.*xc)+0.2)
state.q[0,:] = state.q[0,:]*(np.cos(xc)+1.)
state.problem_data['efix']=True
#===========================================================================
# Setup controller and controller parameters. Then solve the problem
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = 6.0
claw.num_output_times = 30
claw.solution = pyclaw.Solution(state)
claw.solver = solver
claw.outdir = outdir
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
def interactive_plot(outdir='./_output',file_format='ascii'):
"""
Convenience function for launching an interactive plotting session.
"""
plot(outdir=outdir,file_format=file_format,iplot=True,htmlplot=False)
def html_plot(outdir='./_output',file_format='ascii'):
"""
Convenience function for creating html page with plots.
"""
plot(outdir=outdir,file_format=file_format,htmlplot=True,iplot=False)
def evolve_to_time(self,solution,tend=None):
r"""
Performs global timesteps until all patches in the mesh reach the given end time.
See :class:`Solver` for full documentation
"""
if tend is None:
raise Exception("Not yet implemented.")
self.solution = solution
self.libpeano.pyclaw_peano_evolveToTime(tend, self.peano, self.boundary_condition_callback, self.solver_callback)
def acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',**kwargs):
"""
Example python script for solving the 3d acoustics equations.
"""
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
subdivisionFactor = 7
if solver_type=='classic':
solver=pyclaw.ClawSolver3D()
else:
raise Exception('Unrecognized solver_type.')
from clawpack import riemann
# Peano Solver
peanoSolver = peanoclaw.Solver(solver, (2./3./subdivisionFactor), init)
solver.rp = riemann.rp3_vc_acoustics
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.wall
solver.bc_upper[0]=pyclaw.BC.wall
solver.bc_lower[1]=pyclaw.BC.wall
solver.bc_upper[1]=pyclaw.BC.wall
solver.bc_lower[2]=pyclaw.BC.wall
solver.bc_upper[2]=pyclaw.BC.wall
solver.aux_bc_lower[0]=pyclaw.BC.wall
solver.aux_bc_upper[0]=pyclaw.BC.wall
solver.aux_bc_lower[1]=pyclaw.BC.wall
solver.aux_bc_upper[1]=pyclaw.BC.wall
solver.aux_bc_lower[2]=pyclaw.BC.wall
solver.aux_bc_upper[2]=pyclaw.BC.wall
solver.dimensional_split=True
solver.limiters = pyclaw.limiters.tvd.MC
#===========================================================================
# Initialize domain and state, then initialize the solution associated to the
# state and finally initialize aux array
#===========================================================================
# Initialize domain
mx = subdivisionFactor
my = subdivisionFactor
mz = subdivisionFactor
x = pyclaw.Dimension('x', -1.0, 1.0, mx)
y = pyclaw.Dimension('y', -1.0, 1.0, my)
z = pyclaw.Dimension('z', -1.0, 1.0, mz)
domain = pyclaw.Domain([x,y,z])
num_eqn = 4
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = 2.0
claw.keep_copy = True
claw.solution = peanoclaw.solution.Solution(state,domain) #pyclaw.Solution(state,domain)
claw.solver = peanoSolver #solver
claw.outdir=outdir
#===========================================================================
# Solve the problem
#===========================================================================
status = claw.run()
#===========================================================================
# Plot results
#===========================================================================
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)
def _parse_message(internal_msg: PyMessage):
"""Creates a Message from an internal stream's response.
Args:
internal_msg: The internal message to parse.
"""
if internal_msg.is_timestamped_data():
timestamp = Timestamp(_py_timestamp=internal_msg.timestamp)
data = pickle.loads(internal_msg.data)
return Message(timestamp, data)
if internal_msg.is_watermark():
return WatermarkMessage(Timestamp(_py_timestamp=internal_msg.timestamp))
raise Exception("Unable to parse message") | def _parse_message(internal_msg: PyMessage):
"""Creates a Message from an internal stream's response.
Args:
internal_msg: The internal message to parse.
"""
if internal_msg.is_timestamped_data():
timestamp = Timestamp(_py_timestamp=internal_msg.timestamp)
data = pickle.loads(internal_msg.data)
return Message(timestamp, data)
if internal_msg.is_watermark():
return WatermarkMessage(Timestamp(_py_timestamp=internal_msg.timestamp))
raise Exception("Unable to parse message") |
def map(self, function: Callable[[Any], Any]) -> "OperatorStream":
"""Applies the given function to each value sent on the stream, and outputs the
results on the returned stream.
Args:
function: The function applied to each value sent on this stream.
Returns:
A stream that carries the results of the applied function.
"""
def map_fn(serialized_data: bytes) -> bytes:
result = function(pickle.loads(serialized_data))
return pickle.dumps(result)
return OperatorStream(self._internal_stream._map(map_fn))
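A one-line usage sketch, assuming `stream` is an existing erdos stream:
# Hedged usage sketch for map: `squares` carries x**2 for every x on `stream`.
squares = stream.map(lambda x: x ** 2)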
def flat_map(self, function: Callable[[Any], Sequence[Any]]) -> "OperatorStream":
"""Applies the given function to each value sent on the stream, and outputs the
sequence of received outputs as individual messages.
Args:
function: The function applied to each value sent on this stream.
Returns:
A stream that carries the results of the applied function.
"""
# TODO (Sukrit): This method generates all the elements together and then sends
# the messages out to downstream operators. Instead, the method should `yield`
# individual elements so that they can be eagerly sent out.
def flat_map_fn(serialized_data: bytes) -> Sequence[bytes]:
mapped_values = function(pickle.loads(serialized_data))
result = []
for element in mapped_values:
result.append(pickle.dumps(element))
return result
return OperatorStream(self._internal_stream._flat_map(flat_map_fn))
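A usage sketch, assuming `stream` carries sequences:
# Hedged usage sketch for flat_map: each list sent on `stream` is unpacked
# into individual messages on `elements`.
elements = stream.flat_map(lambda xs: [x + 1 for x in xs])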
def filter(self, function: Callable[[Any], bool]) -> "OperatorStream":
"""Applies the given function to each value sent on the stream, and sends the
value on the returned stream if the function evaluates to `True`.
Args:
function: The function applied to each value sent on this stream. The value
is retained if the function returns `True`.
Returns:
A stream that carries the filtered results from the applied function.
"""
def filter_fn(serialized_data: bytes) -> bool:
return function(pickle.loads(serialized_data))
return OperatorStream(self._internal_stream._filter(filter_fn))
def split(
self, function: Callable[[Any], bool]
) -> Tuple["OperatorStream", "OperatorStream"]:
"""Applies the given function to each value sent on the stream, and outputs the
value to either the left or the right stream depending on if the returned
boolean value is `True` or `False` respectively.
Args:
function: The function applied to each message sent on this stream.
Returns:
The left and the right stream respectively, containing the values output
according to the split function.
"""
def split_fn(serialized_data: bytes) -> bool:
return function(pickle.loads(serialized_data))
left_stream, right_stream = self._internal_stream._split(split_fn)
return (OperatorStream(left_stream), OperatorStream(right_stream))
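A usage sketch, assuming `stream` is an existing erdos stream:
# Hedged usage sketch for split: route even values left, odd values right.
evens, odds = stream.split(lambda x: x % 2 == 0)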
def split_by_type(self, *data_type: Type) -> Tuple["OperatorStream"]:
"""Returns a stream for each provided type on which each message's data is an
instance of that provided type.
Messages with data not corresponding to a provided type are filtered out.
Useful for building operators that send messages with more than 2 data types.
Args:
data_type: the type of the data to be forwarded to the corresponding
stream.
Returns:
A stream for each provided type where each message's data is an instance of
that type.
"""
# TODO(peter): optimize the implementation by moving logic to Rust.
if len(data_type) == 0:
raise ValueError("Did not receive a list of types.")
last_stream = self
streams = ()
for t in data_type[:-1]:
s, last_stream = last_stream.split(lambda x: isinstance(x, t))
streams += (s,)
last_type = data_type[-1]
last_stream = last_stream.filter(lambda x: isinstance(x, last_type))
return streams + (last_stream,)
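A usage sketch, assuming `stream` carries mixed data types:
# Hedged usage sketch for split_by_type: separate ints, floats and strings
# sent on a single stream into three typed streams.
ints, floats, strings = stream.split_by_type(int, float, str)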
def timestamp_join(self, other: "Stream") -> "OperatorStream":
"""Joins the data with matching timestamps from the two different streams.
Args:
other: The stream to join with.
Returns:
A stream that carries the joined results from the two
streams.
"""
def join_fn(serialized_data_left: bytes, serialized_data_right: bytes) -> bytes:
left_data = pickle.loads(serialized_data_left)
right_data = pickle.loads(serialized_data_right)
return pickle.dumps((left_data, right_data))
return OperatorStream(
self._internal_stream._timestamp_join(other._internal_stream, join_fn)
)
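A usage sketch, assuming `left_stream` and `right_stream` are existing erdos streams:
# Hedged usage sketch for timestamp_join: `joined` carries tuples of
# (left_data, right_data) for messages with matching timestamps.
joined = left_stream.timestamp_join(right_stream)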
def concat(self, *other: "Stream") -> "OperatorStream":
"""Merges the data messages from the given streams into a single stream and
forwards a watermark when a minimum watermark on the streams is achieved.
Args:
other: The other stream(s) to merge with.
Returns:
A stream that carries messages from all merged streams.
"""
if len(other) == 0:
raise ValueError("Received empty list of streams to merge.")
# Iteratively keep merging the streams in pairs of two.
streams_to_be_merged = list(other) + [self]
while len(streams_to_be_merged) != 1:
merged_streams = []
paired_streams = zip_longest(
streams_to_be_merged[::2], streams_to_be_merged[1::2]
)
for left_stream, right_stream in paired_streams:
if right_stream is not None:
merged_streams.append(
OperatorStream(
left_stream._internal_stream._concat(
right_stream._internal_stream
)
)
)
else:
merged_streams.append(left_stream)
streams_to_be_merged = merged_streams
return streams_to_be_merged[0]
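A usage sketch, assuming three existing erdos streams:
# Hedged usage sketch for concat: one stream carrying messages from all three
# inputs; watermarks advance with the minimum watermark across them.
merged = stream_a.concat(stream_b, stream_c)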
def send(self, msg: Message):
"""Sends a message on the stream.
Args:
msg: the message to send. This may be a `Watermark` or a `Message`.
"""
if not isinstance(msg, Message):
raise TypeError("msg must inherent from erdos.Message!")
internal_msg = msg._to_py_message()
logger.debug("Sending message {} on the stream {}".format(msg, self.name))
# Raise exception with the name.
try:
return self._py_write_stream.send(internal_msg)
except Exception as e:
raise Exception(
"Exception on stream {} ({}): {}".format(self.name, self.id, e)
) from e
def is_closed(self) -> bool:
"""Whether the stream is closed.
Returns True if a top watermark message was sent or the
IngestStream was unable to set up successfully.
"""
return self._internal_stream.is_closed()
def send(self, msg: Message):
"""Sends a message on the stream.
Args:
msg: the message to send. This may be a
:py:class:`WatermarkMessage` or a :py:class:`Message`.
"""
if not isinstance(msg, Message):
raise TypeError("msg must inherent from erdos.Message!")
logger.debug(
"Sending message {} on the Ingest stream {}".format(msg, self.name)
)
internal_msg = msg._to_py_message()
self._internal_stream.send(internal_msg)
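A driver-side sketch of feeding an IngestStream; the constructor path and Timestamp arguments are assumptions based on this module's conventions (cf. erdos.streams.LoopStream below).
# Hedged usage sketch: drive the dataflow from the driver via an IngestStream.
import erdos
from erdos import Message, WatermarkMessage, Timestamp

ingest_stream = erdos.streams.IngestStream()  # assumed constructor path
# ... connect operators to ingest_stream, then erdos.run_async() ...
ingest_stream.send(Message(Timestamp(coordinates=[0]), "hello"))
ingest_stream.send(WatermarkMessage(Timestamp(coordinates=[0])))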
def main():
"""Creates and runs the dataflow graph."""
count_stream = erdos.connect_source(SendOp, erdos.operator.OperatorConfig())
top_stream = erdos.connect_source(TopOp, erdos.operator.OperatorConfig())
batch_stream = erdos.connect_one_in_one_out(
BatchOp, erdos.operator.OperatorConfig(), count_stream
)
erdos.connect_sink(
CallbackWatermarkListener, erdos.operator.OperatorConfig(), batch_stream
)
erdos.connect_sink(
CallbackWatermarkListener, erdos.operator.OperatorConfig(), top_stream
)
erdos.connect_sink(
PullWatermarkListener, erdos.operator.OperatorConfig(), batch_stream
)
erdos.run()
def mock_internal_type(qualname: str) -> mock.Mock:
"""Fixes an autodoc error when mocking internal types as arguments."""
mocked_class = mock.Mock()
mocked_class.__qualname__ = qualname
return mocked_class
def reset():
"""Create a new dataflow graph.
Note:
A call to this function renders the previous dataflow graph unsafe to
use.
"""
logger.info("Resetting the default graph.")
global _num_py_operators
_num_py_operators = 0
_internal.reset()
def wait(self):
"""Waits for the completion of all the operators in the dataflow"""
for p in self.processes:
p.join()
logger.debug("Finished waiting for the dataflow graph processes.") | def wait(self):
"""Waits for the completion of all the operators in the dataflow"""
for p in self.processes:
p.join()
logger.debug("Finished waiting for the dataflow graph processes.") |
def run(graph_filename: Optional[str] = None, start_port: Optional[int] = 9000):
"""Instantiates and runs the dataflow graph.
ERDOS will spawn 1 process for each python operator, and connect them via
TCP.
Args:
graph_filename: The filename to which to write the dataflow graph
as a DOT file.
start_port: The port on which to start. The start port is the
lowest port ERDOS will use to establish TCP connections between
operators.
"""
driver_handle = run_async(graph_filename, start_port)
logger.debug("Waiting for the dataflow to complete ...")
driver_handle.wait()
def run_async(
graph_filename: Optional[str] = None, start_port: Optional[int] = 9000
) -> NodeHandle:
"""Instantiates and runs the dataflow graph asynchronously.
ERDOS will spawn 1 process for each python operator, and connect them via
TCP.
Args:
graph_filename: The filename to which to write the dataflow graph
as a DOT file.
start_port: The port on which to start. The start port is the
lowest port ERDOS will use to establish TCP connections between
operators.
Returns:
A :py:class:`.NodeHandle` that allows the driver to interface with the
dataflow graph.
"""
data_addresses = [
"127.0.0.1:{port}".format(port=start_port + i)
for i in range(_num_py_operators + 1)
]
control_addresses = [
"127.0.0.1:{port}".format(port=start_port + len(data_addresses) + i)
for i in range(_num_py_operators + 1)
]
logger.debug("Running the dataflow graph on addresses: {}".format(data_addresses))
# Fix for macOS where multiprocessing defaults
# to spawn() instead of fork() in Python 3.8+
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
# Warning: may lead to crashes
# https://bugs.python.org/issue33725
ctx = mp.get_context("fork")
processes = [
ctx.Process(target=_run_node, args=(i, data_addresses, control_addresses))
for i in range(1, _num_py_operators + 1)
]
# Needed to shut down child processes
def sigint_handler(sig, frame):
for p in processes:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
for p in processes:
p.start()
# The driver must always be on node 0 otherwise ingest and extract streams
# will break
py_node_handle = _internal.run_async(
0, data_addresses, control_addresses, graph_filename
)
return NodeHandle(py_node_handle, processes)
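A usage sketch for running the dataflow asynchronously; the filename is illustrative.
# Hedged usage sketch: start the dataflow, interact from the driver, then
# block until completion via the returned NodeHandle.
handle = run_async(graph_filename="dataflow.dot", start_port=9000)
# ... send on ingest streams / read from extract streams here ...
handle.wait()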
def _serialize_data(self):
"""Serializes the message's data using pickle.
Allows an application to front-load cost of serializing data, which
usually occurs when the message is sent, in order to reduce the cost
of later sending the message.
If the data is later changed, :py:meth:`Message._serialize_data` must
be called again to reflect changes in the message.
"""
self._serialized_data = pickle.dumps(
self.data, protocol=pickle.HIGHEST_PROTOCOL
)
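A sketch of front-loading serialization cost before sending; `write_stream` is assumed to be an existing WriteStream.
# Hedged usage sketch for _serialize_data.
msg = Message(Timestamp(coordinates=[1]), {"key": "value"})
msg._serialize_data()   # pickle now, so send() is cheaper later
write_stream.send(msg)  # assumes an existing WriteStream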
def main():
"""Creates and runs the dataflow graph."""
loop_stream = erdos.streams.LoopStream()
stream = erdos.connect_one_in_one_out(
LoopOp, erdos.operator.OperatorConfig(), loop_stream
)
loop_stream.connect_loop(stream)
erdos.run()
def run(self, write_stream: WriteStream):
"""Runs the operator.
Invoked automatically by ERDOS, and provided with a
:py:class:`.WriteStream` to send data on.
Args:
write_stream: A :py:class:`.WriteStream` instance to send data on.
""" | def run(self, write_stream: WriteStream):
"""Runs the operator.
Invoked automatically by ERDOS, and provided with a
:py:class:`.WriteStream` to send data on.
Args:
write_stream: A :py:class:`.WriteStream` instance to send data on.
""" |
def run(self, read_stream: ReadStream):
"""Runs the operator.
Invoked automatically by ERDOS, and provided with a
:py:class:`.ReadStream` to retrieve data from.
Args:
read_stream: A :py:class:`.ReadStream` instance to read data from.
""" | def run(self, read_stream: ReadStream):
"""Runs the operator.
Invoked automatically by ERDOS, and provided with a
:py:class:`.ReadStream` to retrieve data from.
Args:
read_stream: A :py:class:`.ReadStream` instance to read data from.
""" |

def run(self, read_stream: ReadStream, write_stream: WriteStream):
    """Runs the operator.
    Invoked automatically by ERDOS, and provided with a
    :py:class:`.ReadStream` to retrieve data from, and a
    :py:class:`.WriteStream` to send data on.
    Args:
        read_stream: A :py:class:`.ReadStream` instance to read data from.
        write_stream: A :py:class:`.WriteStream` instance to send data on.
""" | def run(self, read_stream: ReadStream, write_stream: WriteStream):
"""Runs the operator.
Invoked automatically by ERDOS, and provided with a
:py:class:`.ReadStream` to retrieve data from, and a
:py:class:`.WriteStream` to send data on.
Args:
read_stream: A :py:class:`.ReadStream` instance to read data from.
write_stream: A :py:class:`.WriteStream` instance to send data on.
""" |

def _install_package(library, instrumentation):
    """
    Ensures that the desired version is installed w/o upgrading its
    dependencies by uninstalling where necessary (if `target` is not
    provided).
    OpenTelemetry auto-instrumentation packages often have traced libraries
    as an instrumentation dependency (e.g. flask for opentelemetry-ext-flask),
    so using -I on the library could cause a likely undesired Flask upgrade.
    Using --no-dependencies alone would leave potential for nonfunctional
    installations.
    """
    pip_list = _sys_pip_freeze()
    for package in libraries[library]:
        if "{}==".format(package).lower() in pip_list:
            logger.info(
                "Existing %s installation detected. Uninstalling.", package
            )
            _sys_pip_uninstall(package)
    _sys_pip_install(instrumentation)
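
# Sketch (assumption, not the package's actual implementation): the
# _sys_pip_freeze/_sys_pip_uninstall/_sys_pip_install helpers used above
# could look like this. Note pip's real flag is spelled --no-deps.
import subprocess
import sys

def _sys_pip_freeze():
    # Lower-cased `pip freeze` output so "pkg==" membership tests work.
    out = subprocess.check_output([sys.executable, "-m", "pip", "freeze"])
    return out.decode().lower()

def _sys_pip_uninstall(package):
    subprocess.check_call(
        [sys.executable, "-m", "pip", "uninstall", "-y", package]
    )

def _sys_pip_install(package):
    # --no-deps: the traced library itself was already handled above.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "--no-deps", package]
    )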

def _pip_check():
    """Ensures none of the instrumentations have dependency conflicts.
    A clean check is reported as:
    'No broken requirements found.'
    Dependency conflicts are reported as:
    'opentelemetry-ext-flask 1.0.1 has requirement opentelemetry-sdk<2.0,>=1.0, but you have opentelemetry-sdk 0.5.'
    To not be too restrictive, we'll only check for relevant packages.
    """
    check_pipe = subprocess.Popen(
        [sys.executable, "-m", "pip", "check"], stdout=subprocess.PIPE
    )
    pip_check = check_pipe.communicate()[0].decode()
    pip_check_lower = pip_check.lower()
    for package_tup in libraries.values():
        for package in package_tup:
            if package.lower() in pip_check_lower:
                raise RuntimeError(
                    "Dependency conflict found: {}".format(pip_check)
                )

def trace_integration(
    connect_module: typing.Callable[..., typing.Any],
    connect_method_name: str,
    database_component: str,
    database_type: str = "",
    connection_attributes: typing.Dict = None,
    tracer_provider: typing.Optional[TracerProvider] = None,
):
    """Integrate with a DB API library.
    https://www.python.org/dev/peps/pep-0249/
    Args:
        connect_module: Module name where the connect method is available.
        connect_method_name: The connect method name.
        database_component: Database driver name or database name "JDBI",
            "jdbc", "odbc", "postgreSQL".
        database_type: The database type. For any SQL database, "sql".
        connection_attributes: Attribute names for database, port, host and
            user in the Connection object.
        tracer_provider: The :class:`opentelemetry.trace.TracerProvider` to
            use. If omitted, the currently configured one is used.
    """
    tracer = get_tracer(__name__, __version__, tracer_provider)
    wrap_connect(
        tracer,
        connect_module,
        connect_method_name,
        database_component,
        database_type,
        connection_attributes,
    )
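
# Usage sketch (the `dbapi` import path is an assumption; the argument
# values mirror the docstring's examples):
import sqlite3
import dbapi  # assumed module name for the code above

dbapi.trace_integration(sqlite3, "connect", "sqlite3", "sql")
conn = sqlite3.connect(":memory:")  # now returns a traced connection proxy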

def wrap_connect(
    tracer: Tracer,
    connect_module: typing.Callable[..., typing.Any],
    connect_method_name: str,
    database_component: str,
    database_type: str = "",
    connection_attributes: typing.Dict = None,
):
    """Integrate with a DB API library.
    https://www.python.org/dev/peps/pep-0249/
    Args:
        tracer: The :class:`opentelemetry.trace.Tracer` to use.
        connect_module: Module name where the connect method is available.
        connect_method_name: The connect method name.
        database_component: Database driver name or database name "JDBI",
            "jdbc", "odbc", "postgreSQL".
        database_type: The database type. For any SQL database, "sql".
        connection_attributes: Attribute names for database, port, host and
            user in the Connection object.
    """
    # pylint: disable=unused-argument
    def wrap_connect_(
        wrapped: typing.Callable[..., typing.Any],
        instance: typing.Any,
        args: typing.Tuple[typing.Any, typing.Any],
        kwargs: typing.Dict[typing.Any, typing.Any],
    ):
        db_integration = DatabaseApiIntegration(
            tracer,
            database_component,
            database_type=database_type,
            connection_attributes=connection_attributes,
        )
        return db_integration.wrapped_connection(wrapped, args, kwargs)

    try:
        wrapt.wrap_function_wrapper(
            connect_module, connect_method_name, wrap_connect_
        )
    except Exception as ex:  # pylint: disable=broad-except
logger.warning("Failed to integrate with DB API. %s", str(ex)) | def wrap_connect(
tracer: Tracer,
connect_module: typing.Callable[..., typing.Any],
connect_method_name: str,
database_component: str,
database_type: str = "",
connection_attributes: typing.Dict = None,
):
"""Integrate with DB API library.
https://www.python.org/dev/peps/pep-0249/
Args:
tracer: The :class:`opentelemetry.trace.Tracer` to use.
connect_module: Module name where connect method is available.
connect_method_name: The connect method name.
database_component: Database driver name or database name "JDBI",
"jdbc", "odbc", "postgreSQL".
database_type: The Database type. For any SQL database, "sql".
connection_attributes: Attribute names for database, port, host and
user in Connection object.
"""
# pylint: disable=unused-argument
def wrap_connect_(
wrapped: typing.Callable[..., typing.Any],
instance: typing.Any,
args: typing.Tuple[typing.Any, typing.Any],
kwargs: typing.Dict[typing.Any, typing.Any],
):
db_integration = DatabaseApiIntegration(
tracer,
database_component,
database_type=database_type,
connection_attributes=connection_attributes,
)
return db_integration.wrapped_connection(wrapped, args, kwargs)
try:
wrapt.wrap_function_wrapper(
connect_module, connect_method_name, wrap_connect_
)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Failed to integrate with DB API. %s", str(ex)) |

def instrument_connection(
    tracer,
    connection,
    database_component: str,
    database_type: str = "",
    connection_attributes: typing.Dict = None,
):
    """Enable instrumentation in a database connection.
    Args:
        tracer: The :class:`opentelemetry.trace.Tracer` to use.
        connection: The connection to instrument.
        database_component: Database driver name or database name "JDBI",
            "jdbc", "odbc", "postgreSQL".
        database_type: The database type. For any SQL database, "sql".
        connection_attributes: Attribute names for database, port, host and
            user in a connection object.
    Returns:
        An instrumented connection.
    """
    db_integration = DatabaseApiIntegration(
        tracer,
        database_component,
        database_type,
        connection_attributes=connection_attributes,
    )
    db_integration.get_connection_attributes(connection)
    return get_traced_connection_proxy(connection, db_integration)
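
# Usage sketch for an already-created connection (assumes the get_tracer
# function from the opentelemetry API is available):
import sqlite3
from opentelemetry.trace import get_tracer

tracer = get_tracer(__name__)
plain_conn = sqlite3.connect(":memory:")
# Returns a proxy whose cursor operations create spans.
traced_conn = instrument_connection(tracer, plain_conn, "sqlite3", "sql")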

def _add_resource_info(self, series: TimeSeries) -> None:
    """Add Google resource specific information (e.g. instance id, region).
    Args:
        series: ProtoBuf TimeSeries
    """
    # TODO: Leverage this better

def _get_metric_descriptor(
    self, record: MetricRecord
) -> Optional[MetricDescriptor]:
    """We can map a Metric to a MetricDescriptor using Metric.name or
    MetricDescriptor.type. We create the MetricDescriptor if it doesn't
    exist already and cache it. Note that recreating a MetricDescriptor is
    a no-op if it already exists.
    :param record:
    :return:
    """
    instrument = record.instrument
    descriptor_type = "custom.googleapis.com/OpenTelemetry/{}".format(
        instrument.name
    )
    if descriptor_type in self._metric_descriptors:
        return self._metric_descriptors[descriptor_type]
    descriptor = {
        "name": None,
        "type": descriptor_type,
        "display_name": instrument.name,
        "description": instrument.description,
        "labels": [],
    }
    for key, value in record.labels:
        if isinstance(value, str):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="STRING")
            )
        elif isinstance(value, bool):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="BOOL")
            )
        elif isinstance(value, int):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="INT64")
            )
        else:
            logger.warning(
                "Label value %s is not a string, bool or integer", value
            )
    if isinstance(record.aggregator, CounterAggregator):
        descriptor["metric_kind"] = MetricDescriptor.MetricKind.GAUGE
    else:
        logger.warning(
            "Unsupported aggregation type %s, ignoring it",
            type(record.aggregator).__name__,
        )
        return None
    if instrument.value_type == int:
        descriptor["value_type"] = MetricDescriptor.ValueType.INT64
    elif instrument.value_type == float:
        descriptor["value_type"] = MetricDescriptor.ValueType.DOUBLE
    proto_descriptor = MetricDescriptor(**descriptor)
    try:
        descriptor = self.client.create_metric_descriptor(
            self.project_name, proto_descriptor
        )
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error(
            "Failed to create metric descriptor %s",
            proto_descriptor,
            exc_info=ex,
        )
        return None
    self._metric_descriptors[descriptor_type] = descriptor
    return descriptor

def extract(
    self,
    get_from_carrier: httptextformat.Getter[
        httptextformat.HTTPTextFormatT
    ],
    carrier: httptextformat.HTTPTextFormatT,
    context: typing.Optional[Context] = None,
) -> Context:
    """Extracts SpanContext from the carrier.
    See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.extract`
    """
    header = get_from_carrier(carrier, self._TRACEPARENT_HEADER_NAME)
    if not header:
        return trace.set_span_in_context(trace.INVALID_SPAN, context)
    match = re.search(self._TRACEPARENT_HEADER_FORMAT_RE, header[0])
    if not match:
        return trace.set_span_in_context(trace.INVALID_SPAN, context)
    version = match.group(1)
    trace_id = match.group(2)
    span_id = match.group(3)
    trace_flags = match.group(4)
    if trace_id == "0" * 32 or span_id == "0" * 16:
        return trace.set_span_in_context(trace.INVALID_SPAN, context)
    if version == "00":
        if match.group(5):
            return trace.set_span_in_context(trace.INVALID_SPAN, context)
    if version == "ff":
        return trace.set_span_in_context(trace.INVALID_SPAN, context)
    tracestate_headers = get_from_carrier(
        carrier, self._TRACESTATE_HEADER_NAME
    )
    tracestate = _parse_tracestate(tracestate_headers)
    span_context = trace.SpanContext(
        trace_id=int(trace_id, 16),
        span_id=int(span_id, 16),
        is_remote=True,
        trace_flags=trace.TraceFlags(trace_flags),
        trace_state=tracestate,
    )
    return trace.set_span_in_context(
        trace.DefaultSpan(span_context), context
    )
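
# Usage sketch: a getter over a plain dict of header-value lists is enough
# to drive extract() (`propagator` stands for an instance of the class
# defining extract()/inject() above):
def get_from_headers(carrier, key):
    # Getter signature: (carrier, key) -> list of header values.
    return carrier.get(key, [])

headers = {
    "traceparent": [
        "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"
    ],
    "tracestate": ["congo=t61rcWkgMzE"],
}
ctx = propagator.extract(get_from_headers, headers)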

def inject(
    self,
    set_in_carrier: httptextformat.Setter[httptextformat.HTTPTextFormatT],
    carrier: httptextformat.HTTPTextFormatT,
    context: typing.Optional[Context] = None,
) -> None:
    """Injects SpanContext into the carrier.
    See `opentelemetry.trace.propagation.httptextformat.HTTPTextFormat.inject`
    """
    span = trace.get_current_span(context)
    if span is None:
        return
    span_context = span.get_context()
    if span_context == trace.INVALID_SPAN_CONTEXT:
        return
    traceparent_string = "00-{:032x}-{:016x}-{:02x}".format(
        span_context.trace_id,
        span_context.span_id,
        span_context.trace_flags,
    )
    set_in_carrier(
        carrier, self._TRACEPARENT_HEADER_NAME, traceparent_string
    )
    if span_context.trace_state:
        tracestate_string = _format_tracestate(span_context.trace_state)
        set_in_carrier(
            carrier, self._TRACESTATE_HEADER_NAME, tracestate_string
        )
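
# Usage sketch: the matching inject() call writes the headers back out
# (same assumed `propagator` instance and `ctx` as in the extract example):
outgoing = {}
# Setter signature: (carrier, key, value) -> None; dict.__setitem__ fits.
propagator.inject(dict.__setitem__, outgoing, context=ctx)
# outgoing now holds "traceparent" (and "tracestate", if one was set).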

def _parse_tracestate(header_list: typing.List[str]) -> trace.TraceState:
    """Parse one or more w3c tracestate headers into a TraceState.
    Args:
        header_list: the values of the tracestate header.
    Returns:
        A valid TraceState that contains values extracted from
        the tracestate header.
        If the format of one of the headers is illegal, all values will
        be discarded and an empty tracestate will be returned.
        If the number of keys is beyond the maximum, all values
        will be discarded and an empty tracestate will be returned.
    """
    tracestate = trace.TraceState()
    value_count = 0
    for header in header_list:
        for member in re.split(_DELIMITER_FORMAT_RE, header):
            # empty members are valid, but no need to process further.
            if not member:
                continue
            match = _MEMBER_FORMAT_RE.fullmatch(member)
            if not match:
                # TODO: log this?
                return trace.TraceState()
            key, _eq, value = match.groups()
            if key in tracestate:  # pylint:disable=E1135
                # duplicate keys are not legal in the header,
                # so discard everything
                return trace.TraceState()
            # typing.Dict's update is not recognized by pylint:
            # https://github.com/PyCQA/pylint/issues/2420
            tracestate[key] = value  # pylint:disable=E1137
            value_count += 1
            if value_count > _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS:
                return trace.TraceState()
    return tracestate
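
# Example (header values follow the w3c trace-context spec examples):
ts = _parse_tracestate(["congo=t61rcWkgMzE,rojo=00f067aa0ba902b7"])
# ts now maps "congo" -> "t61rcWkgMzE" and "rojo" -> "00f067aa0ba902b7".
# An illegal member (or too many keys) yields an empty TraceState:
empty = _parse_tracestate(["not a legal member"])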

def _format_tracestate(tracestate: trace.TraceState) -> str:
    """Serialize a TraceState into a w3c tracestate header value.
    Args:
        tracestate: the tracestate to write
    Returns:
        A string that adheres to the w3c tracestate
        header format.
    """
return ",".join(key + "=" + value for key, value in tracestate.items()) | def _format_tracestate(tracestate: trace.TraceState) -> str:
"""Parse a w3c tracestate header into a TraceState.
Args:
tracestate: the tracestate header to write
Returns:
A string that adheres to the w3c tracestate
header format.
"""
return ",".join(key + "=" + value for key, value in tracestate.items()) |

def collect_request_attributes(scope):
    """Collects HTTP request attributes from the ASGI scope and returns a
    dictionary to be used as span creation attributes."""
    server = scope.get("server") or ["0.0.0.0", 80]
    port = server[1]
    server_host = server[0] + (":" + str(port) if port != 80 else "")
    full_path = scope.get("root_path", "") + scope.get("path", "")
    http_url = scope.get("scheme", "http") + "://" + server_host + full_path
    query_string = scope.get("query_string")
    if query_string and http_url:
        if isinstance(query_string, bytes):
            query_string = query_string.decode("utf8")
        http_url = http_url + ("?" + urllib.parse.unquote(query_string))
    result = {
        "component": scope["type"],
        "http.scheme": scope.get("scheme"),
        "http.host": server_host,
        "host.port": port,
        "http.flavor": scope.get("http_version"),
        "http.target": scope.get("path"),
        "http.url": http_url,
    }
    http_method = scope.get("method")
    if http_method:
        result["http.method"] = http_method
    http_host_value = ",".join(get_header_from_scope(scope, "host"))
    if http_host_value:
        result["http.server_name"] = http_host_value
    http_user_agent = get_header_from_scope(scope, "user-agent")
    if len(http_user_agent) > 0:
        result["http.user_agent"] = http_user_agent[0]
    if "client" in scope and scope["client"] is not None:
        result["net.peer.ip"] = scope.get("client")[0]
        result["net.peer.port"] = scope.get("client")[1]
    # remove None values
    result = {k: v for k, v in result.items() if v is not None}
    return result
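
# Example: a typical ASGI HTTP scope and the attributes derived from it
# (get_header_from_scope is assumed to read the scope's "headers" list):
scope = {
    "type": "http",
    "scheme": "http",
    "http_version": "1.1",
    "method": "GET",
    "server": ("127.0.0.1", 8000),
    "client": ("127.0.0.1", 54321),
    "path": "/items",
    "root_path": "",
    "query_string": b"q=1",
    "headers": [(b"host", b"127.0.0.1:8000")],
}
attrs = collect_request_attributes(scope)
# attrs["http.url"] == "http://127.0.0.1:8000/items?q=1"
# attrs["http.method"] == "GET"; attrs["host.port"] == 8000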

def _instrument(tracer_provider=None, span_callback=None):
    """Enables tracing of all requests calls that go through
    :code:`requests.session.Session.request` (this includes
    :code:`requests.get`, etc.)."""
    # Since
    # https://github.com/psf/requests/commit/d72d1162142d1bf8b1b5711c664fbbd674f349d1
    # (v0.7.0, Oct 23, 2011), get, post, etc are implemented via request which,
    # in turn, is implemented via Session.request (`Session` was named `session`
    # before v1.0.0, Dec 17, 2012, see
    # https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120)
    wrapped = Session.request
    tracer = trace.get_tracer(__name__, __version__, tracer_provider)

    @functools.wraps(wrapped)
    def instrumented_request(self, method, url, *args, **kwargs):
        if context.get_value("suppress_instrumentation"):
            return wrapped(self, method, url, *args, **kwargs)
        # See
        # https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#http-client
        try:
            parsed_url = urlparse(url)
            span_name = parsed_url.path
        except ValueError as exc:  # Invalid URL
            span_name = "<Unparsable URL: {}>".format(exc)
        exception = None
        with tracer.start_as_current_span(
            span_name, kind=SpanKind.CLIENT
        ) as span:
            span.set_attribute("component", "http")
            span.set_attribute("http.method", method.upper())
            span.set_attribute("http.url", url)
            headers = kwargs.get("headers", {}) or {}
            propagators.inject(type(headers).__setitem__, headers)
            kwargs["headers"] = headers
            try:
                result = wrapped(
                    self, method, url, *args, **kwargs
                )  # *** PROCEED
            except Exception as exc:  # pylint: disable=W0703
                exception = exc
                result = getattr(exc, "response", None)
            if exception is not None:
                span.set_status(
                    Status(_exception_to_canonical_code(exception))
                )
            if result is not None:
                span.set_attribute("http.status_code", result.status_code)
                span.set_attribute("http.status_text", result.reason)
                span.set_status(
                    Status(http_status_to_canonical_code(result.status_code))
                )
            if span_callback is not None:
                span_callback(span, result)
        if exception is not None:
            raise exception.with_traceback(exception.__traceback__)
        return result

    instrumented_request.opentelemetry_ext_requests_applied = True
    Session.request = instrumented_request
    # TODO: We should also instrument requests.sessions.Session.send
    # but to avoid doubled spans, we would need some context-local
    # state (i.e., only create a Span if the current context's URL is
    # different, then push the current URL, pop it afterwards
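
# Usage sketch: after _instrument() has patched Session.request, ordinary
# requests calls produce CLIENT spans (in the packaged library this is
# normally triggered through an instrumentor class rather than directly):
import requests

_instrument()
resp = requests.get("https://example.com/api")  # traced
print(resp.status_code)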

def _instrument(self, **kwargs):
    """Integrate with the SQLite3 Python library.
    https://docs.python.org/3/library/sqlite3.html
    """
    tracer_provider = kwargs.get("tracer_provider")
    tracer = get_tracer(__name__, __version__, tracer_provider)
    dbapi.wrap_connect(
        tracer,
        sqlite3,
        "connect",
        self._DATABASE_COMPONENT,
        self._DATABASE_TYPE,
        self._CONNECTION_ATTRIBUTES,
    )

def instrument_connection(self, connection):
    """Enable instrumentation in a SQLite connection.
    Args:
        connection: The connection to instrument.
    Returns:
        An instrumented connection.
    """
    tracer = get_tracer(__name__, __version__)
    return dbapi.instrument_connection(
        tracer,
        connection,
        self._DATABASE_COMPONENT,
        self._DATABASE_TYPE,
        self._CONNECTION_ATTRIBUTES,
    )
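
# Usage sketch (SQLite3Instrumentor is the assumed enclosing class of the
# methods above):
import sqlite3

instrumentor = SQLite3Instrumentor()
instrumentor._instrument()  # patch sqlite3.connect globally
plain = sqlite3.connect(":memory:")
traced = instrumentor.instrument_connection(plain)      # per-connection
restored = instrumentor.uninstrument_connection(traced)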

def uninstrument_connection(self, connection):
    """Disable instrumentation in a SQLite connection.
    Args:
        connection: The connection to uninstrument.
    Returns:
        An uninstrumented connection.
    """
    return dbapi.uninstrument_connection(connection)

def _translate_to_cloud_trace(
    self, spans: Sequence[Span]
) -> List[Dict[str, Any]]:
    """Translate the spans to Cloud Trace format.
    Args:
        spans: Tuple of spans to convert
    """
    cloud_trace_spans = []
    for span in spans:
        ctx = span.get_context()
        trace_id = _get_hexadecimal_trace_id(ctx.trace_id)
        span_id = _get_hexadecimal_span_id(ctx.span_id)
        span_name = "projects/{}/traces/{}/spans/{}".format(
            self.project_id, trace_id, span_id
        )
        parent_id = None
        if span.parent:
            parent_id = _get_hexadecimal_span_id(span.parent.span_id)
        start_time = _get_time_from_ns(span.start_time)
        end_time = _get_time_from_ns(span.end_time)
        if len(span.attributes) > MAX_SPAN_ATTRS:
            logger.warning(
                "Span has more than %s attributes, some will be truncated",
                MAX_SPAN_ATTRS,
            )
        cloud_trace_spans.append(
            {
                "name": span_name,
                "span_id": span_id,
                "display_name": _get_truncatable_str_object(
                    span.name, 128
                ),
                "start_time": start_time,
                "end_time": end_time,
                "parent_span_id": parent_id,
                "attributes": _extract_attributes(
                    span.attributes, MAX_SPAN_ATTRS
                ),
                "links": _extract_links(span.links),
                "status": _extract_status(span.status),
                "time_events": _extract_events(span.events),
            }
        )
    # TODO: Leverage more of the Cloud Trace API, e.g.
    # same_process_as_parent_span and child_span_count
    return cloud_trace_spans

def connect(self, host: str, port: int, auth: Optional['Dict'] = None) -> 'Olt':
    """ Connect with pOlt (client mode).
    The function must be implemented in derived classes.
    Args:
        host: host to connect to
        port: port to connect to
        auth: authentication parameters (TBD)
    Returns:
        'Olt' object it is connected to or None if failed
    """
    raise Exception('Unimplemented connect() method')

def send(self, onu: 'OnuDriver', msg: RawMessage) -> bool:
    """ Send a message to the ONU.
    The function must be implemented in derived classes.
    Args:
        onu: ONU object
        msg: raw OMCI message without MIC
    Returns:
        True if successful
    """
    raise Exception('Unimplemented send() method')

def connected(self, olt_id: OltId) -> 'Olt':
    """ Connected indication.
    This function is called by the derived class after successful
    completion of the Hello exchange.
    Args:
        olt_id: OLT Id reported by the peer pOLT
    Returns:
        Olt object
    """
    logger.info("vOMCI {} is connected to pOLT {}".format(self._name, olt_id))
    self._olt = OltDatabase().OltAddUpdate(olt_id, self)
    return self._olt

def disconnected(self):
    """ Disconnected indication.
    This function is called by the derived class when the connection with
    the pOLT is broken.
    """
    if self._olt is not None:
        logger.info("vOMCI {} disconnected from pOLT {}".format(self._name, self._olt.id))
        self._olt.set_channel(None)
        OltDatabase().OltDelete(self._olt.id)
        self._olt = None

def CreateOnu(cls, onu_name: OnuName):
    """
    Add the ONU with onu_name to the management chain.
    Args:
        onu_name: unique name of the ONU
    """
    with cls._lock:
        if cls._onus.get(onu_name) is None:
            cls._onus[onu_name] = ManagedOnu(onu_name)
        return cls._onus[onu_name]

def encode_content(self) -> RawMessage:
    """Encode CREATE request message content.
    Returns:
        raw OMCI message content
    """
    if not self._ak:
        # CREATE request - normal flow
        msg = self.encode_attributes(bytearray())
    else:
        # CREATE response - mainly for debugging
        msg = struct.pack("!HH", self._omci_result, self._attr_exec_mask)
    return msg

def decode_content(self, msg: RawMessage) -> bool:
    """Decode CREATE response message content.
    Returns:
        result : True if successful
    """
    if self._ak:
        # CREATE response - normal flow
        self._omci_result, self._attr_exec_mask = struct.unpack_from("!BH", msg, self.content_offset)
        logger.debug('Decoded OMCI result: {} attr_exec_mask: {} at offset: {}'.format(self._omci_result, self._attr_exec_mask, self.content_offset))
        ret = True
    else:
        # CREATE request - mainly for debugging
        ret = self.decode_attributes(msg, self.content_offset, self._me.attr_mask(access='C'))
    return ret
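
# Standalone illustration of the unpack_from parsing used above
# (offset and values are made up):
import struct

content_offset = 8
msg = b"\x00" * content_offset + struct.pack("!BH", 0, 0x8000)
omci_result, attr_exec_mask = struct.unpack_from("!BH", msg, content_offset)
assert omci_result == 0 and attr_exec_mask == 0x8000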

def commit(self, onu: 'OnuDriver'):
    """ Commit action results to the ONU MIB.
    Args:
        onu : OnuDriver containing the current ONU MIB
    Raises: an exception in case of commit failure
    """
    if not onu.add(self._me):
        raise Exception('{} - failed to commit to the local MIB'.format(self.name))

def rollback(self, onu: 'OnuDriver') -> 'OmciAction':
    """ Create a roll-back action.
    Args:
        onu : OnuDriver containing the current ONU MIB
    Returns:
        An action that rolls back 'this' action, or None if not applicable
    """
    # If the action failed - there is nothing to roll back
    if self._omci_result != 0:
        return None
    return DeleteAction(self._owner, self._me, self._extended)

def connected(self, olt_id: OltId) -> 'Olt':
    """ Connected indication.
    This function is called after successful
    completion of the Hello exchange.
    Args:
        olt_id: OLT Id as given by the management chain configuration
    Returns:
        Olt object
    """
    if not self.olt_connection_exists(olt_id):
        polt_id = (olt_id, self.remote_endpoint_name)
        logger.info("vOMCI {} is connected to pOLT {}".format(self._name, polt_id))
        self._olts[olt_id] = OltDatabase().OltAddUpdate(polt_id, self)
    return self._olts[olt_id]

def add_managed_onu(self, olt_id, onu_name: OnuName, onu_id: OnuSbiId, tci: int = 0):
    """ Add a managed ONU to the OLT database.
    Args:
        olt_id: Name of the OLT
        onu_name: ONU name
        onu_id: ONU id
        tci: initial transaction correlation identifier
    """
    olt = self.connected(olt_id)
    olt.OnuAddUpdate(onu_name, onu_id, tci)
    logger.info("Managed ONU {}:{} was added to pOLT {}".format(onu_name, onu_id, olt.id))
    return olt

def _start(self, notify_done=None) -> OMHStatus:
    """ Internal function. Start the OMH handler in the foreground or in the background.
    Args:
        notify_done: Optional OMH handler completion notification handler.<br>
            If notify_done is None (default), the function blocks until OMH handler execution
            is completed.<br>
            If notify_done is set, the 'start' function spawns a new thread.
            The thread is terminated when the OMH handler is finished. notify_done(this_handler)
            is called to inform the requester that the OMH handler has completed.
    Returns:
        self.completion_status
    """
    if self._running:
        logger.warning("OMH handler {} is already running".format(self._name))
        return OMHStatus.IN_PROGRESS
    self._running = True
    self._canceled = False
    if notify_done is not None:
        self._thread = threading.Thread(name=self.name, target=self._thread_function)
        self._status = OMHStatus.IN_PROGRESS
        self._notify_done = notify_done
        self._thread.start()
    else:
        self._run_to_completion()
        self._running = False
    return self._status

def start(self, notify_done) -> OMHStatus:
    """ Start the OMH handler in the background.
    This function spawns a new thread and executes the OMH handler to completion
    in this dedicated thread's context.
    Args:
        notify_done: OMH handler completion notification handler.
            notify_done(this_handler) is called upon completion and the
            background thread is destroyed.
    Returns:
        self.completion_status
    """
    return self._start(notify_done)

def run(self) -> OMHStatus:
    """ Start the OMH handler and run it to completion.
    Returns:
        self.completion_status
    """
    return self._start()
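
# Usage sketch: blocking vs. background execution of an OMH handler
# (MyHandler and its constructor are illustrative, not from the source):
handler = MyHandler(onu)
status = handler.run()  # blocks until done

def on_done(h):
    print("handler finished:", h.completion_status)

handler2 = MyHandler(onu)
handler2.start(on_done)          # runs in a background thread
handler2.wait_for_completion()   # join it if the caller needs to block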

def wait_for_completion(self) -> OMHStatus:
    """ Wait for completion of an OMH handler running in its own thread. """
    if self._thread is None:
        return self._status
    self._thread.join()
    self._thread = None
    return self._status

def run_subsidiary(self, subsidiary: 'OmhHandler') -> OMHStatus:
    """ Execute a subsidiary OMH handler in the context of the 'self' OMH handler.
    Args:
        subsidiary : subsidiary OMH handler to run to completion
    Returns:
        subsidiary OMH handler completion status
    """
    subsidiary._top_level = False
    self._status = subsidiary.run()
    self._num_transactions += subsidiary._num_transactions
    self._num_messages += subsidiary._num_messages
    self._num_retries += subsidiary._num_retries
    self._actions += subsidiary._actions
    return self._status

def transaction(self, action: OmciAction = None) -> OMHStatus:
    """ Perform a request-response transaction.
    The transaction can involve multiple request-response exchanges if the
    action has to be split to fit in a BASELINE OMCI message.
    Args:
        action : Action to execute. In most cases this parameter is set (not None).
            It might be None only if the previous transaction has to be continued
            with the same ME, for example, for get-next or mib-upload-next
    Returns:
        Transaction completion status
    """
    assert action is not None or len(self._actions) > 0
    if self._canceled:
        return OMHStatus.CANCELED
    self._transaction_status = OMHStatus.IN_PROGRESS
    if action is None:
        action = self._actions[-1]
    elif not self._rolling_back:
        self._actions.append(action)
    self._num_transactions += 1
    # Loop by (request, response) message pairs
    while True:
        # Convert to a raw OMCI message
        self._num_messages += 1
        raw_msg = action.encode(self._onu.next_tci)
        if raw_msg is None:
            self._transaction_status = OMHStatus.ENCODING_DECODING_ERROR
            break
        self._action_waiting_for_response = action.ar and action or None
        # Transmit and wait for acknowledge loop
        num_retries = 0
        while True:
            # Send request to ONU
            if not self._onu.send(self, raw_msg, action.ar, action.tci):
                self._transaction_status = OMHStatus.COMMUNICATION_ERROR
                self._action_waiting_for_response = None
                break
            # Wait for response if necessary
            if not action.ar:
                break
            if self._transaction_sem.acquire(True, self._onu.ack_timeout):
                break  # All good
            # Timeout. Need to retry
            num_retries += 1
            if num_retries > self._onu.max_retries:
                self._transaction_status = OMHStatus.TIMEOUT
                break
            self._num_retries += 1
            action.reinit()
        # Couldn't send or out of retries?
        if self._transaction_status != OMHStatus.IN_PROGRESS:
            break
        # OMCI error?
        if action.omci_result != 0 and action.omci_result != omci_status['ATTRIBUTES_FAILED_OR_UNKNOWN']:
            self._transaction_status = OMHStatus.ERROR_REPORTED_BY_ONU
            break
        # Increment local mib_sync if necessary (decided by the action)
        if action.is_configuration:
            self._onu.increment_mib_sync()
        # Do the next iteration if the action was split
        if action.remaining_attr_mask == 0:
            self._transaction_status = OMHStatus.OK
            break
        # Prepare action for the next iteration
        action.reinit()
    # Commit to the candidate MIB if successful
    if self._transaction_status == OMHStatus.OK and not self._rolling_back:
        action.commit(self._onu)
    return self._transaction_status

def recv(self, msg: RawMessage):
    """
    This function is called by OnuDriver when an Ack
    for an outstanding request is received.
    Args:
        msg: raw OMCI message
    """
    if self._action_waiting_for_response is None:
        logger.warning("Unexpected response from ONU {}. Ignored.".format(self._onu.onu_id))
        return
    # Unlock the pending transaction if decoded successfully
    if OmciAction.decode(msg, self._action_waiting_for_response) is not None:
        self._action_waiting_for_response = None
        self._transaction_sem.release()

def rollback(self):
    """ Roll back all actions performed by the OMH handler.
    Returns: rollback status
    """
    # Roll back in the order opposite to execution
    self._onu.commit(omci_me_class['ONU_DATA'])
    self._onu.clear_candidate()
    self._rolling_back = True
    self._actions.reverse()
    for action in self._actions:
        rollback_action = action.rollback(self._onu)
        if rollback_action is not None:
            rollback_status = self.transaction(rollback_action)
            if rollback_status == OMHStatus.TIMEOUT or rollback_status == OMHStatus.COMMUNICATION_ERROR:
                break
    self._rolling_back = False

def logerr_and_return(self, status: OMHStatus, text: str) -> OMHStatus:
    """ Log an error and return the error status.
    Args:
        status: OMH handler completion status
        text: Error text
    Returns: status
    """
    logger.warning('ONU {}: {}: {}'.format(self._onu.onu_id, status.name, text))
    return status

def disconnect(self):
    """ Disconnect from the peer pOlt. """
    while not self.omci_msg_queue.empty():
        logger.info("Queue was not empty. Emptying.")
        self.omci_msg_queue.get()
        self.omci_msg_queue.task_done()
    if self._context is not None:
        self.disconnecting = True
        while self._context is not None:
            time.sleep(1)
    self.disconnected()

def connected(self, olt_id: OltId) -> 'Olt':
    """ Connected indication.
    This function is called after successful
    completion of the Hello exchange.
    Args:
        olt_id: OLT Id as given by the management chain configuration
    Returns:
        Olt object
    """
    if not self.olt_connection_exists(olt_id):
        polt_id = (olt_id, self.remote_endpoint_name)
        logger.info("vOMCI {} is connected to pOLT {}".format(self.local_endpoint_name, polt_id))
        self._olts[olt_id] = OltDatabase().OltAddUpdate(polt_id, self)
    return self._olts[olt_id]

def create_connection(self, remote_endpoint_name, peer):
    """ Iterate over connections and check whether a connection with the
    same remote_endpoint_name already exists. If it does exist, it is stale
    and needs to be replaced. If it doesn't, create a new one.
    """
    channel = None
    for _peer, _conn in self._connections.items():
        if _conn.name == remote_endpoint_name:
            self.connection_delete(_peer)
            channel = _conn
            break
    if channel is None:
        channel = self._connection_type(server=self, name=self._name, description=peer, remote_endpoint=remote_endpoint_name)
    self.connection_add(peer, channel)
    if self._parent is not None:
        self._parent.add_managed_onus(channel)

def send(self, olt_id, onu_id: OnuSbiId, msg: RawMessage):
    """ Send a message to the right connection according to the olt_id.
    Args:
        olt_id: name of the OLT
        onu_id: OnuSbiId object
        msg: raw OMCI message without MIC
    Returns:
        True if successful
    """
    conn = None
    for peer in self._connections.keys():
        if self._connections[peer].olt_connection_exists(olt_id):
            conn = self._connections[peer]
            break
    if conn is None:
        logger.error("GrpcServer: connection for OLT {} not found".format(olt_id))
        return False
    else:
        return conn.send(olt_id, onu_id, msg)

def HelloVomci(self, request, context):
    """ RPC to be called for session establishment. """
    remote_endpoint_name = request.local_endpoint_hello.endpoint_name
    peer = context.peer()
    logger.info("vOMCI {} received hello from {} at {}".format(self._name, remote_endpoint_name, peer))
    self._server.create_connection(remote_endpoint_name, peer)
    hello_resp = tr451_vomci_sbi_message_pb2.HelloVomciResponse(
        remote_endpoint_hello=tr451_vomci_sbi_message_pb2.Hello(
            endpoint_name=self._name))
    return hello_resp