body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
def boundary_integral(self, child, discretised_child, region):
'\n Implements the boundary integral for a spatial method.\n\n Parameters\n ----------\n child: :class:`pybamm.Symbol`\n The symbol to which is being integrated\n discretised_child: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n region: str\n The region of the boundary over which to integrate. If region is None\n (default) the integration is carried out over the entire boundary. If\n region is `negative tab` or `positive tab` then the integration is only\n carried out over the appropriate part of the boundary corresponding to\n the tab.\n\n Returns\n -------\n :class: `pybamm.Array`\n Contains the result of acting the discretised boundary integral on\n the child discretised_symbol\n '
raise NotImplementedError | -2,703,065,040,527,775,000 | Implements the boundary integral for a spatial method.
Parameters
----------
child: :class:`pybamm.Symbol`
The symbol to be integrated
discretised_child: :class:`pybamm.Symbol`
The discretised symbol of the correct size
region: str
The region of the boundary over which to integrate. If region is None
(default) the integration is carried out over the entire boundary. If
region is `negative tab` or `positive tab` then the integration is only
carried out over the appropriate part of the boundary corresponding to
the tab.
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised boundary integral on
the child discretised_symbol | pybamm/spatial_methods/spatial_method.py | boundary_integral | jedgedrudd/PyBaMM | python | def boundary_integral(self, child, discretised_child, region):
'\n Implements the boundary integral for a spatial method.\n\n Parameters\n ----------\n child: :class:`pybamm.Symbol`\n The symbol to which is being integrated\n discretised_child: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n region: str\n The region of the boundary over which to integrate. If region is None\n (default) the integration is carried out over the entire boundary. If\n region is `negative tab` or `positive tab` then the integration is only\n carried out over the appropriate part of the boundary corresponding to\n the tab.\n\n Returns\n -------\n :class: `pybamm.Array`\n Contains the result of acting the discretised boundary integral on\n the child discretised_symbol\n '
raise NotImplementedError |
def delta_function(self, symbol, discretised_symbol):
'\n Implements the delta function on the approriate side for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol to which is being integrated\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n '
raise NotImplementedError | 3,166,707,958,765,238,300 | Implements the delta function on the appropriate side for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol to be integrated
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size | pybamm/spatial_methods/spatial_method.py | delta_function | jedgedrudd/PyBaMM | python | def delta_function(self, symbol, discretised_symbol):
'\n Implements the delta function on the approriate side for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol to which is being integrated\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n '
raise NotImplementedError |
def internal_neumann_condition(self, left_symbol_disc, right_symbol_disc, left_mesh, right_mesh):
'\n A method to find the internal neumann conditions between two symbols\n on adjacent subdomains.\n\n Parameters\n ----------\n left_symbol_disc : :class:`pybamm.Symbol`\n The discretised symbol on the left subdomain\n right_symbol_disc : :class:`pybamm.Symbol`\n The discretised symbol on the right subdomain\n left_mesh : list\n The mesh on the left subdomain\n right_mesh : list\n The mesh on the right subdomain\n '
raise NotImplementedError | -1,846,082,221,403,767,600 | A method to find the internal Neumann conditions between two symbols
on adjacent subdomains.
Parameters
----------
left_symbol_disc : :class:`pybamm.Symbol`
The discretised symbol on the left subdomain
right_symbol_disc : :class:`pybamm.Symbol`
The discretised symbol on the right subdomain
left_mesh : list
The mesh on the left subdomain
right_mesh : list
The mesh on the right subdomain | pybamm/spatial_methods/spatial_method.py | internal_neumann_condition | jedgedrudd/PyBaMM | python | def internal_neumann_condition(self, left_symbol_disc, right_symbol_disc, left_mesh, right_mesh):
'\n A method to find the internal neumann conditions between two symbols\n on adjacent subdomains.\n\n Parameters\n ----------\n left_symbol_disc : :class:`pybamm.Symbol`\n The discretised symbol on the left subdomain\n right_symbol_disc : :class:`pybamm.Symbol`\n The discretised symbol on the right subdomain\n left_mesh : list\n The mesh on the left subdomain\n right_mesh : list\n The mesh on the right subdomain\n '
raise NotImplementedError |
def boundary_value_or_flux(self, symbol, discretised_child):
'\n Returns the boundary value or flux using the approriate expression for the\n spatial method. To do this, we create a sparse vector \'bv_vector\' that extracts\n either the first (for side="left") or last (for side="right") point from\n \'discretised_child\'.\n\n Parameters\n -----------\n symbol: :class:`pybamm.Symbol`\n The boundary value or flux symbol\n discretised_child : :class:`pybamm.StateVector`\n The discretised variable from which to calculate the boundary value\n\n Returns\n -------\n :class:`pybamm.MatrixMultiplication`\n The variable representing the surface value.\n '
if any(((len(self.mesh[dom]) > 1) for dom in discretised_child.domain)):
raise NotImplementedError('Cannot process 2D symbol in base spatial method')
if isinstance(symbol, pybamm.BoundaryGradient):
raise TypeError('Cannot process BoundaryGradient in base spatial method')
n = sum((self.mesh[dom][0].npts for dom in discretised_child.domain))
if (symbol.side == 'left'):
left_vector = csr_matrix(coo_matrix(([1], ([0], [0])), shape=(1, n)))
bv_vector = pybamm.Matrix(left_vector)
elif (symbol.side == 'right'):
right_vector = csr_matrix(coo_matrix(([1], ([0], [(n - 1)])), shape=(1, n)))
bv_vector = pybamm.Matrix(right_vector)
out = (bv_vector @ discretised_child)
out.domain = []
return out | -5,708,088,011,332,040,000 | Returns the boundary value or flux using the appropriate expression for the
spatial method. To do this, we create a sparse vector 'bv_vector' that extracts
either the first (for side="left") or last (for side="right") point from
'discretised_child'.
Parameters
-----------
symbol: :class:`pybamm.Symbol`
The boundary value or flux symbol
discretised_child : :class:`pybamm.StateVector`
The discretised variable from which to calculate the boundary value
Returns
-------
:class:`pybamm.MatrixMultiplication`
The variable representing the surface value. | pybamm/spatial_methods/spatial_method.py | boundary_value_or_flux | jedgedrudd/PyBaMM | python | def boundary_value_or_flux(self, symbol, discretised_child):
'\n Returns the boundary value or flux using the approriate expression for the\n spatial method. To do this, we create a sparse vector \'bv_vector\' that extracts\n either the first (for side="left") or last (for side="right") point from\n \'discretised_child\'.\n\n Parameters\n -----------\n symbol: :class:`pybamm.Symbol`\n The boundary value or flux symbol\n discretised_child : :class:`pybamm.StateVector`\n The discretised variable from which to calculate the boundary value\n\n Returns\n -------\n :class:`pybamm.MatrixMultiplication`\n The variable representing the surface value.\n '
if any(((len(self.mesh[dom]) > 1) for dom in discretised_child.domain)):
raise NotImplementedError('Cannot process 2D symbol in base spatial method')
if isinstance(symbol, pybamm.BoundaryGradient):
raise TypeError('Cannot process BoundaryGradient in base spatial method')
n = sum((self.mesh[dom][0].npts for dom in discretised_child.domain))
if (symbol.side == 'left'):
left_vector = csr_matrix(coo_matrix(([1], ([0], [0])), shape=(1, n)))
bv_vector = pybamm.Matrix(left_vector)
elif (symbol.side == 'right'):
right_vector = csr_matrix(coo_matrix(([1], ([0], [(n - 1)])), shape=(1, n)))
bv_vector = pybamm.Matrix(right_vector)
out = (bv_vector @ discretised_child)
out.domain = []
return out |
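
The `boundary_value_or_flux` row above builds a sparse 1 x n `bv_vector` that picks out the first (side="left") or last (side="right") entry of the discretised child. A minimal standalone sketch of that extraction with SciPy, using a placeholder mesh size and values rather than pybamm objects:

```python
# Hedged sketch: the left/right extraction vectors from boundary_value_or_flux,
# applied to a plain NumPy array instead of a pybamm StateVector.
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix

n = 5  # assumed number of mesh points
left_vector = csr_matrix(coo_matrix(([1], ([0], [0])), shape=(1, n)))
right_vector = csr_matrix(coo_matrix(([1], ([0], [n - 1])), shape=(1, n)))

values = np.array([10.0, 11.0, 12.0, 13.0, 14.0])  # placeholder nodal values
print(left_vector @ values)   # [10.] -> value at the left boundary
print(right_vector @ values)  # [14.] -> value at the right boundary
```
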
def mass_matrix(self, symbol, boundary_conditions):
'\n Calculates the mass matrix for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Variable`\n The variable corresponding to the equation for which we are\n calculating the mass matrix.\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Matrix`\n The (sparse) mass matrix for the spatial method.\n '
submesh = self.mesh.combine_submeshes(*symbol.domain)
n = submesh[0].npts
prim_mass = eye(n)
sec_pts = len(submesh)
mass = csr_matrix(kron(eye(sec_pts), prim_mass))
return pybamm.Matrix(mass) | -2,986,177,874,146,341,400 | Calculates the mass matrix for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Variable`
The variable corresponding to the equation for which we are
calculating the mass matrix.
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class:`pybamm.Matrix`
The (sparse) mass matrix for the spatial method. | pybamm/spatial_methods/spatial_method.py | mass_matrix | jedgedrudd/PyBaMM | python | def mass_matrix(self, symbol, boundary_conditions):
'\n Calculates the mass matrix for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Variable`\n The variable corresponding to the equation for which we are\n calculating the mass matrix.\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Matrix`\n The (sparse) mass matrix for the spatial method.\n '
submesh = self.mesh.combine_submeshes(*symbol.domain)
n = submesh[0].npts
prim_mass = eye(n)
sec_pts = len(submesh)
mass = csr_matrix(kron(eye(sec_pts), prim_mass))
return pybamm.Matrix(mass) |
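
The `mass_matrix` row above tiles an identity of size `n` over the secondary mesh points with a Kronecker product. A small sketch of that construction with SciPy alone, using assumed point counts:

```python
# Hedged sketch of kron(eye(sec_pts), eye(n)): a sparse identity mass matrix
# of total size n * sec_pts, built the same way as in mass_matrix above.
from scipy.sparse import csr_matrix, eye, kron

n, sec_pts = 4, 3                       # assumed primary/secondary point counts
mass = csr_matrix(kron(eye(sec_pts), eye(n)))
print(mass.shape, mass.nnz)             # (12, 12) 12
```
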
def process_binary_operators(self, bin_op, left, right, disc_left, disc_right):
'Discretise binary operators in model equations. Default behaviour is to\n return a new binary operator with the discretised children.\n\n Parameters\n ----------\n bin_op : :class:`pybamm.BinaryOperator`\n Binary operator to discretise\n left : :class:`pybamm.Symbol`\n The left child of `bin_op`\n right : :class:`pybamm.Symbol`\n The right child of `bin_op`\n disc_left : :class:`pybamm.Symbol`\n The discretised left child of `bin_op`\n disc_right : :class:`pybamm.Symbol`\n The discretised right child of `bin_op`\n\n Returns\n -------\n :class:`pybamm.BinaryOperator`\n Discretised binary operator\n\n '
return bin_op.__class__(disc_left, disc_right) | 3,478,685,334,077,549,600 | Discretise binary operators in model equations. Default behaviour is to
return a new binary operator with the discretised children.
Parameters
----------
bin_op : :class:`pybamm.BinaryOperator`
Binary operator to discretise
left : :class:`pybamm.Symbol`
The left child of `bin_op`
right : :class:`pybamm.Symbol`
The right child of `bin_op`
disc_left : :class:`pybamm.Symbol`
The discretised left child of `bin_op`
disc_right : :class:`pybamm.Symbol`
The discretised right child of `bin_op`
Returns
-------
:class:`pybamm.BinaryOperator`
Discretised binary operator | pybamm/spatial_methods/spatial_method.py | process_binary_operators | jedgedrudd/PyBaMM | python | def process_binary_operators(self, bin_op, left, right, disc_left, disc_right):
'Discretise binary operators in model equations. Default behaviour is to\n return a new binary operator with the discretised children.\n\n Parameters\n ----------\n bin_op : :class:`pybamm.BinaryOperator`\n Binary operator to discretise\n left : :class:`pybamm.Symbol`\n The left child of `bin_op`\n right : :class:`pybamm.Symbol`\n The right child of `bin_op`\n disc_left : :class:`pybamm.Symbol`\n The discretised left child of `bin_op`\n disc_right : :class:`pybamm.Symbol`\n The discretised right child of `bin_op`\n\n Returns\n -------\n :class:`pybamm.BinaryOperator`\n Discretised binary operator\n\n '
return bin_op.__class__(disc_left, disc_right) |
def concatenation(self, disc_children):
'Discrete concatenation object.\n\n Parameters\n ----------\n disc_children : list\n List of discretised children\n\n Returns\n -------\n :class:`pybamm.DomainConcatenation`\n Concatenation of the discretised children\n '
return pybamm.DomainConcatenation(disc_children, self.mesh) | -2,007,725,983,006,562,800 | Discrete concatenation object.
Parameters
----------
disc_children : list
List of discretised children
Returns
-------
:class:`pybamm.DomainConcatenation`
Concatenation of the discretised children | pybamm/spatial_methods/spatial_method.py | concatenation | jedgedrudd/PyBaMM | python | def concatenation(self, disc_children):
'Discrete concatenation object.\n\n Parameters\n ----------\n disc_children : list\n List of discretised children\n\n Returns\n -------\n :class:`pybamm.DomainConcatenation`\n Concatenation of the discretised children\n '
return pybamm.DomainConcatenation(disc_children, self.mesh) |
def save_gif(gif_fname, images, fps):
'\n To generate a gif from image files, first generate palette from images\n and then generate the gif from the images and the palette.\n ffmpeg -i input_%02d.jpg -vf palettegen -y palette.png\n ffmpeg -i input_%02d.jpg -i palette.png -lavfi paletteuse -y output.gif\n\n Alternatively, use a filter to map the input images to both the palette\n and gif commands, while also passing the palette to the gif command.\n ffmpeg -i input_%02d.jpg -filter_complex "[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse" -y output.gif\n\n To directly pass in numpy images, use rawvideo format and `-i -` option.\n '
from subprocess import Popen, PIPE
(head, tail) = os.path.split(gif_fname)
if (head and (not os.path.exists(head))):
os.makedirs(head)
(h, w, c) = images[0].shape
cmd = ['ffmpeg', '-y', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-r', ('%.02f' % fps), '-s', ('%dx%d' % (w, h)), '-pix_fmt', {1: 'gray', 3: 'rgb24', 4: 'rgba'}[c], '-i', '-', '-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse', '-r', ('%.02f' % fps), ('%s' % gif_fname)]
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in images:
proc.stdin.write(image.tostring())
(out, err) = proc.communicate()
if proc.returncode:
err = '\n'.join([' '.join(cmd), err.decode('utf8')])
raise IOError(err)
del proc | -8,273,607,447,406,788,000 | To generate a gif from image files, first generate palette from images
and then generate the gif from the images and the palette.
ffmpeg -i input_%02d.jpg -vf palettegen -y palette.png
ffmpeg -i input_%02d.jpg -i palette.png -lavfi paletteuse -y output.gif
Alternatively, use a filter to map the input images to both the palette
and gif commands, while also passing the palette to the gif command.
ffmpeg -i input_%02d.jpg -filter_complex "[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse" -y output.gif
To directly pass in numpy images, use rawvideo format and `-i -` option. | video_prediction/utils/ffmpeg_gif.py | save_gif | Bonennult/video_prediction | python | def save_gif(gif_fname, images, fps):
'\n To generate a gif from image files, first generate palette from images\n and then generate the gif from the images and the palette.\n ffmpeg -i input_%02d.jpg -vf palettegen -y palette.png\n ffmpeg -i input_%02d.jpg -i palette.png -lavfi paletteuse -y output.gif\n\n Alternatively, use a filter to map the input images to both the palette\n and gif commands, while also passing the palette to the gif command.\n ffmpeg -i input_%02d.jpg -filter_complex "[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse" -y output.gif\n\n To directly pass in numpy images, use rawvideo format and `-i -` option.\n '
from subprocess import Popen, PIPE
(head, tail) = os.path.split(gif_fname)
if (head and (not os.path.exists(head))):
os.makedirs(head)
(h, w, c) = images[0].shape
cmd = ['ffmpeg', '-y', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-r', ('%.02f' % fps), '-s', ('%dx%d' % (w, h)), '-pix_fmt', {1: 'gray', 3: 'rgb24', 4: 'rgba'}[c], '-i', '-', '-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse', '-r', ('%.02f' % fps), ('%s' % gif_fname)]
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in images:
proc.stdin.write(image.tostring())
(out, err) = proc.communicate()
if proc.returncode:
err = '\n'.join([' '.join(cmd), err.decode('utf8')])
raise IOError(err)
del proc |
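
A hedged usage note for `save_gif`: it pipes raw frames to an `ffmpeg` subprocess over stdin, so `ffmpeg` must be on the PATH; note also that `ndarray.tostring()` is deprecated in recent NumPy in favour of `tobytes()`. The frames and output path below are placeholders:

```python
# Assumes ffmpeg is installed and save_gif is importable from the module above.
import numpy as np

# Ten 64x64 RGB frames fading from black to light grey (placeholder data).
frames = [np.full((64, 64, 3), i * 25, dtype=np.uint8) for i in range(10)]
save_gif("out/fade.gif", frames, fps=5.0)
```
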
def __init__(self, db_uri):
"\n db_uri = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}?charset=utf8mb4'\n\n "
engine = create_engine(db_uri)
self.session = sessionmaker(bind=engine)() | 7,448,832,326,011,661,000 | db_uri = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}?charset=utf8mb4' | httprunner/database/engine.py | __init__ | AlanFightting/httprunner | python | def __init__(self, db_uri):
"\n \n\n "
engine = create_engine(db_uri)
self.session = sessionmaker(bind=engine)() |
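
A hedged usage sketch for the database wrapper above; the enclosing class name is not shown in this row, so `DBEngine` below is a placeholder, and the connection parameters are fabricated:

```python
from sqlalchemy import text

db_uri = "mysql+pymysql://user:secret@127.0.0.1:3306/testdb?charset=utf8mb4"
db = DBEngine(db_uri)                        # placeholder class name
rows = db.session.execute(text("SELECT 1")).fetchall()
print(rows)                                  # [(1,)]
```
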
@staticmethod
def value_decode(row: dict):
'\n Try to decode value of table\n datetime.datetime-->string\n datetime.date-->string\n json str-->dict\n :param row:\n :return:\n '
for (k, v) in row.items():
if isinstance(v, datetime.datetime):
row[k] = v.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(v, datetime.date):
row[k] = v.strftime('%Y-%m-%d')
elif isinstance(v, str):
try:
row[k] = json.loads(v)
except ValueError:
pass | -8,875,464,380,168,999,000 | Try to decode value of table
datetime.datetime-->string
datetime.date-->string
json str-->dict
:param row:
:return: | httprunner/database/engine.py | value_decode | AlanFightting/httprunner | python | @staticmethod
def value_decode(row: dict):
'\n Try to decode value of table\n datetime.datetime-->string\n datetime.date-->string\n json str-->dict\n :param row:\n :return:\n '
for (k, v) in row.items():
if isinstance(v, datetime.datetime):
row[k] = v.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(v, datetime.date):
row[k] = v.strftime('%Y-%m-%d')
elif isinstance(v, str):
try:
row[k] = json.loads(v)
except ValueError:
pass |
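
A short usage sketch for `value_decode`; it mutates the row dict in place. The class name is not shown in this row, so the call is written against the same placeholder `DBEngine`:

```python
import datetime

row = {
    "created": datetime.datetime(2021, 5, 1, 12, 0, 0),
    "birthday": datetime.date(1990, 1, 2),
    "payload": '{"ok": true}',
    "name": "alice",
}
DBEngine.value_decode(row)   # placeholder class name; it is a @staticmethod
# row["created"] -> "2021-05-01 12:00:00", row["birthday"] -> "1990-01-02"
# row["payload"] -> {"ok": True}; row["name"] stays "alice" (json.loads fails)
```
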
@property
def name(self):
'Name of the virtual server.<br/>Minimum length = 1.\n\t\t'
try:
return self._name
except Exception as e:
raise e | -3,012,199,902,447,286,300 | Name of the virtual server.<br/>Minimum length = 1. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | name | guardicore/nitro-python | python | @property
def name(self):
'\n\t\t'
try:
return self._name
except Exception as e:
raise e |
@name.setter
def name(self, name):
'Name of the virtual server.<br/>Minimum length = 1\n\t\t'
try:
self._name = name
except Exception as e:
raise e | 868,641,619,903,283,700 | Name of the virtual server.<br/>Minimum length = 1 | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | name | guardicore/nitro-python | python | @name.setter
def name(self, name):
'\n\t\t'
try:
self._name = name
except Exception as e:
raise e |
@property
def nexthopserver(self):
'The name of the next hop server bound to the VPN virtual server.\n\t\t'
try:
return self._nexthopserver
except Exception as e:
raise e | -351,339,766,894,237,400 | The name of the next hop server bound to the VPN virtual server. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | nexthopserver | guardicore/nitro-python | python | @property
def nexthopserver(self):
'\n\t\t'
try:
return self._nexthopserver
except Exception as e:
raise e |
@nexthopserver.setter
def nexthopserver(self, nexthopserver):
'The name of the next hop server bound to the VPN virtual server.\n\t\t'
try:
self._nexthopserver = nexthopserver
except Exception as e:
raise e | 5,440,545,738,043,150,000 | The name of the next hop server bound to the VPN virtual server. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | nexthopserver | guardicore/nitro-python | python | @nexthopserver.setter
def nexthopserver(self, nexthopserver):
'\n\t\t'
try:
self._nexthopserver = nexthopserver
except Exception as e:
raise e |
def _get_nitro_response(self, service, response):
' converts nitro response into object and returns the object array in case of get request.\n\t\t'
try:
result = service.payload_formatter.string_to_resource(vpnvserver_vpnnexthopserver_binding_response, response, self.__class__.__name__)
if (result.errorcode != 0):
if (result.errorcode == 444):
service.clear_session(self)
if result.severity:
if (result.severity == 'ERROR'):
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else:
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_vpnnexthopserver_binding
except Exception as e:
raise e | 844,322,438,921,633,900 | converts nitro response into object and returns the object array in case of get request. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | _get_nitro_response | guardicore/nitro-python | python | def _get_nitro_response(self, service, response):
' \n\t\t'
try:
result = service.payload_formatter.string_to_resource(vpnvserver_vpnnexthopserver_binding_response, response, self.__class__.__name__)
if (result.errorcode != 0):
if (result.errorcode == 444):
service.clear_session(self)
if result.severity:
if (result.severity == 'ERROR'):
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else:
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_vpnnexthopserver_binding
except Exception as e:
raise e |
def _get_object_name(self):
' Returns the value of object identifier argument\n\t\t'
try:
if (self.name is not None):
return str(self.name)
return None
except Exception as e:
raise e | 7,474,779,240,333,799,000 | Returns the value of object identifier argument | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | _get_object_name | guardicore/nitro-python | python | def _get_object_name(self):
' \n\t\t'
try:
if (self.name is not None):
return str(self.name)
return None
except Exception as e:
raise e |
@classmethod
def filter_add_parameters(cls, resource):
' Use this function to create a resource with only add operation specific parameters.\n\t\t'
addresource = vpnvserver_vpnnexthopserver_binding()
addresource.name = resource.name
addresource.nexthopserver = resource.nexthopserver
return addresource | -2,142,231,376,633,605,400 | Use this function to create a resource with only add operation specific parameters. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | filter_add_parameters | guardicore/nitro-python | python | @classmethod
def filter_add_parameters(cls, resource):
' \n\t\t'
addresource = vpnvserver_vpnnexthopserver_binding()
addresource.name = resource.name
addresource.nexthopserver = resource.nexthopserver
return addresource |
@classmethod
def filter_delete_parameters(cls, resource):
' Use this function to create a resource with only delete operation specific parameters.\n\t\t'
deleteresource = vpnvserver_vpnnexthopserver_binding()
deleteresource.name = resource.name
deleteresource.nexthopserver = resource.nexthopserver
return deleteresource | 3,789,293,309,144,867,000 | Use this function to create a resource with only delete operation specific parameters. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | filter_delete_parameters | guardicore/nitro-python | python | @classmethod
def filter_delete_parameters(cls, resource):
' \n\t\t'
deleteresource = vpnvserver_vpnnexthopserver_binding()
deleteresource.name = resource.name
deleteresource.nexthopserver = resource.nexthopserver
return deleteresource |
@classmethod
def get(cls, service, name='', option_=''):
' Use this API to fetch vpnvserver_vpnnexthopserver_binding resources.\n\t\t'
try:
if (not name):
obj = vpnvserver_vpnnexthopserver_binding()
response = obj.get_resources(service, option_)
else:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e | -2,785,300,714,051,055,000 | Use this API to fetch vpnvserver_vpnnexthopserver_binding resources. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | get | guardicore/nitro-python | python | @classmethod
def get(cls, service, name=, option_=):
' \n\t\t'
try:
if (not name):
obj = vpnvserver_vpnnexthopserver_binding()
response = obj.get_resources(service, option_)
else:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e |
@classmethod
def get_filtered(cls, service, name, filter_):
' Use this API to fetch filtered set of vpnvserver_vpnnexthopserver_binding resources.\n\t\tFilter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e | 8,369,779,430,298,223,000 | Use this API to fetch filtered set of vpnvserver_vpnnexthopserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | get_filtered | guardicore/nitro-python | python | @classmethod
def get_filtered(cls, service, name, filter_):
' Use this API to fetch filtered set of vpnvserver_vpnnexthopserver_binding resources.\n\t\tFilter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e |
@classmethod
def count(cls, service, name):
' Use this API to count vpnvserver_vpnnexthopserver_binding resources configued on NetScaler.\n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response:
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e | 2,512,976,632,532,896,300 | Use this API to count vpnvserver_vpnnexthopserver_binding resources configured on NetScaler. | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | count | guardicore/nitro-python | python | @classmethod
def count(cls, service, name):
' \n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response:
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e |
@classmethod
def count_filtered(cls, service, name, filter_):
' Use this API to count the filtered set of vpnvserver_vpnnexthopserver_binding resources.\n\t\tFilter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response:
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e | -2,350,699,591,094,263,300 | Use this API to count the filtered set of vpnvserver_vpnnexthopserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | count_filtered | guardicore/nitro-python | python | @classmethod
def count_filtered(cls, service, name, filter_):
' Use this API to count the filtered set of vpnvserver_vpnnexthopserver_binding resources.\n\t\tFilter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\t\t'
try:
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response:
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e |
def get_external_lb_endpoints():
'\n Return a list of any external API load-balancer endpoints that have\n been manually configured.\n '
ha_connected = is_flag_set('ha.connected')
forced_lb_ips = hookenv.config('loadbalancer-ips').split()
vips = hookenv.config('ha-cluster-vip').split()
dns_record = hookenv.config('ha-cluster-dns')
if forced_lb_ips:
return [(address, STANDARD_API_PORT) for address in forced_lb_ips]
elif (ha_connected and vips):
return [(vip, STANDARD_API_PORT) for vip in vips]
elif (ha_connected and dns_record):
return [(dns_record, STANDARD_API_PORT)]
else:
return [] | -8,604,976,135,310,179,000 | Return a list of any external API load-balancer endpoints that have
been manually configured. | lib/charms/layer/kubernetes_master.py | get_external_lb_endpoints | hemanthnakkina/charm-kubernetes-master | python | def get_external_lb_endpoints():
'\n Return a list of any external API load-balancer endpoints that have\n been manually configured.\n '
ha_connected = is_flag_set('ha.connected')
forced_lb_ips = hookenv.config('loadbalancer-ips').split()
vips = hookenv.config('ha-cluster-vip').split()
dns_record = hookenv.config('ha-cluster-dns')
if forced_lb_ips:
return [(address, STANDARD_API_PORT) for address in forced_lb_ips]
elif (ha_connected and vips):
return [(vip, STANDARD_API_PORT) for vip in vips]
elif (ha_connected and dns_record):
return [(dns_record, STANDARD_API_PORT)]
else:
return [] |
def get_lb_endpoints():
'\n Return all load-balancer endpoints, whether from manual config or via\n relation.\n '
external_lb_endpoints = get_external_lb_endpoints()
loadbalancer = endpoint_from_flag('loadbalancer.available')
if external_lb_endpoints:
return external_lb_endpoints
elif loadbalancer:
lb_addresses = loadbalancer.get_addresses_ports()
return [(host.get('public-address'), host.get('port')) for host in lb_addresses]
else:
return [] | -1,953,696,430,980,908,300 | Return all load-balancer endpoints, whether from manual config or via
relation. | lib/charms/layer/kubernetes_master.py | get_lb_endpoints | hemanthnakkina/charm-kubernetes-master | python | def get_lb_endpoints():
'\n Return all load-balancer endpoints, whether from manual config or via\n relation.\n '
external_lb_endpoints = get_external_lb_endpoints()
loadbalancer = endpoint_from_flag('loadbalancer.available')
if external_lb_endpoints:
return external_lb_endpoints
elif loadbalancer:
lb_addresses = loadbalancer.get_addresses_ports()
return [(host.get('public-address'), host.get('port')) for host in lb_addresses]
else:
return [] |
def get_api_endpoint(relation=None):
'\n Determine the best endpoint for a client to connect to.\n\n If a relation is given, it will take that into account when choosing an\n endpoint.\n '
endpoints = get_lb_endpoints()
if endpoints:
return endpoints[(kubernetes_common.get_unit_number() % len(endpoints))]
elif relation:
ingress_address = hookenv.ingress_address(relation.relation_id, hookenv.local_unit())
return (ingress_address, STANDARD_API_PORT)
else:
return (hookenv.unit_public_ip(), STANDARD_API_PORT) | -4,188,973,924,212,686,000 | Determine the best endpoint for a client to connect to.
If a relation is given, it will take that into account when choosing an
endpoint. | lib/charms/layer/kubernetes_master.py | get_api_endpoint | hemanthnakkina/charm-kubernetes-master | python | def get_api_endpoint(relation=None):
'\n Determine the best endpoint for a client to connect to.\n\n If a relation is given, it will take that into account when choosing an\n endpoint.\n '
endpoints = get_lb_endpoints()
if endpoints:
return endpoints[(kubernetes_common.get_unit_number() % len(endpoints))]
elif relation:
ingress_address = hookenv.ingress_address(relation.relation_id, hookenv.local_unit())
return (ingress_address, STANDARD_API_PORT)
else:
return (hookenv.unit_public_ip(), STANDARD_API_PORT) |
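
The `get_api_endpoint` row above spreads clients across load-balancer endpoints by taking the unit number modulo the endpoint count. A tiny standalone illustration with fabricated addresses and port:

```python
# Hedged sketch of the modulo-based endpoint selection (endpoints are invented).
endpoints = [("10.0.0.10", 6443), ("10.0.0.11", 6443)]
for unit_number in range(4):
    print(unit_number, endpoints[unit_number % len(endpoints)])
# units 0 and 2 pick 10.0.0.10; units 1 and 3 pick 10.0.0.11
```
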
def install_ceph_common():
'Install ceph-common tools.\n\n :return: None\n '
ceph_admin = endpoint_from_flag('ceph-storage.available')
ceph_context = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'use_syslog': 'true', 'ceph_public_network': '', 'ceph_cluster_network': '', 'loglevel': 1, 'hostname': socket.gethostname()}
apt_install(['ceph-common'], fatal=True)
CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True)
render('ceph.conf', str(CEPH_CONF), ceph_context)
try:
with open(str(CEPH_KEYRING), 'w') as key_file:
key_file.write('[client.admin]\n\tkey = {}\n'.format(ceph_admin.key()))
except IOError as err:
hookenv.log('IOError writing admin.keyring: {}'.format(err)) | -1,121,089,419,783,745,400 | Install ceph-common tools.
:return: None | lib/charms/layer/kubernetes_master.py | install_ceph_common | hemanthnakkina/charm-kubernetes-master | python | def install_ceph_common():
'Install ceph-common tools.\n\n :return: None\n '
ceph_admin = endpoint_from_flag('ceph-storage.available')
ceph_context = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'use_syslog': 'true', 'ceph_public_network': , 'ceph_cluster_network': , 'loglevel': 1, 'hostname': socket.gethostname()}
apt_install(['ceph-common'], fatal=True)
CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True)
render('ceph.conf', str(CEPH_CONF), ceph_context)
try:
with open(str(CEPH_KEYRING), 'w') as key_file:
key_file.write('[client.admin]\n\tkey = {}\n'.format(ceph_admin.key()))
except IOError as err:
hookenv.log('IOError writing admin.keyring: {}'.format(err)) |
def deprecate_auth_file(auth_file):
'\n In 1.19+, file-based authentication was deprecated in favor of webhook\n auth. Write out generic files that inform the user of this.\n '
csv_file = Path(auth_file)
csv_file.parent.mkdir(exist_ok=True)
csv_backup = Path('{}.{}'.format(csv_file, AUTH_BACKUP_EXT))
if (csv_file.exists() and (not csv_backup.exists())):
csv_file.rename(csv_backup)
with csv_file.open('w') as f:
f.write('# File-based authentication was removed in Charmed Kubernetes 1.19\n') | 5,085,122,092,301,805,000 | In 1.19+, file-based authentication was deprecated in favor of webhook
auth. Write out generic files that inform the user of this. | lib/charms/layer/kubernetes_master.py | deprecate_auth_file | hemanthnakkina/charm-kubernetes-master | python | def deprecate_auth_file(auth_file):
'\n In 1.19+, file-based authentication was deprecated in favor of webhook\n auth. Write out generic files that inform the user of this.\n '
csv_file = Path(auth_file)
csv_file.parent.mkdir(exist_ok=True)
csv_backup = Path('{}.{}'.format(csv_file, AUTH_BACKUP_EXT))
if (csv_file.exists() and (not csv_backup.exists())):
csv_file.rename(csv_backup)
with csv_file.open('w') as f:
f.write('# File-based authentication was removed in Charmed Kubernetes 1.19\n') |
def migrate_auth_file(filename):
'Create secrets or known tokens depending on what file is being migrated.'
with open(str(filename), 'r') as f:
rows = list(csv.reader(f))
for row in rows:
try:
if row[0].startswith('#'):
continue
elif (filename == AUTH_BASIC_FILE):
create_known_token(*row)
elif (filename == AUTH_TOKENS_FILE):
create_secret(*row)
else:
hookenv.log('Unknown auth file: {}'.format(filename))
return False
except IndexError:
pass
deprecate_auth_file(filename)
return True | -7,641,784,702,578,400,000 | Create secrets or known tokens depending on what file is being migrated. | lib/charms/layer/kubernetes_master.py | migrate_auth_file | hemanthnakkina/charm-kubernetes-master | python | def migrate_auth_file(filename):
with open(str(filename), 'r') as f:
rows = list(csv.reader(f))
for row in rows:
try:
if row[0].startswith('#'):
continue
elif (filename == AUTH_BASIC_FILE):
create_known_token(*row)
elif (filename == AUTH_TOKENS_FILE):
create_secret(*row)
else:
hookenv.log('Unknown auth file: {}'.format(filename))
return False
except IndexError:
pass
deprecate_auth_file(filename)
return True |
def generate_rfc1123(length=10):
'Generate a random string compliant with RFC 1123.\n\n https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names\n\n param: length - the length of the string to generate\n '
length = (253 if (length > 253) else length)
first_last_opts = (string.ascii_lowercase + string.digits)
middle_opts = ((first_last_opts + '-') + '.')
length -= 2
rand_str = ((random.SystemRandom().choice(first_last_opts) + ''.join((random.SystemRandom().choice(middle_opts) for _ in range(length)))) + random.SystemRandom().choice(first_last_opts))
return rand_str | 1,612,866,948,318,523,600 | Generate a random string compliant with RFC 1123.
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
param: length - the length of the string to generate | lib/charms/layer/kubernetes_master.py | generate_rfc1123 | hemanthnakkina/charm-kubernetes-master | python | def generate_rfc1123(length=10):
'Generate a random string compliant with RFC 1123.\n\n https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names\n\n param: length - the length of the string to generate\n '
length = (253 if (length > 253) else length)
first_last_opts = (string.ascii_lowercase + string.digits)
middle_opts = ((first_last_opts + '-') + '.')
length -= 2
rand_str = ((random.SystemRandom().choice(first_last_opts) + .join((random.SystemRandom().choice(middle_opts) for _ in range(length)))) + random.SystemRandom().choice(first_last_opts))
return rand_str |
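
A hedged check of the RFC 1123 constraints that `generate_rfc1123` targets: lowercase alphanumerics with '-' or '.' only in the middle, alphanumeric first and last characters, at most 253 characters:

```python
import re

name = generate_rfc1123(12)
assert len(name) <= 253
assert re.fullmatch(r"[a-z0-9]([a-z0-9.-]*[a-z0-9])?", name)
```
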
def token_generator(length=32):
'Generate a random token for use in account tokens.\n\n param: length - the length of the token to generate\n '
alpha = (string.ascii_letters + string.digits)
token = ''.join((random.SystemRandom().choice(alpha) for _ in range(length)))
return token | 5,597,463,047,298,483,000 | Generate a random token for use in account tokens.
param: length - the length of the token to generate | lib/charms/layer/kubernetes_master.py | token_generator | hemanthnakkina/charm-kubernetes-master | python | def token_generator(length=32):
'Generate a random token for use in account tokens.\n\n param: length - the length of the token to generate\n '
alpha = (string.ascii_letters + string.digits)
token = .join((random.SystemRandom().choice(alpha) for _ in range(length)))
return token |
def delete_secret(secret_id):
'Delete a given secret id.'
return kubernetes_common.kubectl_success('delete', 'secret', '-n', AUTH_SECRET_NS, secret_id) | 8,358,800,692,941,659,000 | Delete a given secret id. | lib/charms/layer/kubernetes_master.py | delete_secret | hemanthnakkina/charm-kubernetes-master | python | def delete_secret(secret_id):
return kubernetes_common.kubectl_success('delete', 'secret', '-n', AUTH_SECRET_NS, secret_id) |
def get_csv_password(csv_fname, user):
'Get the password for the given user within the csv file provided.'
root_cdk = '/root/cdk'
tokens_fname = (Path(root_cdk) / csv_fname)
if (not tokens_fname.is_file()):
return None
with tokens_fname.open('r') as stream:
for line in stream:
record = line.split(',')
try:
if (record[1] == user):
return record[0]
except IndexError:
continue
return None | 3,880,112,355,788,806,700 | Get the password for the given user within the csv file provided. | lib/charms/layer/kubernetes_master.py | get_csv_password | hemanthnakkina/charm-kubernetes-master | python | def get_csv_password(csv_fname, user):
root_cdk = '/root/cdk'
tokens_fname = (Path(root_cdk) / csv_fname)
if (not tokens_fname.is_file()):
return None
with tokens_fname.open('r') as stream:
for line in stream:
record = line.split(',')
try:
if (record[1] == user):
return record[0]
except IndexError:
continue
return None |
def get_secret_password(username):
'Get the password for the given user from the secret that CK created.'
try:
output = kubernetes_common.kubectl('get', 'secrets', '-n', AUTH_SECRET_NS, '--field-selector', 'type={}'.format(AUTH_SECRET_TYPE), '-o', 'json').decode('UTF-8')
except CalledProcessError:
token = None
if (username == 'admin'):
admin_kubeconfig = Path('/root/.kube/config')
if admin_kubeconfig.exists():
with admin_kubeconfig.open('r') as f:
data = safe_load(f)
try:
token = data['users'][0]['user']['token']
except (KeyError, ValueError):
pass
return token
except FileNotFoundError:
return None
secrets = json.loads(output)
if ('items' in secrets):
for secret in secrets['items']:
try:
data_b64 = secret['data']
password_b64 = data_b64['password'].encode('UTF-8')
username_b64 = data_b64['username'].encode('UTF-8')
except (KeyError, TypeError):
continue
password = b64decode(password_b64).decode('UTF-8')
secret_user = b64decode(username_b64).decode('UTF-8')
if (username == secret_user):
return password
return None | -7,479,111,451,008,058,000 | Get the password for the given user from the secret that CK created. | lib/charms/layer/kubernetes_master.py | get_secret_password | hemanthnakkina/charm-kubernetes-master | python | def get_secret_password(username):
try:
output = kubernetes_common.kubectl('get', 'secrets', '-n', AUTH_SECRET_NS, '--field-selector', 'type={}'.format(AUTH_SECRET_TYPE), '-o', 'json').decode('UTF-8')
except CalledProcessError:
token = None
if (username == 'admin'):
admin_kubeconfig = Path('/root/.kube/config')
if admin_kubeconfig.exists():
with admin_kubeconfig.open('r') as f:
data = safe_load(f)
try:
token = data['users'][0]['user']['token']
except (KeyError, ValueError):
pass
return token
except FileNotFoundError:
return None
secrets = json.loads(output)
if ('items' in secrets):
for secret in secrets['items']:
try:
data_b64 = secret['data']
password_b64 = data_b64['password'].encode('UTF-8')
username_b64 = data_b64['username'].encode('UTF-8')
except (KeyError, TypeError):
continue
password = b64decode(password_b64).decode('UTF-8')
secret_user = b64decode(username_b64).decode('UTF-8')
if (username == secret_user):
return password
return None |
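
The secret lookup above base64-decodes the `username`/`password` fields of each matching Kubernetes secret. A minimal standalone sketch of that decoding step with a fabricated secret payload:

```python
import base64

secret = {"data": {"username": base64.b64encode(b"admin").decode(),
                   "password": base64.b64encode(b"s3cret").decode()}}
data_b64 = secret["data"]
username = base64.b64decode(data_b64["username"].encode("UTF-8")).decode("UTF-8")
password = base64.b64decode(data_b64["password"].encode("UTF-8")).decode("UTF-8")
print(username, password)   # admin s3cret
```
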
def service_cidr():
" Return the charm's service-cidr config"
frozen_cidr = db.get('kubernetes-master.service-cidr')
return (frozen_cidr or hookenv.config('service-cidr')) | -7,382,090,746,641,430,000 | Return the charm's service-cidr config | lib/charms/layer/kubernetes_master.py | service_cidr | hemanthnakkina/charm-kubernetes-master | python | def service_cidr():
" "
frozen_cidr = db.get('kubernetes-master.service-cidr')
return (frozen_cidr or hookenv.config('service-cidr')) |
def freeze_service_cidr():
' Freeze the service CIDR. Once the apiserver has started, we can no\n longer safely change this value. '
frozen_service_cidr = db.get('kubernetes-master.service-cidr')
if ((not frozen_service_cidr) or is_service_cidr_expansion()):
db.set('kubernetes-master.service-cidr', hookenv.config('service-cidr')) | 7,960,924,385,793,680,000 | Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. | lib/charms/layer/kubernetes_master.py | freeze_service_cidr | hemanthnakkina/charm-kubernetes-master | python | def freeze_service_cidr():
' Freeze the service CIDR. Once the apiserver has started, we can no\n longer safely change this value. '
frozen_service_cidr = db.get('kubernetes-master.service-cidr')
if ((not frozen_service_cidr) or is_service_cidr_expansion()):
db.set('kubernetes-master.service-cidr', hookenv.config('service-cidr')) |
def get_preferred_service_network(service_cidrs):
'Get the network preferred for cluster service, preferring IPv4'
net_ipv4 = kubernetes_common.get_ipv4_network(service_cidrs)
net_ipv6 = kubernetes_common.get_ipv6_network(service_cidrs)
return (net_ipv4 or net_ipv6) | -629,480,460,032,981,200 | Get the network preferred for cluster service, preferring IPv4 | lib/charms/layer/kubernetes_master.py | get_preferred_service_network | hemanthnakkina/charm-kubernetes-master | python | def get_preferred_service_network(service_cidrs):
net_ipv4 = kubernetes_common.get_ipv4_network(service_cidrs)
net_ipv6 = kubernetes_common.get_ipv6_network(service_cidrs)
return (net_ipv4 or net_ipv6) |
def get_kubernetes_service_ips():
'Get the IP address(es) for the kubernetes service based on the cidr.'
return [next(network.hosts()).exploded for network in kubernetes_common.get_networks(service_cidr())] | 381,198,669,786,912,200 | Get the IP address(es) for the kubernetes service based on the cidr. | lib/charms/layer/kubernetes_master.py | get_kubernetes_service_ips | hemanthnakkina/charm-kubernetes-master | python | def get_kubernetes_service_ips():
return [next(network.hosts()).exploded for network in kubernetes_common.get_networks(service_cidr())] |
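
`get_kubernetes_service_ips` takes the first host of each service CIDR. A small sketch with the standard-library `ipaddress` module and placeholder CIDRs:

```python
import ipaddress

cidrs = ["10.152.183.0/24", "fd00::/112"]          # placeholder service CIDRs
ips = [next(ipaddress.ip_network(c).hosts()).exploded for c in cidrs]
print(ips)   # the first usable host of each network, e.g. '10.152.183.1'
```
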
def get_snap_revs(snaps):
'Get a dict of snap revisions for a given list of snaps.'
channel = hookenv.config('channel')
rev_info = {}
for s in sorted(snaps):
try:
info = check_output(['snap', 'info', s]).decode('utf8', errors='ignore')
except CalledProcessError:
info = ''
snap_rev = None
yaml_data = safe_load(info)
if (yaml_data and ('channels' in yaml_data)):
try:
d = yaml_data['channels'][channel].split()
snap_rev = d[2].strip('()')
except (KeyError, IndexError):
hookenv.log('Could not determine revision for snap: {}'.format(s), level=hookenv.WARNING)
rev_info[s] = snap_rev
return rev_info | 3,718,453,624,753,426,000 | Get a dict of snap revisions for a given list of snaps. | lib/charms/layer/kubernetes_master.py | get_snap_revs | hemanthnakkina/charm-kubernetes-master | python | def get_snap_revs(snaps):
channel = hookenv.config('channel')
rev_info = {}
for s in sorted(snaps):
try:
info = check_output(['snap', 'info', s]).decode('utf8', errors='ignore')
except CalledProcessError:
info =
snap_rev = None
yaml_data = safe_load(info)
if (yaml_data and ('channels' in yaml_data)):
try:
d = yaml_data['channels'][channel].split()
snap_rev = d[2].strip('()')
except (KeyError, IndexError):
hookenv.log('Could not determine revision for snap: {}'.format(s), level=hookenv.WARNING)
rev_info[s] = snap_rev
return rev_info |
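
`get_snap_revs` parses the YAML that `snap info` prints and pulls the revision (the parenthesised third token) out of the requested channel line. A hedged sketch against a fabricated `snap info` snippet:

```python
from yaml import safe_load

info = """
channels:
  1.19/stable: 1.19.3 2020-10-15 (1234) 22MB -
"""
channel = "1.19/stable"
fields = safe_load(info)["channels"][channel].split()
print(fields[2].strip("()"))   # 1234
```
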
@parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetFlatMap(self, structure, shape, dtype):
'Tests windowing by chaining it with flat map.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return args[0]
return dataset_ops.Dataset.zip(tuple([(fn(*arg) if isinstance(arg, tuple) else arg) for arg in args]))
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).flat_map(fn)
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredElement(structure, shape, dtype))
for _ in range(5):
actual = sess.run(get_next)
self._assertEqual(expected, actual) | 5,215,181,121,348,939,000 | Tests windowing by chaining it with flat map.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetFlatMap | Esail/tensorflow | python | @parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetFlatMap(self, structure, shape, dtype):
'Tests windowing by chaining it with flat map.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return args[0]
return dataset_ops.Dataset.zip(tuple([(fn(*arg) if isinstance(arg, tuple) else arg) for arg in args]))
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).flat_map(fn)
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredElement(structure, shape, dtype))
for _ in range(5):
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetBatchDense(self, structure, shape, dtype):
'Tests batching of dense tensor windows.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.batch_window(args[0])
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)) for arg in args])
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredElement(structure, np.concatenate(([5], shape), axis=0), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | 7,058,848,213,807,847,000 | Tests batching of dense tensor windows.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetBatchDense | Esail/tensorflow | python | @parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetBatchDense(self, structure, shape, dtype):
'Tests batching of dense tensor windows.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.batch_window(args[0])
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)) for arg in args])
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredElement(structure, np.concatenate(([5], shape), axis=0), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
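
The tests above exercise the old `tf.contrib.data` `window_dataset`/`batch_window` helpers. A hedged sketch of the same window-then-stack pattern with the present-day `tf.data` API (not the contrib helpers under test):

```python
import tensorflow as tf

# window(5) yields nested datasets; flat_map(... .batch(5)) stacks each window
# back into a dense [5] tensor, mirroring batching.batch_window above.
ds = tf.data.Dataset.range(10).window(5, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(5))
for batch in ds:
    print(batch.numpy())   # [0 1 2 3 4] then [5 6 7 8 9]
```
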
@parameterized.named_parameters(('1', np.int32([])), ('2', np.int32([1])), ('3', np.int32([1, 2, 3])))
def testWindowDatasetBatchDenseDynamicShape(self, shape):
'Tests batching of dynamically shaped dense tensor windows.\n\n Args:\n shape: the input shape\n '
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(self._structuredElement(None, np.concatenate(([5], shape), axis=0), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -5,008,434,139,535,988,000 | Tests batching of dynamically shaped dense tensor windows.
Args:
shape: the input shape | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetBatchDenseDynamicShape | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int32([])), ('2', np.int32([1])), ('3', np.int32([1, 2, 3])))
def testWindowDatasetBatchDenseDynamicShape(self, shape):
'Tests batching of dynamically shaped dense tensor windows.\n\n Args:\n shape: the input shape\n '
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(self._structuredElement(None, np.concatenate(([5], shape), axis=0), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetBatchSparse(self, structure, shape, dtype):
'Tests batching of sparse tensor windows.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.batch_window(args[0])
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)) for arg in args])
dataset = self._structuredSparseDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredSparseElement(structure, np.concatenate(([5], shape), axis=0), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -2,907,993,216,005,055,500 | Tests batching of sparse tensor windows.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetBatchSparse | Esail/tensorflow | python | @parameterized.named_parameters(('1', None, np.int32([]), dtypes.bool), ('2', None, np.int32([]), dtypes.int32), ('3', None, np.int32([]), dtypes.float32), ('4', None, np.int32([]), dtypes.string), ('5', None, np.int32([2]), dtypes.int32), ('6', None, np.int32([2, 2]), dtypes.int32), ('7', (None, None, None), np.int32([]), dtypes.int32), ('8', (None, (None, None)), np.int32([]), dtypes.int32))
def testWindowDatasetBatchSparse(self, structure, shape, dtype):
'Tests batching of sparse tensor windows.\n\n Args:\n structure: the input structure\n shape: the input shape\n dtype: the input data type\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.batch_window(args[0])
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)) for arg in args])
dataset = self._structuredSparseDataset(structure, shape, dtype).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredSparseElement(structure, np.concatenate(([5], shape), axis=0), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', np.int32([])), ('2', np.int32([1])), ('3', np.int32([1, 2, 3])))
def testWindowDatasetBatchSparseDynamicShape(self, shape):
'Tests batching of dynamically shaped sparse tensor windows.\n\n Args:\n shape: the input shape\n '
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).map(self._make_dense_to_sparse_fn((len(shape) == 0))).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(self._structuredSparseElement(None, np.concatenate(([5], shape), axis=0), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -4,503,179,503,890,113,000 | Tests batching of dynamically shaped sparse tensor windows.
Args:
shape: the input shape | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetBatchSparseDynamicShape | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int32([])), ('2', np.int32([1])), ('3', np.int32([1, 2, 3])))
def testWindowDatasetBatchSparseDynamicShape(self, shape):
'Tests batching of dynamically shaped sparse tensor windows.\n\n Args:\n shape: the input shape\n '
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).map(self._make_dense_to_sparse_fn((len(shape) == 0))).repeat(5).apply(grouping.window_dataset(5)).apply(grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(self._structuredSparseElement(None, np.concatenate(([5], shape), axis=0), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', None, np.int32([[1], [2], [3]]), dtypes.bool, [(- 1)]), ('2', None, np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('3', None, np.int32([[1], [2], [3]]), dtypes.float32, [(- 1)]), ('4', None, np.int32([[1], [2], [3]]), dtypes.string, [(- 1)]), ('5', None, np.int32([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [(- 1), (- 1)]), ('6', None, np.int32([[3, 1, 3], [1, 3, 1]]), dtypes.int32, [(- 1), (- 1), (- 1)]), ('7', (None, None, None), np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('8', (None, (None, None)), np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('9', None, np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('10', None, np.int32([[1], [2], [3]]), dtypes.int32, np.int32([10])))
def testWindowDatasetPaddedBatchDense(self, structure, shapes, dtype, padded_shape):
'Tests padded batching of dense tensor windows.\n\n Args:\n structure: the input structure\n shapes: the input shapes\n dtype: the input data type\n padded_shape: the shape to pad the output to\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(arg, padded_shape)) for arg in args])
dataset = self._structuredRaggedDataset(structure, shapes, dtype).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(self._structuredElement(structure, np.concatenate((np.int32([len(shapes)]), expected_shape)), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -4,706,676,081,892,534,000 | Tests padded batching of dense tensor windows.
Args:
structure: the input structure
shapes: the input shapes
dtype: the input data type
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchDense | Esail/tensorflow | python | @parameterized.named_parameters(('1', None, np.int32([[1], [2], [3]]), dtypes.bool, [(- 1)]), ('2', None, np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('3', None, np.int32([[1], [2], [3]]), dtypes.float32, [(- 1)]), ('4', None, np.int32([[1], [2], [3]]), dtypes.string, [(- 1)]), ('5', None, np.int32([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [(- 1), (- 1)]), ('6', None, np.int32([[3, 1, 3], [1, 3, 1]]), dtypes.int32, [(- 1), (- 1), (- 1)]), ('7', (None, None, None), np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('8', (None, (None, None)), np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('9', None, np.int32([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('10', None, np.int32([[1], [2], [3]]), dtypes.int32, np.int32([10])))
def testWindowDatasetPaddedBatchDense(self, structure, shapes, dtype, padded_shape):
'Tests padded batching of dense tensor windows.\n\n Args:\n structure: the input structure\n shapes: the input shapes\n dtype: the input data type\n padded_shape: the shape to pad the output to\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(arg, padded_shape)) for arg in args])
dataset = self._structuredRaggedDataset(structure, shapes, dtype).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(self._structuredElement(structure, np.concatenate((np.int32([len(shapes)]), expected_shape)), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', np.int32([[1], [2], [3]]), [(- 1)]), ('2', np.int32([[1, 3], [2, 2], [3, 1]]), [(- 1), (- 1)]), ('3', np.int32([[3, 1, 3], [1, 3, 1]]), [(- 1), (- 1), (- 1)]))
def testWindowDatasetPaddedBatchDenseDynamicShape(self, shapes, padded_shape):
'Tests padded batching of dynamically shaped dense tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(self._structuredElement(None, np.concatenate((np.int32([len(shapes)]), expected_shape)), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -7,236,535,758,031,820,000 | Tests padded batching of dynamically shaped dense tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchDenseDynamicShape | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int32([[1], [2], [3]]), [(- 1)]), ('2', np.int32([[1, 3], [2, 2], [3, 1]]), [(- 1), (- 1)]), ('3', np.int32([[3, 1, 3], [1, 3, 1]]), [(- 1), (- 1), (- 1)]))
def testWindowDatasetPaddedBatchDenseDynamicShape(self, shapes, padded_shape):
'Tests padded batching of dynamically shaped dense tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(self._structuredElement(None, np.concatenate((np.int32([len(shapes)]), expected_shape)), dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', np.int32([[1]]), np.int32([0])), ('2', np.int32([[10], [20]]), np.int32([15])))
def testWindowDatasetPaddedBatchDenseInvalid(self, shapes, padded_shape):
'Tests invalid padded batching of dense tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next) | 8,705,696,122,364,075,000 | Tests invalid padded batching of dense tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchDenseInvalid | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int32([[1]]), np.int32([0])), ('2', np.int32([[10], [20]]), np.int32([15])))
def testWindowDatasetPaddedBatchDenseInvalid(self, shapes, padded_shape):
'Tests invalid padded batching of dense tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next) |
@parameterized.named_parameters(('1', None, np.int64([[1], [2], [3]]), dtypes.bool, [(- 1)]), ('2', None, np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('3', None, np.int64([[1], [2], [3]]), dtypes.float32, [(- 1)]), ('4', None, np.int64([[1], [2], [3]]), dtypes.string, [(- 1)]), ('5', None, np.int64([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [(- 1), (- 1)]), ('6', None, np.int64([[1, 3, 1], [3, 1, 3]]), dtypes.int32, [(- 1), (- 1), (- 1)]), ('7', (None, None, None), np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('8', (None, (None, None)), np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('9', None, np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('10', None, np.int64([[1], [2], [3]]), dtypes.int32, np.int64([10])))
def testWindowDatasetPaddedBatchSparse(self, structure, shapes, dtype, padded_shape):
'Tests padded batching of sparse tensor windows.\n\n Args:\n structure: the input structure\n shapes: the input shapes\n dtype: the input data type\n padded_shape: the shape to pad the output to\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(arg, padded_shape)) for arg in args])
dataset = self._structuredRaggedSparseDataset(structure, shapes, dtype).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredRaggedSparseElement(structure, shapes, dtype, padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | 283,449,577,564,227,520 | Tests padded batching of sparse tensor windows.
Args:
structure: the input structure
shapes: the input shapes
dtype: the input data type
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchSparse | Esail/tensorflow | python | @parameterized.named_parameters(('1', None, np.int64([[1], [2], [3]]), dtypes.bool, [(- 1)]), ('2', None, np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('3', None, np.int64([[1], [2], [3]]), dtypes.float32, [(- 1)]), ('4', None, np.int64([[1], [2], [3]]), dtypes.string, [(- 1)]), ('5', None, np.int64([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [(- 1), (- 1)]), ('6', None, np.int64([[1, 3, 1], [3, 1, 3]]), dtypes.int32, [(- 1), (- 1), (- 1)]), ('7', (None, None, None), np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('8', (None, (None, None)), np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('9', None, np.int64([[1], [2], [3]]), dtypes.int32, [(- 1)]), ('10', None, np.int64([[1], [2], [3]]), dtypes.int32, np.int64([10])))
def testWindowDatasetPaddedBatchSparse(self, structure, shapes, dtype, padded_shape):
'Tests padded batching of sparse tensor windows.\n\n Args:\n structure: the input structure\n shapes: the input shapes\n dtype: the input data type\n padded_shape: the shape to pad the output to\n '
def fn(*args):
if ((len(args) == 1) and (not isinstance(args[0], tuple))):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([(fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(arg, padded_shape)) for arg in args])
dataset = self._structuredRaggedSparseDataset(structure, shapes, dtype).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredRaggedSparseElement(structure, shapes, dtype, padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', np.int64([[1], [2], [3]]), [(- 1)]), ('2', np.int64([[1, 3], [2, 2], [3, 1]]), [(- 1), (- 1)]), ('3', np.int64([[3, 1, 3], [1, 3, 1]]), [(- 1), (- 1), (- 1)]))
def testWindowDatasetPaddedBatchSparseDynamicShape(self, shapes, padded_shape):
'Tests padded batching of dynamically shaped sparse tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).map(self._make_dense_to_sparse_fn(False)).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected = sess.run(self._structuredRaggedSparseElement(None, shapes, dtypes.int32, padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual) | -5,328,868,751,272,801,000 | Tests padded batching of dynamically shaped sparse tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchSparseDynamicShape | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int64([[1], [2], [3]]), [(- 1)]), ('2', np.int64([[1, 3], [2, 2], [3, 1]]), [(- 1), (- 1)]), ('3', np.int64([[3, 1, 3], [1, 3, 1]]), [(- 1), (- 1), (- 1)]))
def testWindowDatasetPaddedBatchSparseDynamicShape(self, shapes, padded_shape):
'Tests padded batching of dynamically shaped sparse tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).map(self._make_dense_to_sparse_fn(False)).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected = sess.run(self._structuredRaggedSparseElement(None, shapes, dtypes.int32, padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual) |
@parameterized.named_parameters(('1', np.int64([[1]]), [0]), ('2', np.int64([[10], [20]]), [15]))
def testWindowDatasetPaddedBatchSparseInvalid(self, shapes, padded_shape):
'Tests invalid padded batching of sparse tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).map(self._make_dense_to_sparse_fn(False)).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next) | 5,317,069,918,889,011,000 | Tests invalid padded batching of sparse tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | testWindowDatasetPaddedBatchSparseInvalid | Esail/tensorflow | python | @parameterized.named_parameters(('1', np.int64([[1]]), [0]), ('2', np.int64([[10], [20]]), [15]))
def testWindowDatasetPaddedBatchSparseInvalid(self, shapes, padded_shape):
'Tests invalid padded batching of sparse tensor windows.\n\n Args:\n shapes: the input shapes\n padded_shape: the shape to pad the output to\n '
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map((lambda shape: array_ops.zeros(shape, dtype=dtypes.int32))).map(self._make_dense_to_sparse_fn(False)).apply(grouping.window_dataset(len(shapes))).apply(grouping._map_x_dataset((lambda x: batching.padded_batch_window(x, padded_shape))))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next) |
def calc_sample_norms(named_params: Iterator[Tuple[(str, torch.Tensor)]], flat: bool=True) -> List[torch.Tensor]:
'\n Calculates the norm of the given tensors for each sample.\n\n This function calculates the overall norm of the given tensors for each sample,\n assuming the each batch\'s dim is zero.\n\n Args:\n named_params: An iterator of tuples <name, param> with name being a\n string and param being a tensor of shape ``[B, ...]`` where ``B``\n is the size of the batch and is the 0th dimension.\n flat: A flag, when set to `True` returns a flat norm over all\n layers norms\n\n Example:\n >>> t1 = torch.rand((2, 5))\n >>> t2 = torch.rand((2, 5))\n >>> calc_sample_norms([("1", t1), ("2", t2)])\n [tensor([1.5117, 1.0618])]\n\n Returns:\n A list of tensor norms where length of the list is the number of layers\n '
norms = [param.view(len(param), (- 1)).norm(2, dim=(- 1)) for (name, param) in named_params]
if flat:
norms = [torch.stack(norms, dim=0).norm(2, dim=0)]
return norms | 1,104,742,471,149,859,100 | Calculates the norm of the given tensors for each sample.
This function calculates the overall norm of the given tensors for each sample,
assuming that each batch's dim is zero.
Args:
named_params: An iterator of tuples <name, param> with name being a
string and param being a tensor of shape ``[B, ...]`` where ``B``
is the size of the batch and is the 0th dimension.
flat: A flag; when set to `True`, returns a flat norm over all
layers' norms
Example:
>>> t1 = torch.rand((2, 5))
>>> t2 = torch.rand((2, 5))
>>> calc_sample_norms([("1", t1), ("2", t2)])
[tensor([1.5117, 1.0618])]
Returns:
A list of tensor norms where the length of the list is the number of layers
'\n Calculates the norm of the given tensors for each sample.\n\n This function calculates the overall norm of the given tensors for each sample,\n assuming the each batch\'s dim is zero.\n\n Args:\n named_params: An iterator of tuples <name, param> with name being a\n string and param being a tensor of shape ``[B, ...]`` where ``B``\n is the size of the batch and is the 0th dimension.\n flat: A flag, when set to `True` returns a flat norm over all\n layers norms\n\n Example:\n >>> t1 = torch.rand((2, 5))\n >>> t2 = torch.rand((2, 5))\n >>> calc_sample_norms([("1", t1), ("2", t2)])\n [tensor([1.5117, 1.0618])]\n\n Returns:\n A list of tensor norms where length of the list is the number of layers\n '
norms = [param.view(len(param), (- 1)).norm(2, dim=(- 1)) for (name, param) in named_params]
if flat:
norms = [torch.stack(norms, dim=0).norm(2, dim=0)]
return norms |
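A minimal usage sketch for the calc_sample_norms record above, assuming only PyTorch and the function as defined in the record; the tensors below are illustrative stand-ins for per-sample gradients.

import torch
# assumes calc_sample_norms from the opacus/utils/tensor_utils.py record above is in scope
grads = [('linear.weight', torch.rand(2, 4, 3)), ('linear.bias', torch.rand(2, 3))]
flat = calc_sample_norms(grads)                    # list with one tensor of shape [2]: overall norm per sample
per_layer = calc_sample_norms(grads, flat=False)   # list with two tensors of shape [2], one per layer
print(flat[0].shape, len(per_layer))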
def sum_over_all_but_batch_and_last_n(tensor: torch.Tensor, n_dims: int) -> torch.Tensor:
'\n Calculates the sum over all dimensions, except the first\n (batch dimension), and excluding the last n_dims.\n\n This function will ignore the first dimension and it will\n not aggregate over the last n_dims dimensions.\n\n Args:\n tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.\n n_dims: Number of dimensions to keep.\n\n Example:\n >>> tensor = torch.ones(1, 2, 3, 4, 5)\n >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape\n torch.Size([1, 4, 5])\n\n Returns:\n A tensor of shape ``(B, ..., X[n_dims-1])``\n '
if (tensor.dim() == (n_dims + 1)):
return tensor
else:
dims = list(range(1, (tensor.dim() - n_dims)))
return tensor.sum(dim=dims) | -8,455,549,789,229,907,000 | Calculates the sum over all dimensions, except the first
(batch dimension), and excluding the last n_dims.
This function will ignore the first dimension and it will
not aggregate over the last n_dims dimensions.
Args:
tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.
n_dims: Number of dimensions to keep.
Example:
>>> tensor = torch.ones(1, 2, 3, 4, 5)
>>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape
torch.Size([1, 4, 5])
Returns:
A tensor of shape ``(B, ..., X[n_dims-1])`` | opacus/utils/tensor_utils.py | sum_over_all_but_batch_and_last_n | DaveBrind/SynthVAE | python | def sum_over_all_but_batch_and_last_n(tensor: torch.Tensor, n_dims: int) -> torch.Tensor:
'\n Calculates the sum over all dimensions, except the first\n (batch dimension), and excluding the last n_dims.\n\n This function will ignore the first dimension and it will\n not aggregate over the last n_dims dimensions.\n\n Args:\n tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.\n n_dims: Number of dimensions to keep.\n\n Example:\n >>> tensor = torch.ones(1, 2, 3, 4, 5)\n >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape\n torch.Size([1, 4, 5])\n\n Returns:\n A tensor of shape ``(B, ..., X[n_dims-1])``\n '
if (tensor.dim() == (n_dims + 1)):
return tensor
else:
dims = list(range(1, (tensor.dim() - n_dims)))
return tensor.sum(dim=dims) |
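A short sketch of both code paths of sum_over_all_but_batch_and_last_n, assuming only PyTorch; the shapes are illustrative.

import torch
# assumes sum_over_all_but_batch_and_last_n from the record above is in scope
t = torch.ones(4, 7, 5)                                        # [B, T, D]
print(sum_over_all_but_batch_and_last_n(t, n_dims=1).shape)    # torch.Size([4, 5]): summed over the T dimension
print(sum_over_all_but_batch_and_last_n(t, n_dims=2).shape)    # torch.Size([4, 7, 5]): early return, nothing to sum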
def unfold3d(tensor: torch.Tensor, kernel_size: Union[(int, Tuple[(int, int, int)])], padding: Union[(int, Tuple[(int, int, int)])]=0, stride: Union[(int, Tuple[(int, int, int)])]=1, dilation: Union[(int, Tuple[(int, int, int)])]=1):
'\n Extracts sliding local blocks from an batched input tensor.\n\n :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).\n This method implements the same action for 5D inputs\n\n Args:\n tensor: An input tensor of shape ``(B, C, D, H, W)``.\n kernel_size: the size of the sliding blocks\n padding: implicit zero padding to be added on both sides of input\n stride: the stride of the sliding blocks in the input spatial dimensions\n dilation: the spacing between the kernel points.\n\n Example:\n >>> B, C, D, H, W = 3, 4, 5, 6, 7\n >>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)\n >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape\n torch.Size([3, 32, 120])\n\n Returns:\n A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L - output spatial dimensions.\n See :class:`torch.nn.Unfold` for more details\n '
if (len(tensor.shape) != 5):
raise ValueError(f'Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}')
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(padding, int):
padding = (padding, padding, padding)
if isinstance(stride, int):
stride = (stride, stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
if (dilation != (1, 1, 1)):
raise NotImplementedError(f"dilation={dilation} not supported. We'd love a PR!")
(batch_size, channels, _, _, _) = tensor.shape
tensor = F.pad(tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]))
tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
tensor = tensor.reshape(batch_size, (- 1), (channels * np.prod(kernel_size))).transpose(1, 2)
return tensor | 2,870,308,372,176,920,000 | Extracts sliding local blocks from a batched input tensor.
:class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).
This method implements the same action for 5D inputs
Args:
tensor: An input tensor of shape ``(B, C, D, H, W)``.
kernel_size: the size of the sliding blocks
padding: implicit zero padding to be added on both sides of input
stride: the stride of the sliding blocks in the input spatial dimensions
dilation: the spacing between the kernel points.
Example:
>>> B, C, D, H, W = 3, 4, 5, 6, 7
>>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)
>>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape
torch.Size([3, 32, 120])
Returns:
A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L is the product of the output spatial dimensions.
See :class:`torch.nn.Unfold` for more details | opacus/utils/tensor_utils.py | unfold3d | DaveBrind/SynthVAE | python | def unfold3d(tensor: torch.Tensor, kernel_size: Union[(int, Tuple[(int, int, int)])], padding: Union[(int, Tuple[(int, int, int)])]=0, stride: Union[(int, Tuple[(int, int, int)])]=1, dilation: Union[(int, Tuple[(int, int, int)])]=1):
'\n Extracts sliding local blocks from an batched input tensor.\n\n :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).\n This method implements the same action for 5D inputs\n\n Args:\n tensor: An input tensor of shape ``(B, C, D, H, W)``.\n kernel_size: the size of the sliding blocks\n padding: implicit zero padding to be added on both sides of input\n stride: the stride of the sliding blocks in the input spatial dimensions\n dilation: the spacing between the kernel points.\n\n Example:\n >>> B, C, D, H, W = 3, 4, 5, 6, 7\n >>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)\n >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape\n torch.Size([3, 32, 120])\n\n Returns:\n A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L - output spatial dimensions.\n See :class:`torch.nn.Unfold` for more details\n '
if (len(tensor.shape) != 5):
raise ValueError(f'Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}')
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(padding, int):
padding = (padding, padding, padding)
if isinstance(stride, int):
stride = (stride, stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
if (dilation != (1, 1, 1)):
raise NotImplementedError(f"dilation={dilation} not supported. We'd love a PR!")
(batch_size, channels, _, _, _) = tensor.shape
tensor = F.pad(tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]))
tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
tensor = tensor.reshape(batch_size, (- 1), (channels * np.prod(kernel_size))).transpose(1, 2)
return tensor |
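A hedged shape check for the unfold3d record above, assuming only PyTorch; the expected shape follows from the record's own docstring formula.

import torch
# assumes unfold3d from the record above is in scope
x = torch.arange(2 * 3 * 4 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4, 4)  # [B, C, D, H, W]
out = unfold3d(x, kernel_size=2, padding=0, stride=2)
# each spatial dimension yields (4 - 2) // 2 + 1 = 2 blocks, so L = 2 * 2 * 2 = 8
# and C * prod(kernel_size) = 3 * 8 = 24
print(out.shape)  # torch.Size([2, 24, 8])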
def set_logger(log_path):
'Set the logger to log info in terminal and file `log_path`.\n\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. Here we save it to `model_dir/train.log`.\n\n Example:\n ```\n logging.info("Starting training...")\n ```\n\n Args:\n log_path: (string) where to log\n '
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (not logger.handlers):
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler) | 9,111,767,959,850,705,000 | Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log | utils.py | set_logger | haamoon/finding_common_object | python | def set_logger(log_path):
'Set the logger to log info in terminal and file `log_path`.\n\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. Here we save it to `model_dir/train.log`.\n\n Example:\n ```\n logging.info("Starting training...")\n ```\n\n Args:\n log_path: (string) where to log\n '
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if (not logger.handlers):
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler) |
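A small usage sketch for the set_logger record above; the 'experiments/base_model' directory is an illustrative path, not one taken from the repository.

import logging
import os
# assumes set_logger from the record above is in scope
model_dir = 'experiments/base_model'              # illustrative path
os.makedirs(model_dir, exist_ok=True)
set_logger(os.path.join(model_dir, 'train.log'))
logging.info('Starting training...')              # written to both the terminal and train.log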
def copy_most_recent_model():
" Copy the most recent model to the 'models/' directory "
best_model = get_most_recent_model()
if best_model:
print('Warm-starting from {}'.format(best_model), end='', flush=True)
for blob in bucket.list_blobs(prefix=best_model):
dest_file = 'models/{}/{}'.format(basename(best_model), basename(blob.name))
if (not blob_already_exists(blob, dest_file)):
if isfile(dest_file):
makedirs(dirname(dest_file), exist_ok=True)
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print()
return best_model | -1,350,659,773,957,631,200 | Copy the most recent model to the 'models/' directory | contrib/distr-env/dg_storage.py | copy_most_recent_model | Chicoryn/dream-go | python | def copy_most_recent_model():
" "
best_model = get_most_recent_model()
if best_model:
print('Warm-starting from {}'.format(best_model), end='', flush=True)
for blob in bucket.list_blobs(prefix=best_model):
dest_file = 'models/{}/{}'.format(basename(best_model), basename(blob.name))
if (not blob_already_exists(blob, dest_file)):
if isfile(dest_file):
makedirs(dirname(dest_file), exist_ok=True)
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print()
return best_model |
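The dg_storage.py records reference a module-level bucket and a blob_already_exists helper that are not shown in this excerpt. The following is only a hedged sketch of what they might look like, assuming the google-cloud-storage client and a bucket name taken from an environment variable; neither assumption is confirmed by the repository.

from os import getenv
from os.path import getsize, isfile
from google.cloud import storage

# Assumption: the bucket handle comes from an environment variable; the real script may differ.
bucket = storage.Client().bucket(getenv('DG_STORAGE_BUCKET', 'dream-go'))

def blob_already_exists(blob, dest_file):
    # Hedged sketch: treat a local file with the same size as already downloaded.
    return isfile(dest_file) and getsize(dest_file) == blob.size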
def wait_until_all_models_rated():
' Wait until all models has been assigned an ELO score. '
while True:
models = {}
for blob in bucket.list_blobs(prefix='models/'):
if (blob.size > 0):
models[dirname(blob.name)] = True
if (blob.metadata and ('elo' in blob.metadata)):
return True
if (len(models) <= 1):
return True
sleep(600) | 5,103,271,914,473,873,000 | Wait until all models have been assigned an ELO score. | contrib/distr-env/dg_storage.py | wait_until_all_models_rated | Chicoryn/dream-go | python | def wait_until_all_models_rated():
' '
while True:
models = {}
for blob in bucket.list_blobs(prefix='models/'):
if (blob.size > 0):
models[dirname(blob.name)] = True
if (blob.metadata and ('elo' in blob.metadata)):
return True
if (len(models) <= 1):
return True
sleep(600) |
def copy_most_recent_games():
' Download the 200,000 most recent games, each file should\n contain 1,000 game records. So we need to download the 200\n most recent files. '
files = []
blobs = sorted([blob for blob in bucket.list_blobs(prefix='games/') if (blob.size > 0)], key=(lambda blob: blob.time_created))
print('Loading training data...', end='', flush=True)
for blob in blobs[(- 200):]:
dest_file = 'data/{}'.format(basename(blob.name))
files += (dest_file,)
if (not blob_already_exists(blob, dest_file)):
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print('', flush=True)
return files | 7,008,108,335,458,877,000 | Download the 200,000 most recent games; each file should
contain 1,000 game records. So we need to download the 200
most recent files. | contrib/distr-env/dg_storage.py | copy_most_recent_games | Chicoryn/dream-go | python | def copy_most_recent_games():
' Download the 200,000 most recent games, each file should\n contain 1,000 game records. So we need to download the 200\n most recent files. '
files = []
blobs = sorted([blob for blob in bucket.list_blobs(prefix='games/') if (blob.size > 0)], key=(lambda blob: blob.time_created))
print('Loading training data...', end='', flush=True)
for blob in blobs[(- 200):]:
dest_file = 'data/{}'.format(basename(blob.name))
files += (dest_file,)
if (not blob_already_exists(blob, dest_file)):
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print('', flush=True)
return files |
def upload_next_model(next_model):
' Upload the specified model to google storage. '
for src_file in glob('models/*{}/*'.format(next_model)):
if isfile(src_file):
print('Uploading', src_file)
blob = bucket.blob(src_file)
blob.upload_from_filename(filename=src_file) | 2,930,675,446,021,968,000 | Upload the specified model to google storage. | contrib/distr-env/dg_storage.py | upload_next_model | Chicoryn/dream-go | python | def upload_next_model(next_model):
' '
for src_file in glob('models/*{}/*'.format(next_model)):
if isfile(src_file):
print('Uploading', src_file)
blob = bucket.blob(src_file)
blob.upload_from_filename(filename=src_file) |
def upload_next_network(next_model, data, args=None):
' Upload the specified network to google storage. '
blob = bucket.blob('networks/{}.json'.format(next_model))
blob.metadata = {'args': json.dumps(args, sort_keys=True), 'rev': getenv('GIT_REV')}
blob.upload_from_string(data, 'application/json') | 20,544,352,858,846,290 | Upload the specified network to google storage. | contrib/distr-env/dg_storage.py | upload_next_network | Chicoryn/dream-go | python | def upload_next_network(next_model, data, args=None):
' '
blob = bucket.blob('networks/{}.json'.format(next_model))
blob.metadata = {'args': json.dumps(args, sort_keys=True), 'rev': getenv('GIT_REV')}
blob.upload_from_string(data, 'application/json') |
def upload_game_records(data, from_network=None, env=None, args=None):
' Upload the specified game records to google storage. '
dest_file = 'games/{}.sgf'.format(datetime.now().strftime('%Y%m%d.%H%M'))
print('Uploading', dest_file)
blob = bucket.blob(dest_file)
blob.metadata = {'args': json.dumps(args, sort_keys=True), 'env': json.dumps(env, sort_keys=True), 'network': from_network, 'rev': getenv('GIT_REV')}
blob.upload_from_string(data, 'application/x-go-sgf') | -2,438,408,460,449,279,500 | Upload the specified game records to google storage. | contrib/distr-env/dg_storage.py | upload_game_records | Chicoryn/dream-go | python | def upload_game_records(data, from_network=None, env=None, args=None):
' '
dest_file = 'games/{}.sgf'.format(datetime.now().strftime('%Y%m%d.%H%M'))
print('Uploading', dest_file)
blob = bucket.blob(dest_file)
blob.metadata = {'args': json.dumps(args, sort_keys=True), 'env': json.dumps(env, sort_keys=True), 'network': from_network, 'rev': getenv('GIT_REV')}
blob.upload_from_string(data, 'application/x-go-sgf') |
def events(self, event_id: int=None, _from: str=None, published: bool=True, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Esta API fornece acesso às informações de eventos criados na plataforma Sympla, exclusivamente aqueles vinculados ao usuário proprietário do token.\n\n A API também permite a personalização dos resultados, possibilitando filtrar eventos dentro de uma janela de data ou restringir quais campos são relevantes e devem ser exibidos no retorno, como apenas nome do evento e descrição.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Eventos\n '
path = 'events'
if (event_id is not None):
path = f'events/{event_id}'
params = {'from': _from, 'published': published, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | -4,554,569,738,001,877,000 | This API provides access to information about events created on the Sympla platform, exclusively those linked to the user who owns the token.
The API also allows the results to be customized, making it possible to filter events within a date window or to restrict which fields are relevant and should be shown in the response, such as only the event name and description.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#tag/Eventos | pysympla/sympla.py | events | hudsonbrendon/pysympla | python | def events(self, event_id: int=None, _from: str=None, published: bool=True, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Esta API fornece acesso às informações de eventos criados na plataforma Sympla, exclusivamente aqueles vinculados ao usuário proprietário do token.\n\n A API também permite a personalização dos resultados, possibilitando filtrar eventos dentro de uma janela de data ou restringir quais campos são relevantes e devem ser exibidos no retorno, como apenas nome do evento e descrição.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Eventos\n '
path = 'events'
if (event_id is not None):
path = f'events/{event_id}'
params = {'from': _from, 'published': published, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
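A hedged usage sketch for the events record above; the Sympla class name and its token-based constructor are assumptions based on the pysympla package layout and are not confirmed by this excerpt.

# Assumption: pysympla exposes a Sympla client that takes the API token.
from pysympla import Sympla

sympla = Sympla(token='YOUR-SYMPLA-TOKEN')
events = sympla.events(page_size=10, page=1, sort='DESC')   # first page of published events
one_event = sympla.events(event_id=123456)                  # 123456 is an illustrative event id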
def orders_by_event(self, event_id: int, status: bool=False, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna os pedidos de um determinado evento.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getListOrders\n\n :param event_id: Identificador único do evento\n\n :param status: Retorna todos os pedidos com qualquer status.\n True: Retorna os pedidos de todos os status;\n False: Retorna apenas os pedidos com status "A".\n\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders'
params = {'status': status, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | 4,699,294,017,509,968,000 | Returns the orders of a given event.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getListOrders
:param event_id: Unique identifier of the event
:param status: Returns all orders with any status.
True: returns the orders of every status;
False: returns only the orders with status "A".
:param page_size: Specifies how many records per page the user wants. Minimum 1 and maximum 200.
:param page: Page number of the results.
:param field_sort: Allows the results to be sorted.
:param sort: Sorts by 'ASC' or 'DESC'
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | orders_by_event | hudsonbrendon/pysympla | python | def orders_by_event(self, event_id: int, status: bool=False, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna os pedidos de um determinado evento.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getListOrders\n\n :param event_id: Identificador único do evento\n\n :param status: Retorna todos os pedidos com qualquer status.\n True: Retorna os pedidos de todos os status;\n False: Retorna apenas os pedidos com status "A".\n\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders'
params = {'status': status, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def order_by_identifier(self, event_id: int, order_id: str, fields: str=None) -> dict:
'\n Retorna o pedido correspondente ao identificador informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneOrder\n\n :param event_id: Identificador único do evento\n :param order_id: id do pedido\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders/{order_id}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | -428,633,821,227,176,300 | Returns the order corresponding to the given identifier.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getOneOrder
:param event_id: Unique identifier of the event
:param order_id: id of the order
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | order_by_identifier | hudsonbrendon/pysympla | python | def order_by_identifier(self, event_id: int, order_id: str, fields: str=None) -> dict:
'\n Retorna o pedido correspondente ao identificador informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneOrder\n\n :param event_id: Identificador único do evento\n :param order_id: id do pedido\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders/{order_id}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def participants_by_order(self, event_id: int, order_id: str, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna o(s) participante(s) contido(s) em um determinado pedido.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipantsForOrder\n\n :param event_id: Identificador único do evento\n :param order_id: Identificador único do pedido\n\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders/{order_id}/participants'
params = {'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | -4,891,483,069,128,448,000 | Returns the participant(s) contained in a given order.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipantsForOrder
:param event_id: Unique identifier of the event
:param order_id: Unique identifier of the order
:param page_size: Specifies how many records per page the user wants. Minimum 1 and maximum 200.
:param page: Page number of the results.
:param field_sort: Allows the results to be sorted.
:param sort: Sorts by 'ASC' or 'DESC'
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | participants_by_order | hudsonbrendon/pysympla | python | def participants_by_order(self, event_id: int, order_id: str, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna o(s) participante(s) contido(s) em um determinado pedido.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipantsForOrder\n\n :param event_id: Identificador único do evento\n :param order_id: Identificador único do pedido\n\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/orders/{order_id}/participants'
params = {'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def participants_by_event(self, event_id: int, ticket_number: str=None, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna os participantes de um determinado evento.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipants\n\n :param event_id: Identificador único do evento\n\n :param ticket_number: Código escrito no ingresso.\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants'
params = {'ticket_number': ticket_number, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | 6,278,634,500,556,122,000 | Returns the participants of a given event.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipants
:param event_id: Unique identifier of the event
:param ticket_number: Code written on the ticket.
:param page_size: Specifies how many records per page the user wants. Minimum 1 and maximum 200.
:param page: Page number of the results.
:param field_sort: Allows the results to be sorted.
:param sort: Sorts by 'ASC' or 'DESC'
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | participants_by_event | hudsonbrendon/pysympla | python | def participants_by_event(self, event_id: int, ticket_number: str=None, page_size: int=100, page: int=1, field_sort: str=None, sort: str='ASC', fields: str=None) -> dict:
'\n Retorna os participantes de um determinado evento.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipants\n\n :param event_id: Identificador único do evento\n\n :param ticket_number: Código escrito no ingresso.\n :param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.\n :param page: Número da página dos resultados.\n :param field_sort: Permite que os resultados sejam ordenados.\n :param sort: Ordena por \'ASC\' ou \'DESC\'\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants'
params = {'ticket_number': ticket_number, 'page_size': page_size, 'page': page, 'field_sort': field_sort, 'sort': sort, 'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def participant_by_ticket_id(self, event_id: int, participant_id: int, fields: str=None) -> dict:
'\n Retorna o participante correspondente ao ingresso informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipant\n\n :param event_id: Identificador único do evento\n :param participant_id: Identificador único do ingresso\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants/{participant_id}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | -7,480,940,268,470,330,000 | Returns the participant corresponding to the given ticket.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipant
:param event_id: Unique identifier of the event
:param participant_id: Unique identifier of the ticket
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | participant_by_ticket_id | hudsonbrendon/pysympla | python | def participant_by_ticket_id(self, event_id: int, participant_id: int, fields: str=None) -> dict:
'\n Retorna o participante correspondente ao ingresso informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipant\n\n :param event_id: Identificador único do evento\n :param participant_id: Identificador único do ingresso\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants/{participant_id}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def participant_by_ticket_number(self, event_id: int, ticket_number: str, fields: str=None) -> dict:
'\n Retorna o participante correspondente ao ingresso informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipantByTicketNumber\n\n :param event_id: Identificador único do evento\n :param ticket_number: Número do ingresso\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants/ticketNumber/{ticket_number}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request | -8,268,009,442,225,868,000 | Returns the participant corresponding to the given ticket.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipantByTicketNumber
:param event_id: Unique identifier of the event
:param ticket_number: Ticket number
:param fields: Should be used to return only the indicated attributes of the object.
The indicated attributes must be separated by ",". | pysympla/sympla.py | participant_by_ticket_number | hudsonbrendon/pysympla | python | def participant_by_ticket_number(self, event_id: int, ticket_number: str, fields: str=None) -> dict:
'\n Retorna o participante correspondente ao ingresso informado.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipantByTicketNumber\n\n :param event_id: Identificador único do evento\n :param ticket_number: Número do ingresso\n\n :param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.\n Os atributos indicados devem ser separados por ",".\n '
path: str = f'events/{event_id}/participants/ticketNumber/{ticket_number}'
params = {'fields': fields}
request = self._request(method='get', path=path, params=params)
return request |
def checkin_by_ticket_id(self, event_id: int, participant_id: int) -> dict:
'\n Realiza o check-in de um participante por id do ingresso.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByParticipantId\n\n :param event_id: Identificador único do evento\n :param participant_id: Identificador único do ingresso\n '
path: str = f'events/{event_id}/participants/{participant_id}/checkIn'
request = self._request(method='post', path=path)
return request | 5,844,175,330,238,192,000 | Performs the check-in of a participant by ticket id.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByParticipantId
:param event_id: Unique identifier of the event
:param participant_id: Unique identifier of the ticket | pysympla/sympla.py | checkin_by_ticket_id | hudsonbrendon/pysympla | python | def checkin_by_ticket_id(self, event_id: int, participant_id: int) -> dict:
'\n Realiza o check-in de um participante por id do ingresso.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByParticipantId\n\n :param event_id: Identificador único do evento\n :param participant_id: Identificador único do ingresso\n '
path: str = f'events/{event_id}/participants/{participant_id}/checkIn'
request = self._request(method='post', path=path)
return request |
def checkin_by_ticket_number(self, event_id: int, ticket_number: str) -> dict:
'\n Realiza o check-in de um participante por número do ingresso.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByTicketNumber\n\n :param event_id: Identificador único do evento\n :param ticket_number: Número do ingresso\n '
path: str = f'events/{event_id}/participants/ticketNumber/{ticket_number}/checkIn'
request = self._request(method='post', path=path)
return request | -1,569,102,426,574,836,500 | Performs the check-in of a participant by ticket number.
To learn more, visit: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByTicketNumber
:param event_id: Unique identifier of the event
:param ticket_number: Ticket number | pysympla/sympla.py | checkin_by_ticket_number | hudsonbrendon/pysympla | python | def checkin_by_ticket_number(self, event_id: int, ticket_number: str) -> dict:
'\n Realiza o check-in de um participante por número do ingresso.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByTicketNumber\n\n :param event_id: Identificador único do evento\n :param ticket_number: Número do ingresso\n '
path: str = f'events/{event_id}/participants/ticketNumber/{ticket_number}/checkIn'
request = self._request(method='post', path=path)
return request |
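A hedged sketch combining the two check-in records above: look a participant up by ticket number, then check them in; the sympla client is the same assumption as in the earlier sketch, and the response is assumed to wrap results in a 'data' key.

# Assumption: `sympla` is a configured client as sketched earlier; the ids below are illustrative.
event_id = 123456
ticket_number = 'ABC-123-DEF'
participant = sympla.participant_by_ticket_number(event_id, ticket_number)
participant_id = participant['data']['id']             # assumes a 'data' wrapper in the JSON response
sympla.checkin_by_ticket_id(event_id, participant_id)
# or, equivalently, check in directly by ticket number:
sympla.checkin_by_ticket_number(event_id, ticket_number)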
def affiliates(self, event_id: int) -> dict:
'\n Esta API fornece acesso às informações relativas ao programa de afiliados e seus respectivos afiliados.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Afiliados\n\n :param event_id: Identificador único do evento\n '
path: str = f'events/{event_id}/affiliates'
request = self._request(method='get', path=path)
return request | -4,809,683,616,987,814,000 | Esta API fornece acesso às informações relativas ao programa de afiliados e seus respectivos afiliados.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Afiliados
:param event_id: Identificador único do evento | pysympla/sympla.py | affiliates | hudsonbrendon/pysympla | python | def affiliates(self, event_id: int) -> dict:
'\n Esta API fornece acesso às informações relativas ao programa de afiliados e seus respectivos afiliados.\n\n Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Afiliados\n\n :param event_id: Identificador único do evento\n '
path: str = f'events/{event_id}/affiliates'
request = self._request(method='get', path=path)
return request |
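The three Sympla rows above all follow the same pattern: build a path from the event id, call `self._request`, and return the parsed response. Below is a minimal usage sketch; the `Sympla` class name, its import path, and the `token=` constructor argument are assumptions based on the `pysympla` repository name, not something stated in the rows themselves.

```python
# Hypothetical usage sketch for the pysympla client shown in the rows above.
# Only the method names and parameters come from the dataset rows; the class
# name, import path and token argument are assumptions.
from pysympla import Sympla  # assumed import path

client = Sympla(token="YOUR_API_TOKEN")

# Check a participant in by ticket id, then by ticket number (illustrative ids).
client.checkin_by_ticket_id(event_id=123456, participant_id=987654)
client.checkin_by_ticket_number(event_id=123456, ticket_number="ABC123")

# List affiliates for the same event.
affiliates = client.affiliates(event_id=123456)
print(affiliates)
```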
def _normalize(tensor, norm_layer):
'\n Broadcast layer norm\n '
size = tensor.size()
return norm_layer(tensor.view((- 1), size[(- 1)])).view(size) | -6,191,567,655,660,572,000 | Broadcast layer norm | parlai/agents/transformer/modules.py | _normalize | jinjiren/ParlAI | python | def _normalize(tensor, norm_layer):
'\n \n '
size = tensor.size()
return norm_layer(tensor.view((- 1), size[(- 1)])).view(size) |
def _create_embeddings(dictionary, embedding_size, padding_idx):
'Create and initialize word embeddings.'
e = nn.Embedding(len(dictionary), embedding_size, padding_idx)
nn.init.normal_(e.weight, mean=0, std=(embedding_size ** (- 0.5)))
nn.init.constant_(e.weight[padding_idx], 0)
return e | 2,160,154,638,503,389,400 | Create and initialize word embeddings. | parlai/agents/transformer/modules.py | _create_embeddings | jinjiren/ParlAI | python | def _create_embeddings(dictionary, embedding_size, padding_idx):
e = nn.Embedding(len(dictionary), embedding_size, padding_idx)
nn.init.normal_(e.weight, mean=0, std=(embedding_size ** (- 0.5)))
nn.init.constant_(e.weight[padding_idx], 0)
return e |
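`_create_embeddings` scales the normal initialisation by `embedding_size ** -0.5` and zeroes the padding row, so padded positions contribute nothing downstream. The sketch below calls it with a toy list standing in for a ParlAI dictionary (only `len()` is needed); it assumes `_create_embeddings` from the row above is importable or in scope.

```python
import torch

# Toy stand-in for a ParlAI dictionary: only len() is used by _create_embeddings.
fake_dictionary = ["__null__", "__start__", "__end__", "hello", "world"]
padding_idx = 0

emb = _create_embeddings(fake_dictionary, embedding_size=16, padding_idx=padding_idx)

tokens = torch.tensor([[3, 4, 0, 0]])            # "hello world" plus two pads
vectors = emb(tokens)
print(vectors.shape)                             # torch.Size([1, 4, 16])
print(vectors[0, 2].abs().sum().item())          # 0.0, the padding row stays zero
```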
def forward(self, input):
'\n input data is a FloatTensor of shape [batch, seq_len, dim]\n mask is a ByteTensor of shape [batch, seq_len], filled with 1 when\n inside the sequence and 0 outside.\n '
mask = (input != self.padding_idx)
seq_len = input.size(1)
positions = input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = (tensor * np.sqrt(self.dim))
tensor = (tensor + self.position_embeddings(positions).expand_as(tensor))
tensor *= mask.unsqueeze((- 1)).float()
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.reduction:
divisor = mask.float().sum(dim=1).unsqueeze((- 1)).clamp(min=1e-20)
output = (tensor.sum(dim=1) / divisor)
return output
else:
output = tensor
return (output, mask) | 225,423,487,833,513,200 | input data is a FloatTensor of shape [batch, seq_len, dim]
mask is a ByteTensor of shape [batch, seq_len], filled with 1 when
inside the sequence and 0 outside. | parlai/agents/transformer/modules.py | forward | jinjiren/ParlAI | python | def forward(self, input):
'\n input data is a FloatTensor of shape [batch, seq_len, dim]\n mask is a ByteTensor of shape [batch, seq_len], filled with 1 when\n inside the sequence and 0 outside.\n '
mask = (input != self.padding_idx)
seq_len = input.size(1)
positions = input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = (tensor * np.sqrt(self.dim))
tensor = (tensor + self.position_embeddings(positions).expand_as(tensor))
tensor *= mask.unsqueeze((- 1)).float()
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.reduction:
divisor = mask.float().sum(dim=1).unsqueeze((- 1)).clamp(min=1e-20)
output = (tensor.sum(dim=1) / divisor)
return output
else:
output = tensor
return (output, mask) |
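The `reduction` branch of `forward` is a masked mean pool: per-token outputs are summed and divided by the number of non-padding positions, with the divisor clamped to avoid division by zero. The standalone sketch below reproduces just that pooling step on dummy tensors so the shapes are easy to follow.

```python
import torch

batch, seq_len, dim = 2, 4, 8
tensor = torch.randn(batch, seq_len, dim)             # pretend encoder outputs
mask = torch.tensor([[1, 1, 1, 0],                    # 1 inside the sequence, 0 on padding
                     [1, 1, 0, 0]], dtype=torch.bool)

tensor = tensor * mask.unsqueeze(-1).float()          # zero out padded positions
divisor = mask.float().sum(dim=1).unsqueeze(-1).clamp(min=1e-20)
pooled = tensor.sum(dim=1) / divisor                  # mean over real tokens only
print(pooled.shape)                                   # torch.Size([2, 8])
```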
def list_by_subscription(self, **kwargs):
'Lists ExpressRoute gateways under a given subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGatewayList, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.list_by_subscription.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | -6,477,420,243,702,260,000 | Lists ExpressRoute gateways under a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | list_by_subscription | Co0olboi/azure-sdk-for-python | python | def list_by_subscription(self, **kwargs):
'Lists ExpressRoute gateways under a given subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGatewayList, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.list_by_subscription.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
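This operation is normally reached through a generated management client rather than called directly. A hedged sketch of listing ExpressRoute gateways follows; `NetworkManagementClient` and `DefaultAzureCredential` are the usual entry points for azure-mgmt-network, but the exact wiring is an assumption, not something stated in the row.

```python
# Hedged sketch: list ExpressRoute gateways for a subscription.
# Client and credential names are assumptions based on the azure-mgmt-network SDK.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

credential = DefaultAzureCredential()
client = NetworkManagementClient(credential, subscription_id="<subscription-id>")

gateway_list = client.express_route_gateways.list_by_subscription()
for gateway in gateway_list.value or []:
    print(gateway.name, gateway.location)
```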
def list_by_resource_group(self, resource_group_name, **kwargs):
'Lists ExpressRoute gateways in a given resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGatewayList, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 3,271,096,405,484,876,000 | Lists ExpressRoute gateways in a given resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | list_by_resource_group | Co0olboi/azure-sdk-for-python | python | def list_by_resource_group(self, resource_group_name, **kwargs):
'Lists ExpressRoute gateways in a given resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGatewayList, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
def begin_create_or_update(self, resource_group_name, express_route_gateway_name, put_express_route_gateway_parameters, **kwargs):
'Creates or updates a ExpressRoute gateway in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT\n operation.\n :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either ExpressRouteGateway or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, put_express_route_gateway_parameters=put_express_route_gateway_parameters, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | 630,927,761,922,093,800 | Creates or updates a ExpressRoute gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT
operation.
:type put_express_route_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | begin_create_or_update | Co0olboi/azure-sdk-for-python | python | def begin_create_or_update(self, resource_group_name, express_route_gateway_name, put_express_route_gateway_parameters, **kwargs):
'Creates or updates a ExpressRoute gateway in a specified resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT\n operation.\n :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either ExpressRouteGateway or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, put_express_route_gateway_parameters=put_express_route_gateway_parameters, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
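Because `begin_create_or_update` returns an `LROPoller`, callers block on `.result()` (or poll with `.done()`) to obtain the final `ExpressRouteGateway`. A minimal sketch follows, reusing the `client` assumption from the previous example; the resource names and parameter payload are illustrative, not a complete validated request.

```python
# Hedged sketch: start the long-running create/update and block until it finishes.
poller = client.express_route_gateways.begin_create_or_update(
    resource_group_name="my-rg",                       # illustrative names
    express_route_gateway_name="my-er-gateway",
    put_express_route_gateway_parameters={
        "location": "westus",
        "virtual_hub": {"id": "<virtual-hub-resource-id>"},
    },
)
gateway = poller.result()   # LROPoller.result() waits for the ARM operation
print(gateway.name)
```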
def get(self, resource_group_name, express_route_gateway_name, **kwargs):
'Fetches the details of a ExpressRoute gateway in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGateway, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url('express_route_gateway_name', express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized | 7,877,672,485,163,125,000 | Fetches the details of a ExpressRoute gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway
:raises: ~azure.core.exceptions.HttpResponseError | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | get | Co0olboi/azure-sdk-for-python | python | def get(self, resource_group_name, express_route_gateway_name, **kwargs):
'Fetches the details of a ExpressRoute gateway in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ExpressRouteGateway, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2019-07-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'expressRouteGatewayName': self._serialize.url('express_route_gateway_name', express_route_gateway_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized |
def begin_delete(self, resource_group_name, express_route_gateway_name, **kwargs):
'Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway\n resource can only be deleted when there are no connection subresources.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | -4,264,051,642,790,468,000 | Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway
resource can only be deleted when there are no connection subresources.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError: | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | begin_delete | Co0olboi/azure-sdk-for-python | python | def begin_delete(self, resource_group_name, express_route_gateway_name, **kwargs):
'Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway\n resource can only be deleted when there are no connection subresources.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param express_route_gateway_name: The name of the ExpressRoute gateway.\n :type express_route_gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, express_route_gateway_name=express_route_gateway_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
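Deletion follows the same poller pattern; since the final response body is empty, `.result()` simply returns `None` once the gateway is gone. A short sketch, again assuming the `client` from the earlier example:

```python
delete_poller = client.express_route_gateways.begin_delete(
    resource_group_name="my-rg",
    express_route_gateway_name="my-er-gateway",
)
delete_poller.result()   # returns None when the delete completes
print("gateway deleted")
```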
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> bool:
'\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n True if the configuration has been validated successfully. Otherwise, raises an exception\n '
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
return True | 2,431,498,238,814,919,000 | Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception | great_expectations/expectations/core/expect_select_column_values_to_be_unique_within_record.py | validate_configuration | MeganBeckett/great_expectations | python | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> bool:
'\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n True if the configuration has been validated successfully. Otherwise, raises an exception\n '
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
return True |
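This `validate_configuration` belongs to `expect_select_column_values_to_be_unique_within_record`; in practice the configuration is built implicitly when the expectation is invoked on a validator. A hedged sketch of that call is below; the `validator` object is assumed to come from a standard Great Expectations workflow and the column names are illustrative.

```python
# Hedged sketch: invoking the expectation whose validate_configuration is shown above.
result = validator.expect_select_column_values_to_be_unique_within_record(
    column_list=["order_id", "line_item_id"],   # illustrative column names
    mostly=0.95,
)
print(result.success)
```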
def __init__(self, string):
' Initialize the exception\n :param string: The message to append to the error\n '
self.string = string | -3,299,297,612,292,880,400 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string):
' Initialize the exception\n :param string: The message to append to the error\n '
self.string = string |
def isError(self):
'Error'
return True | -4,660,284,373,349,241,000 | Error | pymodbus/exceptions.py | isError | Biondoap/pymodbus | python | def isError(self):
return True |
def __init__(self, string='', function_code=None):
' Initialize the exception\n :param string: The message to append to the error\n '
self.fcode = function_code
self.message = ('[Input/Output] %s' % string)
ModbusException.__init__(self, self.message) | 5,281,375,963,429,550,000 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=, function_code=None):
' Initialize the exception\n :param string: The message to append to the error\n '
self.fcode = function_code
self.message = ('[Input/Output] %s' % string)
ModbusException.__init__(self, self.message) |
def __init__(self, string=''):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Invalid Parameter] %s' % string)
ModbusException.__init__(self, message) | -2,489,978,286,978,406,400 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Invalid Parameter] %s' % string)
ModbusException.__init__(self, message) |
def __init__(self, string=''):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[No Such Slave] %s' % string)
ModbusException.__init__(self, message) | 2,132,472,531,180,455,200 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[No Such Slave] %s' % string)
ModbusException.__init__(self, message) |
def __init__(self, string=''):
' Initialize the exception\n :param string: The message to append to the error\n '
message = ('[Not Implemented] %s' % string)
ModbusException.__init__(self, message) | 5,851,030,548,588,889,000 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=):
' Initialize the exception\n :param string: The message to append to the error\n '
message = ('[Not Implemented] %s' % string)
ModbusException.__init__(self, message) |
def __init__(self, string=''):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Connection] %s' % string)
ModbusException.__init__(self, message) | 1,008,842,865,108,339,600 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Connection] %s' % string)
ModbusException.__init__(self, message) |
def __init__(self, string=''):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Invalid Message] %s' % string)
ModbusException.__init__(self, message) | 6,749,463,681,928,901,000 | Initialize the exception
:param string: The message to append to the error | pymodbus/exceptions.py | __init__ | Biondoap/pymodbus | python | def __init__(self, string=):
' Initialize the exception\n\n :param string: The message to append to the error\n '
message = ('[Invalid Message] %s' % string)
ModbusException.__init__(self, message) |
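These exception classes all just prefix the message and delegate to `ModbusException`, so client code typically catches the base class and inspects the string. A small sketch follows; the `client` object is assumed to be a pymodbus synchronous client and is not defined in the rows above.

```python
from pymodbus.exceptions import ConnectionException, ModbusException

def read_with_guard(client):
    """Illustrative wrapper: `client` is assumed to be a pymodbus client object."""
    try:
        if not client.connect():
            raise ConnectionException("unable to reach Modbus server")
        return client.read_holding_registers(address=0, count=4)
    except ModbusException as exc:   # the base class catches every variant above
        print(f"modbus failure: {exc}")
        return None
```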
def _send_request(self, request, **kwargs):
'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = client._send_request(request)\n <HttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.HttpResponse\n '
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | -703,176,707,303,729,200 | Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse | docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/_multiapi_service_client.py | _send_request | changlong-liu/autorest.python | python | def _send_request(self, request, **kwargs):
'Runs the network request through the client\'s chained policies.\n\n >>> from azure.core.rest import HttpRequest\n >>> request = HttpRequest("GET", "https://www.example.org/")\n <HttpRequest [GET], url: \'https://www.example.org/\'>\n >>> response = client._send_request(request)\n <HttpResponse: 200 OK>\n\n For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart\n\n :param request: The network request you want to make. Required.\n :type request: ~azure.core.rest.HttpRequest\n :keyword bool stream: Whether the response payload will be streamed. Defaults to False.\n :return: The response of your network call. Does not do error handling on your response.\n :rtype: ~azure.core.rest.HttpResponse\n '
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) |
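`_send_request` is the escape hatch its own docstring documents: build an `azure.core.rest.HttpRequest`, run it through the client pipeline, and handle the raw response yourself. A short sketch, assuming `client` is an instance of the generated service client and using an illustrative path:

```python
from azure.core.rest import HttpRequest

request = HttpRequest("GET", "/operations")     # illustrative relative path
response = client._send_request(request)        # no error handling is done for you
if response.status_code == 200:
    print(response.json())
else:
    response.raise_for_status()
```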
def configure_trigger(cam):
'\n This function configures the camera to use a trigger. First, trigger mode is\n set to off in order to select the trigger source. Once the trigger source\n has been selected, trigger mode is then enabled, which has the camera\n capture only a single image upon the execution of the chosen trigger.\n\n :param cam: Camera to configure trigger for.\n :type cam: CameraPtr\n :return: True if successful, False otherwise.\n :rtype: bool\n '
result = True
print('*** CONFIGURING TRIGGER ***\n')
print('Note that if the application / user software triggers faster than frame time, the trigger may be dropped / skipped by the camera.\n')
print('If several frames are needed per trigger, a more reliable alternative for such case, is to use the multi-frame mode.\n\n')
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
print('Software trigger chosen ...')
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
print('Hardware trigger chosen ...')
try:
nodemap = cam.GetNodeMap()
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if ((not PySpin.IsAvailable(node_trigger_mode)) or (not PySpin.IsReadable(node_trigger_mode))):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if ((not PySpin.IsAvailable(node_trigger_mode_off)) or (not PySpin.IsReadable(node_trigger_mode_off))):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
node_trigger_selector = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSelector'))
if ((not PySpin.IsAvailable(node_trigger_selector)) or (not PySpin.IsWritable(node_trigger_selector))):
print('Unable to get trigger selector (node retrieval). Aborting...')
return False
node_trigger_selector_framestart = node_trigger_selector.GetEntryByName('FrameStart')
if ((not PySpin.IsAvailable(node_trigger_selector_framestart)) or (not PySpin.IsReadable(node_trigger_selector_framestart))):
print('Unable to set trigger selector (enum entry retrieval). Aborting...')
return False
node_trigger_selector.SetIntValue(node_trigger_selector_framestart.GetValue())
print('Trigger selector set to frame start...')
node_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSource'))
if ((not PySpin.IsAvailable(node_trigger_source)) or (not PySpin.IsWritable(node_trigger_source))):
print('Unable to get trigger source (node retrieval). Aborting...')
return False
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
node_trigger_source_software = node_trigger_source.GetEntryByName('Software')
if ((not PySpin.IsAvailable(node_trigger_source_software)) or (not PySpin.IsReadable(node_trigger_source_software))):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_software.GetValue())
print('Trigger source set to software...')
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
node_trigger_source_hardware = node_trigger_source.GetEntryByName('Line0')
if ((not PySpin.IsAvailable(node_trigger_source_hardware)) or (not PySpin.IsReadable(node_trigger_source_hardware))):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_hardware.GetValue())
print('Trigger source set to hardware...')
node_trigger_mode_on = node_trigger_mode.GetEntryByName('On')
if ((not PySpin.IsAvailable(node_trigger_mode_on)) or (not PySpin.IsReadable(node_trigger_mode_on))):
print('Unable to enable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_on.GetValue())
print('Trigger mode turned back on...')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result | 7,378,235,373,236,104,000 | This function configures the camera to use a trigger. First, trigger mode is
set to off in order to select the trigger source. Once the trigger source
has been selected, trigger mode is then enabled, which has the camera
capture only a single image upon the execution of the chosen trigger.
:param cam: Camera to configure trigger for.
:type cam: CameraPtr
:return: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | configure_trigger | BevanLab/Recording_Script | python | def configure_trigger(cam):
'\n This function configures the camera to use a trigger. First, trigger mode is\n set to off in order to select the trigger source. Once the trigger source\n has been selected, trigger mode is then enabled, which has the camera\n capture only a single image upon the execution of the chosen trigger.\n\n :param cam: Camera to configure trigger for.\n :type cam: CameraPtr\n :return: True if successful, False otherwise.\n :rtype: bool\n '
result = True
print('*** CONFIGURING TRIGGER ***\n')
print('Note that if the application / user software triggers faster than frame time, the trigger may be dropped / skipped by the camera.\n')
print('If several frames are needed per trigger, a more reliable alternative for such case, is to use the multi-frame mode.\n\n')
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
print('Software trigger chosen ...')
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
print('Hardware trigger chosen ...')
try:
nodemap = cam.GetNodeMap()
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if ((not PySpin.IsAvailable(node_trigger_mode)) or (not PySpin.IsReadable(node_trigger_mode))):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if ((not PySpin.IsAvailable(node_trigger_mode_off)) or (not PySpin.IsReadable(node_trigger_mode_off))):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
node_trigger_selector = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSelector'))
if ((not PySpin.IsAvailable(node_trigger_selector)) or (not PySpin.IsWritable(node_trigger_selector))):
print('Unable to get trigger selector (node retrieval). Aborting...')
return False
node_trigger_selector_framestart = node_trigger_selector.GetEntryByName('FrameStart')
if ((not PySpin.IsAvailable(node_trigger_selector_framestart)) or (not PySpin.IsReadable(node_trigger_selector_framestart))):
print('Unable to set trigger selector (enum entry retrieval). Aborting...')
return False
node_trigger_selector.SetIntValue(node_trigger_selector_framestart.GetValue())
print('Trigger selector set to frame start...')
node_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSource'))
if ((not PySpin.IsAvailable(node_trigger_source)) or (not PySpin.IsWritable(node_trigger_source))):
print('Unable to get trigger source (node retrieval). Aborting...')
return False
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
node_trigger_source_software = node_trigger_source.GetEntryByName('Software')
if ((not PySpin.IsAvailable(node_trigger_source_software)) or (not PySpin.IsReadable(node_trigger_source_software))):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_software.GetValue())
print('Trigger source set to software...')
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
node_trigger_source_hardware = node_trigger_source.GetEntryByName('Line0')
if ((not PySpin.IsAvailable(node_trigger_source_hardware)) or (not PySpin.IsReadable(node_trigger_source_hardware))):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_hardware.GetValue())
print('Trigger source set to hardware...')
node_trigger_mode_on = node_trigger_mode.GetEntryByName('On')
if ((not PySpin.IsAvailable(node_trigger_mode_on)) or (not PySpin.IsReadable(node_trigger_mode_on))):
print('Unable to enable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_on.GetValue())
print('Trigger mode turned back on...')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result |
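Most of `configure_trigger` is the same four-step dance repeated for every enumeration node: fetch the node, check availability, fetch the named entry, write its integer value. The helper below captures that pattern using only the PySpin calls already present in the row; the example calls in the trailing comments mirror the trigger setup above.

```python
import PySpin

def set_enum_node(nodemap, node_name, entry_name):
    """Set an enumeration node to a named entry; returns False instead of raising."""
    node = PySpin.CEnumerationPtr(nodemap.GetNode(node_name))
    if not PySpin.IsAvailable(node) or not PySpin.IsWritable(node):
        print(f"Unable to access {node_name}. Aborting...")
        return False
    entry = node.GetEntryByName(entry_name)
    if not PySpin.IsAvailable(entry) or not PySpin.IsReadable(entry):
        print(f"Unable to read entry {entry_name} of {node_name}. Aborting...")
        return False
    node.SetIntValue(entry.GetValue())
    return True

# The body of configure_trigger could then be expressed as:
# set_enum_node(nodemap, 'TriggerMode', 'Off')
# set_enum_node(nodemap, 'TriggerSelector', 'FrameStart')
# set_enum_node(nodemap, 'TriggerSource', 'Software')
# set_enum_node(nodemap, 'TriggerMode', 'On')
```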
def grab_next_image_by_trigger(nodemap, cam):
'\n This function acquires an image by executing the trigger node.\n\n :param cam: Camera to acquire images from.\n :param nodemap: Device nodemap.\n :type cam: CameraPtr\n :type nodemap: INodeMap\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
input('Press the Enter key to initiate software trigger.')
node_softwaretrigger_cmd = PySpin.CCommandPtr(nodemap.GetNode('TriggerSoftware'))
if ((not PySpin.IsAvailable(node_softwaretrigger_cmd)) or (not PySpin.IsWritable(node_softwaretrigger_cmd))):
print('Unable to execute trigger. Aborting...')
return False
node_softwaretrigger_cmd.Execute()
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
print('Use the hardware to trigger image acquisition.')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result | -2,424,736,275,929,776,000 | This function acquires an image by executing the trigger node.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | grab_next_image_by_trigger | BevanLab/Recording_Script | python | def grab_next_image_by_trigger(nodemap, cam):
'\n This function acquires an image by executing the trigger node.\n\n :param cam: Camera to acquire images from.\n :param nodemap: Device nodemap.\n :type cam: CameraPtr\n :type nodemap: INodeMap\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
if (CHOSEN_TRIGGER == TriggerType.SOFTWARE):
input('Press the Enter key to initiate software trigger.')
node_softwaretrigger_cmd = PySpin.CCommandPtr(nodemap.GetNode('TriggerSoftware'))
if ((not PySpin.IsAvailable(node_softwaretrigger_cmd)) or (not PySpin.IsWritable(node_softwaretrigger_cmd))):
print('Unable to execute trigger. Aborting...')
return False
node_softwaretrigger_cmd.Execute()
elif (CHOSEN_TRIGGER == TriggerType.HARDWARE):
print('Use the hardware to trigger image acquisition.')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result |
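When triggering programmatically (for example from a timer rather than the `input()` prompt above), the software-trigger command node can be executed directly. A small sketch using only the PySpin calls shown in the row:

```python
import PySpin

def fire_software_trigger(nodemap):
    """Execute the TriggerSoftware command node once; returns True on success."""
    node = PySpin.CCommandPtr(nodemap.GetNode('TriggerSoftware'))
    if not PySpin.IsAvailable(node) or not PySpin.IsWritable(node):
        return False
    node.Execute()
    return True
```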
def acquire_images(cam, nodemap, nodemap_tldevice):
'\n This function acquires and saves 10 images from a device.\n Please see Acquisition example for more in-depth comments on acquiring images.\n\n :param cam: Camera to acquire images from.\n :param nodemap: Device nodemap.\n :param nodemap_tldevice: Transport layer device nodemap.\n :type cam: CameraPtr\n :type nodemap: INodeMap\n :type nodemap_tldevice: INodeMap\n :return: True if successful, False otherwise.\n :rtype: bool\n '
print('*** IMAGE ACQUISITION ***\n')
try:
result = True
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if ((not PySpin.IsAvailable(node_acquisition_mode)) or (not PySpin.IsWritable(node_acquisition_mode))):
print('Unable to set acquisition mode to continuous (enum retrieval). Aborting...')
return False
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if ((not PySpin.IsAvailable(node_acquisition_mode_continuous)) or (not PySpin.IsReadable(node_acquisition_mode_continuous))):
print('Unable to set acquisition mode to continuous (entry retrieval). Aborting...')
return False
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print('Acquisition mode set to continuous...')
cam.BeginAcquisition()
print('Acquiring images...')
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if (PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number)):
device_serial_number = node_device_serial_number.GetValue()
print(('Device serial number retrieved as %s...' % device_serial_number))
for i in range(NUM_IMAGES):
try:
result &= grab_next_image_by_trigger(nodemap, cam)
image_result = cam.GetNextImage(1000)
if image_result.IsIncomplete():
print(('Image incomplete with image status %d ...' % image_result.GetImageStatus()))
else:
width = image_result.GetWidth()
height = image_result.GetHeight()
print(('Grabbed Image %d, width = %d, height = %d' % (i, width, height)))
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
if device_serial_number:
filename = ('Trigger-%s-%d.jpg' % (device_serial_number, i))
else:
filename = ('Trigger-%d.jpg' % i)
image_converted.Save(filename)
print(('Image saved at %s\n' % filename))
image_result.Release()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result | -6,248,954,651,740,775,000 | This function acquires and saves 10 images from a device.
Please see Acquisition example for more in-depth comments on acquiring images.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:param nodemap_tldevice: Transport layer device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:type nodemap_tldevice: INodeMap
:return: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | acquire_images | BevanLab/Recording_Script | python | def acquire_images(cam, nodemap, nodemap_tldevice):
'\n This function acquires and saves 10 images from a device.\n Please see Acquisition example for more in-depth comments on acquiring images.\n\n :param cam: Camera to acquire images from.\n :param nodemap: Device nodemap.\n :param nodemap_tldevice: Transport layer device nodemap.\n :type cam: CameraPtr\n :type nodemap: INodeMap\n :type nodemap_tldevice: INodeMap\n :return: True if successful, False otherwise.\n :rtype: bool\n '
print('*** IMAGE ACQUISITION ***\n')
try:
result = True
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if ((not PySpin.IsAvailable(node_acquisition_mode)) or (not PySpin.IsWritable(node_acquisition_mode))):
print('Unable to set acquisition mode to continuous (enum retrieval). Aborting...')
return False
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if ((not PySpin.IsAvailable(node_acquisition_mode_continuous)) or (not PySpin.IsReadable(node_acquisition_mode_continuous))):
print('Unable to set acquisition mode to continuous (entry retrieval). Aborting...')
return False
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print('Acquisition mode set to continuous...')
cam.BeginAcquisition()
print('Acquiring images...')
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if (PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number)):
device_serial_number = node_device_serial_number.GetValue()
print(('Device serial number retrieved as %s...' % device_serial_number))
for i in range(NUM_IMAGES):
try:
result &= grab_next_image_by_trigger(nodemap, cam)
image_result = cam.GetNextImage(1000)
if image_result.IsIncomplete():
print(('Image incomplete with image status %d ...' % image_result.GetImageStatus()))
else:
width = image_result.GetWidth()
height = image_result.GetHeight()
print(('Grabbed Image %d, width = %d, height = %d' % (i, width, height)))
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
if device_serial_number:
filename = ('Trigger-%s-%d.jpg' % (device_serial_number, i))
else:
filename = ('Trigger-%d.jpg' % i)
image_converted.Save(filename)
print(('Image saved at %s\n' % filename))
image_result.Release()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result |
def reset_trigger(nodemap):
'\n This function returns the camera to a normal state by turning off trigger mode.\n \n :param nodemap: Transport layer device nodemap.\n :type nodemap: INodeMap\n :returns: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if ((not PySpin.IsAvailable(node_trigger_mode)) or (not PySpin.IsReadable(node_trigger_mode))):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if ((not PySpin.IsAvailable(node_trigger_mode_off)) or (not PySpin.IsReadable(node_trigger_mode_off))):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
result = False
return result | -3,314,745,031,887,221,000 | This function returns the camera to a normal state by turning off trigger mode.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:returns: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | reset_trigger | BevanLab/Recording_Script | python | def reset_trigger(nodemap):
'\n This function returns the camera to a normal state by turning off trigger mode.\n \n :param nodemap: Transport layer device nodemap.\n :type nodemap: INodeMap\n :returns: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if ((not PySpin.IsAvailable(node_trigger_mode)) or (not PySpin.IsReadable(node_trigger_mode))):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if ((not PySpin.IsAvailable(node_trigger_mode_off)) or (not PySpin.IsReadable(node_trigger_mode_off))):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
result = False
return result |
def print_device_info(nodemap):
'\n This function prints the device information of the camera from the transport\n layer; please see NodeMapInfo example for more in-depth comments on printing\n device information from the nodemap.\n\n :param nodemap: Transport layer device nodemap.\n :type nodemap: INodeMap\n :returns: True if successful, False otherwise.\n :rtype: bool\n '
print('*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if (PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information)):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print(('%s: %s' % (node_feature.GetName(), (node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result | 4,143,192,014,156,832,300 | This function prints the device information of the camera from the transport
layer; please see NodeMapInfo example for more in-depth comments on printing
device information from the nodemap.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:returns: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | print_device_info | BevanLab/Recording_Script | python | def print_device_info(nodemap):
'\n This function prints the device information of the camera from the transport\n layer; please see NodeMapInfo example for more in-depth comments on printing\n device information from the nodemap.\n\n :param nodemap: Transport layer device nodemap.\n :type nodemap: INodeMap\n :returns: True if successful, False otherwise.\n :rtype: bool\n '
print('*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if (PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information)):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print(('%s: %s' % (node_feature.GetName(), (node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
return False
return result |
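A short usage sketch, again assuming the surrounding example: print_device_info expects the transport-layer device nodemap, which is readable before cam.Init() is called.

import PySpin

system = PySpin.System.GetInstance()
cam_list = system.GetCameras()
for cam in cam_list:
    # The TL device nodemap carries serial number, model name, and similar fields.
    print_device_info(cam.GetTLDeviceNodeMap())
    del cam
cam_list.Clear()
system.ReleaseInstance()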
def run_single_camera(cam):
'\n This function acts as the body of the example; please see NodeMapInfo example\n for more in-depth comments on setting up cameras.\n\n :param cam: Camera to run on.\n :type cam: CameraPtr\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
err = False
nodemap_tldevice = cam.GetTLDeviceNodeMap()
result &= print_device_info(nodemap_tldevice)
cam.Init()
nodemap = cam.GetNodeMap()
if (configure_trigger(cam) is False):
return False
result &= acquire_images(cam, nodemap, nodemap_tldevice)
result &= reset_trigger(nodemap)
cam.DeInit()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
result = False
return result | 3,507,507,488,458,038,000 | This function acts as the body of the example; please see NodeMapInfo example
for more in-depth comments on setting up cameras.
:param cam: Camera to run on.
:type cam: CameraPtr
:return: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | run_single_camera | BevanLab/Recording_Script | python | def run_single_camera(cam):
'\n This function acts as the body of the example; please see NodeMapInfo example\n for more in-depth comments on setting up cameras.\n\n :param cam: Camera to run on.\n :type cam: CameraPtr\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
result = True
err = False
nodemap_tldevice = cam.GetTLDeviceNodeMap()
result &= print_device_info(nodemap_tldevice)
cam.Init()
nodemap = cam.GetNodeMap()
if (configure_trigger(cam) is False):
return False
result &= acquire_images(cam, nodemap, nodemap_tldevice)
result &= reset_trigger(nodemap)
cam.DeInit()
except PySpin.SpinnakerException as ex:
print(('Error: %s' % ex))
result = False
return result |
def main():
'\n Example entry point; please see Enumeration example for more in-depth\n comments on preparing and cleaning up the system.\n\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
system = PySpin.System.GetInstance()
version = system.GetLibraryVersion()
print(('Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build)))
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print(('Number of cameras detected: %d' % num_cameras))
if (num_cameras == 0):
cam_list.Clear()
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
for (i, cam) in enumerate(cam_list):
print(('Running example for camera %d...' % i))
result &= run_single_camera(cam)
print(('Camera %d example complete... \n' % i))
del cam
cam_list.Clear()
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result | -5,703,057,015,471,915,000 | Example entry point; please see Enumeration example for more in-depth
comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | main | BevanLab/Recording_Script | python | def main():
'\n Example entry point; please see Enumeration example for more in-depth\n comments on preparing and cleaning up the system.\n\n :return: True if successful, False otherwise.\n :rtype: bool\n '
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
system = PySpin.System.GetInstance()
version = system.GetLibraryVersion()
print(('Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build)))
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print(('Number of cameras detected: %d' % num_cameras))
if (num_cameras == 0):
cam_list.Clear()
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
for (i, cam) in enumerate(cam_list):
print(('Running example for camera %d...' % i))
result &= run_single_camera(cam)
print(('Camera %d example complete... \n' % i))
del cam
cam_list.Clear()
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result |
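Not shown in this excerpt, but a script like this would typically close with the standard entry-point guard so that running the file invokes main():

if __name__ == '__main__':
    main()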
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256, bias_fill=None):
'Creates a network to predict the given number of output channels.\n\n This function is intended to make the prediction heads for the CenterNet\n meta architecture.\n\n Args:\n num_out_channels: Number of output channels.\n kernel_size: The size of the conv kernel in the intermediate layer\n num_filters: The number of filters in the intermediate conv layer.\n bias_fill: If not None, is used to initialize the bias in the final conv\n layer.\n\n Returns:\n net: A keras module which when called on an input tensor of size\n [batch_size, height, width, num_in_channels] returns an output\n of size [batch_size, height, width, num_out_channels]\n '
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if (bias_fill is not None):
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential([tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding='same'), tf.keras.layers.ReLU(), out_conv])
return net | 6,019,871,673,096,493,000 | Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer.
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels] | research/object_detection/meta_architectures/center_net_meta_arch.py | make_prediction_net | AvikantSrivastava/models | python | def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256, bias_fill=None):
'Creates a network to predict the given number of output channels.\n\n This function is intended to make the prediction heads for the CenterNet\n meta architecture.\n\n Args:\n num_out_channels: Number of output channels.\n kernel_size: The size of the conv kernel in the intermediate layer\n num_filters: The number of filters in the intermediate conv layer.\n bias_fill: If not None, is used to initialize the bias in the final conv\n layer.\n\n Returns:\n net: A keras module which when called on an input tensor of size\n [batch_size, height, width, num_in_channels] returns an output\n of size [batch_size, height, width, num_out_channels]\n '
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if (bias_fill is not None):
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential([tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size, padding='same'), tf.keras.layers.ReLU(), out_conv])
return net |
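A small sketch of building and applying such a head, assuming make_prediction_net is importable from the file recorded for this row (research/object_detection/meta_architectures/center_net_meta_arch.py); the shapes and the bias value are illustrative only.

import tensorflow as tf
# Assumed import path, derived from the recorded file location.
from object_detection.meta_architectures.center_net_meta_arch import make_prediction_net

# A 90-class center-heatmap head; a negative bias_fill keeps the initial
# sigmoid output close to zero, a common choice for heatmap heads.
heatmap_head = make_prediction_net(num_out_channels=90, bias_fill=-2.19)

backbone_features = tf.random.uniform([2, 128, 128, 64])  # dummy feature map
logits = heatmap_head(backbone_features)                  # -> [2, 128, 128, 90]
print(logits.shape)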
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, per_channel=False):
'Returns the top k scores and their locations in a feature map.\n\n Given a feature map, the top k values (based on activation) are returned. If\n `per_channel` is True, the top k values **per channel** are returned.\n\n The `max_pool_kernel_size` argument allows for selecting local peaks in a\n region. This filtering is done per channel, so nothing prevents two values at\n the same location to be returned.\n\n Args:\n feature_map: [batch, height, width, channels] float32 feature map.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood (independently for each channel).\n For example, to make sure no two neighboring values (in the same channel)\n are returned, set max_pool_kernel_size=3. If None or 1, will not apply max\n pooling.\n k: The number of highest scoring locations to return.\n per_channel: If True, will return the top k scores and locations per\n feature map channel. If False, the top k across the entire feature map\n (height x width x channels) are returned.\n\n Returns:\n Tuple of\n scores: A [batch, N] float32 tensor with scores from the feature map in\n descending order. If per_channel is False, N = k. Otherwise,\n N = k * channels, and the first k elements correspond to channel 0, the\n second k correspond to channel 1, etc.\n y_indices: A [batch, N] int tensor with y indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n x_indices: A [batch, N] int tensor with x indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n channel_indices: A [batch, N] int tensor with channel indices of the top k\n feature map locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n '
if ((not max_pool_kernel_size) or (max_pool_kernel_size == 1)):
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = (tf.math.abs((feature_map - feature_map_max_pool)) < PEAK_EPSILON)
feature_map_peaks = (feature_map * _to_float32(feature_map_peak_mask))
(batch_size, _, width, num_channels) = _get_shape(feature_map, 4)
if per_channel:
feature_map_peaks_transposed = tf.transpose(feature_map_peaks, perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(feature_map_peaks_transposed, [batch_size, num_channels, (- 1)])
(scores, peak_flat_indices) = tf.math.top_k(feature_map_peaks_transposed, k=k)
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = ((num_channels * peak_flat_indices) + channel_idx)
scores = tf.reshape(scores, [batch_size, (- 1)])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, (- 1)])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, (- 1)])
(scores, peak_flat_indices) = tf.math.top_k(feature_map_peaks_flat, k=k)
(y_indices, x_indices, channel_indices) = row_col_channel_indices_from_flattened_indices(peak_flat_indices, width, num_channels)
return (scores, y_indices, x_indices, channel_indices) | 8,923,855,060,416,032,000 | Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
the same location to be returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels. | research/object_detection/meta_architectures/center_net_meta_arch.py | top_k_feature_map_locations | AvikantSrivastava/models | python | def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100, per_channel=False):
'Returns the top k scores and their locations in a feature map.\n\n Given a feature map, the top k values (based on activation) are returned. If\n `per_channel` is True, the top k values **per channel** are returned.\n\n The `max_pool_kernel_size` argument allows for selecting local peaks in a\n region. This filtering is done per channel, so nothing prevents two values at\n the same location to be returned.\n\n Args:\n feature_map: [batch, height, width, channels] float32 feature map.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood (independently for each channel).\n For example, to make sure no two neighboring values (in the same channel)\n are returned, set max_pool_kernel_size=3. If None or 1, will not apply max\n pooling.\n k: The number of highest scoring locations to return.\n per_channel: If True, will return the top k scores and locations per\n feature map channel. If False, the top k across the entire feature map\n (height x width x channels) are returned.\n\n Returns:\n Tuple of\n scores: A [batch, N] float32 tensor with scores from the feature map in\n descending order. If per_channel is False, N = k. Otherwise,\n N = k * channels, and the first k elements correspond to channel 0, the\n second k correspond to channel 1, etc.\n y_indices: A [batch, N] int tensor with y indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n x_indices: A [batch, N] int tensor with x indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n channel_indices: A [batch, N] int tensor with channel indices of the top k\n feature map locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n '
if ((not max_pool_kernel_size) or (max_pool_kernel_size == 1)):
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = (tf.math.abs((feature_map - feature_map_max_pool)) < PEAK_EPSILON)
feature_map_peaks = (feature_map * _to_float32(feature_map_peak_mask))
(batch_size, _, width, num_channels) = _get_shape(feature_map, 4)
if per_channel:
feature_map_peaks_transposed = tf.transpose(feature_map_peaks, perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(feature_map_peaks_transposed, [batch_size, num_channels, (- 1)])
(scores, peak_flat_indices) = tf.math.top_k(feature_map_peaks_transposed, k=k)
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = ((num_channels * peak_flat_indices) + channel_idx)
scores = tf.reshape(scores, [batch_size, (- 1)])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, (- 1)])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, (- 1)])
(scores, peak_flat_indices) = tf.math.top_k(feature_map_peaks_flat, k=k)
(y_indices, x_indices, channel_indices) = row_col_channel_indices_from_flattened_indices(peak_flat_indices, width, num_channels)
return (scores, y_indices, x_indices, channel_indices) |
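An illustrative call with random data, under the same assumed import path; it only demonstrates the returned shapes (per_channel defaults to False, so each output is [batch, k]).

import tensorflow as tf
from object_detection.meta_architectures.center_net_meta_arch import (
    top_k_feature_map_locations)

heatmap = tf.random.uniform([1, 96, 96, 80])  # dummy class-center heatmap
scores, ys, xs, channels = top_k_feature_map_locations(
    heatmap, max_pool_kernel_size=3, k=100)
print(scores.shape, ys.shape, xs.shape, channels.shape)  # each (1, 100)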
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, channel_indices, height_width_predictions, offset_predictions):
'Converts CenterNet class-center, offset and size predictions to boxes.\n\n Args:\n detection_scores: A [batch, num_boxes] float32 tensor with detection\n scores in range [0, 1].\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n channel_indices: A [batch, num_boxes] int32 tensor with channel indices\n corresponding to object classes.\n height_width_predictions: A float tensor of shape [batch_size, height,\n width, 2] representing the height and width of a box centered at each\n pixel.\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box centered at each pixel. This\n helps reduce the error from downsampling.\n\n Returns:\n detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the\n the raw bounding box coordinates of boxes.\n detection_classes: An integer tensor of shape [batch_size, num_boxes]\n indicating the predicted class for each box.\n detection_scores: A float tensor of shape [batch_size, num_boxes] indicating\n the score for each box.\n num_detections: An integer tensor of shape [batch_size,] indicating the\n number of boxes detected for each sample in the batch.\n\n '
(_, _, width, _) = _get_shape(height_width_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width_flat = _flatten_spatial_dimensions(height_width_predictions)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
height_width = tf.gather(height_width_flat, peak_spatial_indices, batch_dims=1)
height_width = tf.maximum(height_width, 0)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
(heights, widths) = tf.unstack(height_width, axis=2)
(y_offsets, x_offsets) = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32((detection_scores > 0)), axis=1)
boxes = tf.stack([((y_indices + y_offsets) - (heights / 2.0)), ((x_indices + x_offsets) - (widths / 2.0)), ((y_indices + y_offsets) + (heights / 2.0)), ((x_indices + x_offsets) + (widths / 2.0))], axis=2)
return (boxes, detection_classes, detection_scores, num_detections) | 4,516,369,718,014,506,000 | Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
raw bounding box coordinates of boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch. | research/object_detection/meta_architectures/center_net_meta_arch.py | prediction_tensors_to_boxes | AvikantSrivastava/models | python | def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices, channel_indices, height_width_predictions, offset_predictions):
'Converts CenterNet class-center, offset and size predictions to boxes.\n\n Args:\n detection_scores: A [batch, num_boxes] float32 tensor with detection\n scores in range [0, 1].\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n channel_indices: A [batch, num_boxes] int32 tensor with channel indices\n corresponding to object classes.\n height_width_predictions: A float tensor of shape [batch_size, height,\n width, 2] representing the height and width of a box centered at each\n pixel.\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box centered at each pixel. This\n helps reduce the error from downsampling.\n\n Returns:\n detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the\n the raw bounding box coordinates of boxes.\n detection_classes: An integer tensor of shape [batch_size, num_boxes]\n indicating the predicted class for each box.\n detection_scores: A float tensor of shape [batch_size, num_boxes] indicating\n the score for each box.\n num_detections: An integer tensor of shape [batch_size,] indicating the\n number of boxes detected for each sample in the batch.\n\n '
(_, _, width, _) = _get_shape(height_width_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width_flat = _flatten_spatial_dimensions(height_width_predictions)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
height_width = tf.gather(height_width_flat, peak_spatial_indices, batch_dims=1)
height_width = tf.maximum(height_width, 0)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
(heights, widths) = tf.unstack(height_width, axis=2)
(y_offsets, x_offsets) = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32((detection_scores > 0)), axis=1)
boxes = tf.stack([((y_indices + y_offsets) - (heights / 2.0)), ((x_indices + x_offsets) - (widths / 2.0)), ((y_indices + y_offsets) + (heights / 2.0)), ((x_indices + x_offsets) + (widths / 2.0))], axis=2)
return (boxes, detection_classes, detection_scores, num_detections) |
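The decoding itself is simple arithmetic; the sketch below reproduces it for a single peak with plain TensorFlow ops, as an illustration of the formula rather than a call into the repository function.

import tensorflow as tf

# One peak at (y, x) = (40, 25) on the output grid, with a predicted
# height/width and a sub-pixel center offset.
y_ind, x_ind = tf.constant(40.0), tf.constant(25.0)
height, width = tf.constant(10.0), tf.constant(6.0)
y_off, x_off = tf.constant(0.3), tf.constant(-0.1)

ymin = y_ind + y_off - height / 2.0
xmin = x_ind + x_off - width / 2.0
ymax = y_ind + y_off + height / 2.0
xmax = x_ind + x_off + width / 2.0
box = tf.stack([ymin, xmin, ymax, xmax])  # [35.3, 21.9, 45.3, 27.9]
print(box.numpy())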
def prediction_tensors_to_temporal_offsets(y_indices, x_indices, offset_predictions):
"Converts CenterNet temporal offset map predictions to batched format.\n\n This function is similiar to the box offset conversion function, as both\n temporal offsets and box offsets are size-2 vectors.\n\n Args:\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box's center across adjacent frames.\n\n Returns:\n offsets: A tensor of shape [batch_size, num_boxes, 2] holding the\n the object temporal offsets of (y, x) dimensions.\n\n "
(_, _, width, _) = _get_shape(offset_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
return offsets | 8,263,310,012,307,342,000 | Converts CenterNet temporal offset map predictions to batched format.
This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
object temporal offsets of (y, x) dimensions.
"Converts CenterNet temporal offset map predictions to batched format.\n\n This function is similiar to the box offset conversion function, as both\n temporal offsets and box offsets are size-2 vectors.\n\n Args:\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box's center across adjacent frames.\n\n Returns:\n offsets: A tensor of shape [batch_size, num_boxes, 2] holding the\n the object temporal offsets of (y, x) dimensions.\n\n "
(_, _, width, _) = _get_shape(offset_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
return offsets |
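Since the lookup is a batched gather of a 2-vector at each peak, a standalone sketch of that gather (not the repository function) looks like this:

import tensorflow as tf

batch, height, width = 1, 4, 5
offset_map = tf.random.uniform([batch, height, width, 2])  # per-pixel (dy, dx)
y_idx = tf.constant([[1, 3]])                              # two peaks per image
x_idx = tf.constant([[0, 4]])

flat = tf.reshape(offset_map, [batch, height * width, 2])  # flatten spatial dims
flat_idx = y_idx * width + x_idx                           # row-major flat indices
offsets = tf.gather(flat, flat_idx, batch_dims=1)          # -> [1, 2, 2]
print(offsets.shape)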
def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_threshold=0.1, max_pool_kernel_size=1, max_candidates=20):
"Convert keypoint heatmap predictions and offsets to keypoint candidates.\n\n Args:\n keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,\n width, num_keypoints] representing the per-keypoint heatmaps.\n keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,\n width, 2] (or [batch_size, height, width, 2 * num_keypoints] if\n 'per_keypoint_offset' is set True) representing the per-keypoint offsets.\n keypoint_score_threshold: float, the threshold for considering a keypoint\n a candidate.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood. For example, to make sure no two\n neighboring values for the same keypoint are returned, set\n max_pool_kernel_size=3. If None or 1, will not apply any local filtering.\n max_candidates: integer, maximum number of keypoint candidates per\n keypoint type.\n\n Returns:\n keypoint_candidates: A tensor of shape\n [batch_size, max_candidates, num_keypoints, 2] holding the\n location of keypoint candidates in [y, x] format (expressed in absolute\n coordinates in the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] with the scores for each\n keypoint candidate. The scores come directly from the heatmap predictions.\n num_keypoint_candidates: An integer tensor of shape\n [batch_size, num_keypoints] with the number of candidates for each\n keypoint type, as it's possible to filter some candidates due to the score\n threshold.\n "
(batch_size, _, width, num_keypoints) = _get_shape(keypoint_heatmap_predictions, 4)
(keypoint_scores, y_indices, x_indices, channel_indices) = top_k_feature_map_locations(keypoint_heatmap_predictions, max_pool_kernel_size=max_pool_kernel_size, k=max_candidates, per_channel=True)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(keypoint_heatmap_offsets)
selected_offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
(_, num_indices, num_channels) = _get_shape(selected_offsets, 3)
if (num_channels > 2):
reshaped_offsets = tf.reshape(selected_offsets, [batch_size, num_indices, (- 1), 2])
offsets = tf.gather(reshaped_offsets, channel_indices, batch_dims=2)
else:
offsets = selected_offsets
(y_offsets, x_offsets) = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([(y_indices + y_offsets), (x_indices + x_offsets)], axis=2)
keypoint_candidates = tf.reshape(keypoint_candidates, [batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(keypoint_scores, [batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(tf.to_int32((keypoint_scores >= keypoint_score_threshold)), axis=1)
return (keypoint_candidates, keypoint_scores, num_candidates) | 7,684,851,093,205,623,000 | Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint
a candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per
keypoint type.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold. | research/object_detection/meta_architectures/center_net_meta_arch.py | prediction_tensors_to_keypoint_candidates | AvikantSrivastava/models | python | def prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions, keypoint_heatmap_offsets, keypoint_score_threshold=0.1, max_pool_kernel_size=1, max_candidates=20):
"Convert keypoint heatmap predictions and offsets to keypoint candidates.\n\n Args:\n keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,\n width, num_keypoints] representing the per-keypoint heatmaps.\n keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,\n width, 2] (or [batch_size, height, width, 2 * num_keypoints] if\n 'per_keypoint_offset' is set True) representing the per-keypoint offsets.\n keypoint_score_threshold: float, the threshold for considering a keypoint\n a candidate.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood. For example, to make sure no two\n neighboring values for the same keypoint are returned, set\n max_pool_kernel_size=3. If None or 1, will not apply any local filtering.\n max_candidates: integer, maximum number of keypoint candidates per\n keypoint type.\n\n Returns:\n keypoint_candidates: A tensor of shape\n [batch_size, max_candidates, num_keypoints, 2] holding the\n location of keypoint candidates in [y, x] format (expressed in absolute\n coordinates in the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] with the scores for each\n keypoint candidate. The scores come directly from the heatmap predictions.\n num_keypoint_candidates: An integer tensor of shape\n [batch_size, num_keypoints] with the number of candidates for each\n keypoint type, as it's possible to filter some candidates due to the score\n threshold.\n "
(batch_size, _, width, num_keypoints) = _get_shape(keypoint_heatmap_predictions, 4)
(keypoint_scores, y_indices, x_indices, channel_indices) = top_k_feature_map_locations(keypoint_heatmap_predictions, max_pool_kernel_size=max_pool_kernel_size, k=max_candidates, per_channel=True)
peak_spatial_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(keypoint_heatmap_offsets)
selected_offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
(_, num_indices, num_channels) = _get_shape(selected_offsets, 3)
if (num_channels > 2):
reshaped_offsets = tf.reshape(selected_offsets, [batch_size, num_indices, (- 1), 2])
offsets = tf.gather(reshaped_offsets, channel_indices, batch_dims=2)
else:
offsets = selected_offsets
(y_offsets, x_offsets) = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([(y_indices + y_offsets), (x_indices + x_offsets)], axis=2)
keypoint_candidates = tf.reshape(keypoint_candidates, [batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(keypoint_scores, [batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(tf.to_int32((keypoint_scores >= keypoint_score_threshold)), axis=1)
return (keypoint_candidates, keypoint_scores, num_candidates) |
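A standalone sketch (not the repository function) of the per-channel top-k step that drives candidate selection: each keypoint channel is flattened and queried independently, and the flat indices are unravelled back into grid coordinates.

import tensorflow as tf

batch, height, width, num_keypoints, k = 1, 8, 8, 17, 5
heatmap = tf.random.uniform([batch, height, width, num_keypoints])

# Channels first, flatten the spatial grid, then take top-k per keypoint type.
per_channel = tf.reshape(tf.transpose(heatmap, [0, 3, 1, 2]),
                         [batch, num_keypoints, height * width])
scores, flat_idx = tf.math.top_k(per_channel, k=k)  # both [1, 17, 5]
y_idx = flat_idx // width                           # recover row indices
x_idx = flat_idx % width                            # recover column indices
print(scores.shape, y_idx.shape, x_idx.shape)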
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, y_indices, x_indices):
'Returns the regressed keypoints at specified object centers.\n\n The original keypoint predictions are regressed relative to each feature map\n location. The returned keypoints are expressed in absolute coordinates in the\n output frame (i.e. the center offsets are added to each individual regressed\n set of keypoints).\n\n Args:\n regressed_keypoint_predictions: A float tensor of shape\n [batch_size, height, width, 2 * num_keypoints] holding regressed\n keypoints. The last dimension has keypoint coordinates ordered as follows:\n [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers. These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where\n regressed keypoints are gathered at the provided locations, and converted\n to absolute coordinates in the output coordinate frame.\n '
(batch_size, _, width, _) = _get_shape(regressed_keypoint_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
(_, num_instances) = _get_shape(flattened_indices, 2)
regressed_keypoints_flat = _flatten_spatial_dimensions(regressed_keypoint_predictions)
relative_regressed_keypoints = tf.gather(regressed_keypoints_flat, flattened_indices, batch_dims=1)
relative_regressed_keypoints = tf.reshape(relative_regressed_keypoints, [batch_size, num_instances, (- 1), 2])
(relative_regressed_keypoints_y, relative_regressed_keypoints_x) = tf.unstack(relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=(- 1)))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=(- 1)))
absolute_regressed_keypoints = tf.stack([(y_indices + relative_regressed_keypoints_y), (x_indices + relative_regressed_keypoints_x)], axis=3)
return tf.reshape(absolute_regressed_keypoints, [batch_size, num_instances, (- 1)]) | -4,714,983,378,805,419,000 | Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame. | research/object_detection/meta_architectures/center_net_meta_arch.py | regressed_keypoints_at_object_centers | AvikantSrivastava/models | python | def regressed_keypoints_at_object_centers(regressed_keypoint_predictions, y_indices, x_indices):
'Returns the regressed keypoints at specified object centers.\n\n The original keypoint predictions are regressed relative to each feature map\n location. The returned keypoints are expressed in absolute coordinates in the\n output frame (i.e. the center offsets are added to each individual regressed\n set of keypoints).\n\n Args:\n regressed_keypoint_predictions: A float tensor of shape\n [batch_size, height, width, 2 * num_keypoints] holding regressed\n keypoints. The last dimension has keypoint coordinates ordered as follows:\n [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers. These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where\n regressed keypoints are gathered at the provided locations, and converted\n to absolute coordinates in the output coordinate frame.\n '
(batch_size, _, width, _) = _get_shape(regressed_keypoint_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(y_indices, x_indices, width)
(_, num_instances) = _get_shape(flattened_indices, 2)
regressed_keypoints_flat = _flatten_spatial_dimensions(regressed_keypoint_predictions)
relative_regressed_keypoints = tf.gather(regressed_keypoints_flat, flattened_indices, batch_dims=1)
relative_regressed_keypoints = tf.reshape(relative_regressed_keypoints, [batch_size, num_instances, (- 1), 2])
(relative_regressed_keypoints_y, relative_regressed_keypoints_x) = tf.unstack(relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=(- 1)))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=(- 1)))
absolute_regressed_keypoints = tf.stack([(y_indices + relative_regressed_keypoints_y), (x_indices + relative_regressed_keypoints_x)], axis=3)
return tf.reshape(absolute_regressed_keypoints, [batch_size, num_instances, (- 1)]) |
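A standalone sketch of the gather-and-shift step (not the repository function): relative keypoint regressions are read at each object center and moved into absolute output-frame coordinates.

import tensorflow as tf

batch, height, width, num_kp = 1, 8, 8, 4
regression = tf.random.uniform([batch, height, width, 2 * num_kp])  # per-pixel [y0, x0, y1, x1, ...]
y_centers = tf.constant([[2, 6]])                                   # two object centers
x_centers = tf.constant([[3, 1]])

flat = tf.reshape(regression, [batch, height * width, 2 * num_kp])
rel = tf.gather(flat, y_centers * width + x_centers, batch_dims=1)  # [1, 2, 2 * num_kp]
rel = tf.reshape(rel, [batch, 2, num_kp, 2])                        # split into (dy, dx) pairs
centers = tf.cast(tf.stack([y_centers, x_centers], axis=-1), tf.float32)  # [1, 2, 2]
absolute = rel + centers[:, :, tf.newaxis, :]                       # broadcast each center onto its keypoints
print(absolute.shape)                                               # (1, 2, 4, 2)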