| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def is_data_dependent(fmto, data):
"""Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data"""
if callable(fmto.data_dependent):
return fmto.data_dependent(data)
return fmto.data_dependent | Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data | Below is the instruction that describes the task:
### Input:
Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data
### Response:
def is_data_dependent(fmto, data):
"""Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data"""
if callable(fmto.data_dependent):
return fmto.data_dependent(data)
return fmto.data_dependent |
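A minimal usage sketch for the `is_data_dependent` row above. `FakeFmto` is a made-up stand-in for psyplot's `Formatoption` class (it is not part of the dataset row) and only serves to exercise the two branches: a plain boolean attribute versus a callable.

```python
# Hypothetical stand-in: any object with a `data_dependent` attribute will do.
class FakeFmto:
    def __init__(self, data_dependent):
        self.data_dependent = data_dependent

print(is_data_dependent(FakeFmto(True), data=None))                    # True (plain bool branch)
print(is_data_dependent(FakeFmto(lambda arr: arr is not None), None))  # False (callable branch)
```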
def metadata(self, run_id=None):
"""Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
"""
# Get latest version and/or git commit hash
try:
version = subprocess.check_output(
["git", "describe", "--tags", "--always"]).decode('utf8')
except:
version = None
# Collect names of database table used to run Ding0 and data version
if self.config['input_data_source']['input_data'] == 'versioned':
data_version = self.config['versioned']['version']
database_tables = self.config['versioned']
elif self.config['input_data_source']['input_data'] == 'model_draft':
data_version = 'model_draft'
database_tables = self.config['model_draft']
else:
data_version = 'unknown'
database_tables = 'unknown'
# Collect assumptions
assumptions = {}
assumptions.update(self.config['assumptions'])
assumptions.update(self.config['mv_connect'])
assumptions.update(self.config['mv_routing'])
assumptions.update(self.config['mv_routing_tech_constraints'])
# Determine run_id if not set
if not run_id:
run_id = datetime.now().strftime("%Y%m%d%H%M%S")
# Set instance attribute run_id
if not self._run_id:
self._run_id = run_id
# Assign data to dict
metadata = dict(
version=version,
mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
database_tables=database_tables,
data_version=data_version,
assumptions=assumptions,
run_id=self._run_id
)
return metadata | Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata | Below is the instruction that describes the task:
### Input:
Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
### Response:
def metadata(self, run_id=None):
"""Provide metadata on a Ding0 run
Parameters
----------
run_id: str, (defaults to current date)
Distinguish multiple versions of Ding0 data by a `run_id`. If not
set it defaults to current date in the format YYYYMMDDhhmmss
Returns
-------
dict
Metadata
"""
# Get latest version and/or git commit hash
try:
version = subprocess.check_output(
["git", "describe", "--tags", "--always"]).decode('utf8')
except:
version = None
# Collect names of database table used to run Ding0 and data version
if self.config['input_data_source']['input_data'] == 'versioned':
data_version = self.config['versioned']['version']
database_tables = self.config['versioned']
elif self.config['input_data_source']['input_data'] == 'model_draft':
data_version = 'model_draft'
database_tables = self.config['model_draft']
else:
data_version = 'unknown'
database_tables = 'unknown'
# Collect assumptions
assumptions = {}
assumptions.update(self.config['assumptions'])
assumptions.update(self.config['mv_connect'])
assumptions.update(self.config['mv_routing'])
assumptions.update(self.config['mv_routing_tech_constraints'])
# Determine run_id if not set
if not run_id:
run_id = datetime.now().strftime("%Y%m%d%H%M%S")
# Set instance attribute run_id
if not self._run_id:
self._run_id = run_id
# Assign data to dict
metadata = dict(
version=version,
mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
database_tables=database_tables,
data_version=data_version,
assumptions=assumptions,
run_id=self._run_id
)
return metadata |
def getAllData(self, temp = True, accel = True, gyro = True):
"""!
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
"""
allData = {}
if temp:
allData["temp"] = self.getTemp()
if accel:
allData["accel"] = self.getAccelData( raw = False )
if gyro:
allData["gyro"] = self.getGyroData()
return allData | !
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data | Below is the instruction that describes the task:
### Input:
!
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
### Response:
def getAllData(self, temp = True, accel = True, gyro = True):
"""!
Get all the available data.
@param temp: True - Allow to return Temperature data
@param accel: True - Allow to return Accelerometer data
@param gyro: True - Allow to return Gyroscope data
@return a dictionary data
@retval {} Did not read any data
@retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
"""
allData = {}
if temp:
allData["temp"] = self.getTemp()
if accel:
allData["accel"] = self.getAccelData( raw = False )
if gyro:
allData["gyro"] = self.getGyroData()
return allData |
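A small driver sketch for the `getAllData` method above. `MPU6050Stub` is invented here purely to supply the `getTemp`/`getAccelData`/`getGyroData` calls the method relies on; a real driver class would read these values from the sensor bus.

```python
class MPU6050Stub:
    def getTemp(self):
        return 32.3
    def getAccelData(self, raw=False):
        return {"x": 0.45634, "y": 0.2124, "z": 1.334}
    def getGyroData(self):
        return {"x": 0.0, "y": 0.0, "z": 0.0}

MPU6050Stub.getAllData = getAllData   # attach the method shown in the row above

sensor = MPU6050Stub()
print(sensor.getAllData(temp=True, accel=True, gyro=False))
# {'temp': 32.3, 'accel': {'x': 0.45634, 'y': 0.2124, 'z': 1.334}}
```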
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector | Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args. | Below is the instruction that describes the task:
### Input:
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
### Response:
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector |
def cmdloop(self):
"""Start CLI REPL."""
while True:
cmdline = input(self.prompt)
tokens = shlex.split(cmdline)
if not tokens:
if self.last_cmd:
tokens = self.last_cmd
else:
print('No previous command.')
continue
if tokens[0] not in self.commands:
print('Invalid command')
continue
command = self.commands[tokens[0]]
self.last_cmd = tokens
try:
if command(self.state, tokens):
break
except CmdExit:
continue
except Exception as e:
if e not in self.safe_exceptions:
logger.exception('Error!') | Start CLI REPL. | Below is the instruction that describes the task:
### Input:
Start CLI REPL.
### Response:
def cmdloop(self):
"""Start CLI REPL."""
while True:
cmdline = input(self.prompt)
tokens = shlex.split(cmdline)
if not tokens:
if self.last_cmd:
tokens = self.last_cmd
else:
print('No previous command.')
continue
if tokens[0] not in self.commands:
print('Invalid command')
continue
command = self.commands[tokens[0]]
self.last_cmd = tokens
try:
if command(self.state, tokens):
break
except CmdExit:
continue
except Exception as e:
if e not in self.safe_exceptions:
logger.exception('Error!') |
def isTemporal(inferenceType):
""" Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
"""
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = \
set([InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes | Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network. | Below is the instruction that describes the task:
### Input:
Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
### Response:
def isTemporal(inferenceType):
""" Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
"""
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = \
set([InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes |
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner | Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type` | Below is the instruction that describes the task:
### Input:
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type`
### Response:
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which returns the
True if x is an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner |
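A quick behaviour sketch for the validator factory above, assuming the function is in scope; this mirrors how pandas-style option validators are typically used.

```python
is_int = is_instance_factory(int)
is_int(3)                        # passes silently (returns None)

try:
    is_int("3")
except ValueError as exc:
    print(exc)                   # e.g. Value must be an instance of '<class 'int'>'

is_number = is_instance_factory((int, float))   # tuples/lists of types are joined with '|'
is_number(2.5)                   # passes
```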
def _get_filename(class_name, language):
"""
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
"""
name = str(class_name).strip()
lang = str(language)
# Name:
if language in ['java', 'php']:
name = "".join([name[0].upper() + name[1:]])
# Suffix:
suffix = {
'c': 'c', 'java': 'java', 'js': 'js',
'go': 'go', 'php': 'php', 'ruby': 'rb'
}
suffix = suffix.get(lang, lang)
# Filename:
return '{}.{}'.format(name, suffix) | Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename. | Below is the instruction that describes the task:
### Input:
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
### Response:
def _get_filename(class_name, language):
"""
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
"""
name = str(class_name).strip()
lang = str(language)
# Name:
if language in ['java', 'php']:
name = "".join([name[0].upper() + name[1:]])
# Suffix:
suffix = {
'c': 'c', 'java': 'java', 'js': 'js',
'go': 'go', 'php': 'php', 'ruby': 'rb'
}
suffix = suffix.get(lang, lang)
# Filename:
return '{}.{}'.format(name, suffix) |
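Expected outputs for the filename helper above, assuming `_get_filename` is in scope; note the capitalisation rule only applies to the Java and PHP targets.

```python
print(_get_filename("tree", "java"))   # Tree.java  (name capitalised for java/php)
print(_get_filename("tree", "php"))    # Tree.php
print(_get_filename("tree", "ruby"))   # tree.rb    ('ruby' is mapped to the 'rb' suffix)
print(_get_filename("tree", "js"))     # tree.js
```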
def __check_mem(self):
''' raise exception on RAM exceeded '''
mem_free = psutil.virtual_memory().available / 2**20
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free)) | raise exception on RAM exceeded | Below is the the instruction that describes the task:
### Input:
raise exception on RAM exceeded
### Response:
def __check_mem(self):
''' raise exception on RAM exceeded '''
mem_free = psutil.virtual_memory().available / 2**20
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free)) |
def as_tree(self, visitor=None, children=None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
_parameters = {"node": self}
if visitor is not None:
_parameters["visitor"] = visitor
if children is not None:
_parameters["children"] = children
return self.__class__.objects.node_as_tree(**_parameters) | Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest. | Below is the instruction that describes the task:
### Input:
Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
### Response:
def as_tree(self, visitor=None, children=None):
""" Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
"""
_parameters = {"node": self}
if visitor is not None:
_parameters["visitor"] = visitor
if children is not None:
_parameters["children"] = children
return self.__class__.objects.node_as_tree(**_parameters) |
def convenience_calc_fisher_approx(self, params):
"""
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
"""
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
self.choice_vector,
self.utility_transform,
self.calc_dh_d_shape,
self.calc_dh_dv,
self.calc_dh_d_alpha,
intercepts,
shapes,
self.ridge,
self.weights]
return cc.calc_fisher_info_matrix(*args) | Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset. | Below is the instruction that describes the task:
### Input:
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
### Response:
def convenience_calc_fisher_approx(self, params):
"""
Calculates the BHHH approximation of the Fisher Information Matrix for
this model / dataset.
"""
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
self.choice_vector,
self.utility_transform,
self.calc_dh_d_shape,
self.calc_dh_dv,
self.calc_dh_d_alpha,
intercepts,
shapes,
self.ridge,
self.weights]
return cc.calc_fisher_info_matrix(*args) |
def enver(*args):
"""
%prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications.
"""
from optparse import OptionParser
parser = OptionParser(usage=trim(enver.__doc__))
parser.add_option(
'-U', '--user-environment',
action='store_const', const=UserRegisteredEnvironment,
default=MachineRegisteredEnvironment,
dest='class_',
help="Use the current user's environment",
)
parser.add_option(
'-a', '--append',
action='store_true', default=False,
help="Append the value to any existing value (default for PATH and PATHEXT)",
)
parser.add_option(
'-r', '--replace',
action='store_true', default=False,
help="Replace any existing value (used to override default append "
"for PATH and PATHEXT)",
)
parser.add_option(
'--remove-value', action='store_true', default=False,
help="Remove any matching values from a semicolon-separated "
"multi-value variable",
)
parser.add_option(
'-e', '--edit', action='store_true', default=False,
help="Edit the value in a local editor",
)
options, args = parser.parse_args(*args)
try:
param = args.pop()
if args:
parser.error("Too many parameters specified")
raise SystemExit(1)
if '=' not in param and not options.edit:
parser.error("Expected <name>= or <name>=<value>")
raise SystemExit(2)
name, sep, value = param.partition('=')
method_name = 'set'
if options.remove_value:
method_name = 'remove_values'
if options.edit:
method_name = 'edit'
method = getattr(options.class_, method_name)
method(name, value, options)
except IndexError:
options.class_.show() | %prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications. | Below is the instruction that describes the task:
### Input:
%prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications.
### Response:
def enver(*args):
"""
%prog [<name>=[value]]
To show all environment variables, call with no parameters:
%prog
To Add/Modify/Delete environment variable:
%prog <name>=[value]
If <name> is PATH or PATHEXT, %prog will by default append the value using
a semicolon as a separator. Use -r to disable this behavior or -a to force
it for variables other than PATH and PATHEXT.
If append is prescribed, but the value doesn't exist, the value will be
created.
If there is no value, %prog will delete the <name> environment variable.
i.e. "PATH="
To remove a specific value or values from a semicolon-separated
multi-value variable (such as PATH), use --remove-value.
e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path
Remove-value matches case-insensitive and also matches any substring
so the following would also be sufficient to remove the aforementioned
undesirable dir.
enver --remove-value PATH=UNWANTED
Note that %prog does not affect the current running environment, and can
only affect subsequently spawned applications.
"""
from optparse import OptionParser
parser = OptionParser(usage=trim(enver.__doc__))
parser.add_option(
'-U', '--user-environment',
action='store_const', const=UserRegisteredEnvironment,
default=MachineRegisteredEnvironment,
dest='class_',
help="Use the current user's environment",
)
parser.add_option(
'-a', '--append',
action='store_true', default=False,
help="Append the value to any existing value (default for PATH and PATHEXT)",
)
parser.add_option(
'-r', '--replace',
action='store_true', default=False,
help="Replace any existing value (used to override default append "
"for PATH and PATHEXT)",
)
parser.add_option(
'--remove-value', action='store_true', default=False,
help="Remove any matching values from a semicolon-separated "
"multi-value variable",
)
parser.add_option(
'-e', '--edit', action='store_true', default=False,
help="Edit the value in a local editor",
)
options, args = parser.parse_args(*args)
try:
param = args.pop()
if args:
parser.error("Too many parameters specified")
raise SystemExit(1)
if '=' not in param and not options.edit:
parser.error("Expected <name>= or <name>=<value>")
raise SystemExit(2)
name, sep, value = param.partition('=')
method_name = 'set'
if options.remove_value:
method_name = 'remove_values'
if options.edit:
method_name = 'edit'
method = getattr(options.class_, method_name)
method(name, value, options)
except IndexError:
options.class_.show() |
def save_tip_length(labware: Labware, length: float):
"""
Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key.
"""
calibration_path = CONFIG['labware_calibration_offsets_dir_v4']
if not calibration_path.exists():
calibration_path.mkdir(parents=True, exist_ok=True)
labware_offset_path = calibration_path/'{}.json'.format(labware._id)
calibration_data = _helper_tip_length_data_format(
str(labware_offset_path), length)
with labware_offset_path.open('w') as f:
json.dump(calibration_data, f)
labware.tip_length = length | Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key. | Below is the instruction that describes the task:
### Input:
Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key.
### Response:
def save_tip_length(labware: Labware, length: float):
"""
Function to be used whenever an updated tip length is found for
of a given tip rack. If an offset file does not exist, create the file
using labware id as the filename. If the file does exist, load it and
modify the length and the lastModified fields under the "tipLength" key.
"""
calibration_path = CONFIG['labware_calibration_offsets_dir_v4']
if not calibration_path.exists():
calibration_path.mkdir(parents=True, exist_ok=True)
labware_offset_path = calibration_path/'{}.json'.format(labware._id)
calibration_data = _helper_tip_length_data_format(
str(labware_offset_path), length)
with labware_offset_path.open('w') as f:
json.dump(calibration_data, f)
labware.tip_length = length |
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
"""run the actual step2 on the given exp/ccd combo"""
jmp_trans = ['step2ajmp']
jmp_args = ['step2bjmp']
matt_args = ['step2matt_jmp']
idx = 0
for expnum in expnums:
jmp_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]
)
jmp_trans.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]
)
idx += 1
matt_args.append('-f%d' % idx)
matt_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.matt', prefix=prefix)[0:-9]
)
logging.info(util.exec_prog(jmp_trans))
if default == "WCS":
logging.info(compute_trans(expnums, ccd, version, prefix, default=default))
logging.info(util.exec_prog(jmp_args))
logging.info(util.exec_prog(matt_args))
## check that the shifts from step2 are rational
check_args = ['checktrans']
if os.access('proc-these-files', os.R_OK):
os.unlink('proc-these-files')
ptf = open('proc-these-files', 'w')
ptf.write("# A dummy file that is created so checktrans could run.\n")
ptf.write("# Frame FWHM PSF?\n")
for expnum in expnums:
filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0]
if not os.access(filename + ".bright.psf", os.R_OK):
os.link(filename + ".bright.jmp", filename + ".bright.psf")
if not os.access(filename + ".obj.psf", os.R_OK):
os.link(filename + ".obj.jmp", filename + ".obj.psf")
ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename,
_FWHM,
"NO"))
ptf.close()
if os.access('BAD_TRANS', os.F_OK):
os.unlink('BAD_TRANS')
logging.info(util.exec_prog(check_args))
if os.access('BAD_TRANS', os.F_OK):
raise OSError(errno.EBADMSG, 'BAD_TRANS')
if os.access('proc-these-files', os.F_OK):
os.unlink('proc-these-files')
if dry_run:
return
for expnum in expnums:
for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix)
filename = os.path.basename(uri)
storage.copy(filename, uri)
return | run the actual step2 on the given exp/ccd combo | Below is the instruction that describes the task:
### Input:
run the actual step2 on the given exp/ccd combo
### Response:
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"):
"""run the actual step2 on the given exp/ccd combo"""
jmp_trans = ['step2ajmp']
jmp_args = ['step2bjmp']
matt_args = ['step2matt_jmp']
idx = 0
for expnum in expnums:
jmp_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]
)
jmp_trans.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]
)
idx += 1
matt_args.append('-f%d' % idx)
matt_args.append(
storage.get_file(expnum, ccd=ccd, version=version, ext='obj.matt', prefix=prefix)[0:-9]
)
logging.info(util.exec_prog(jmp_trans))
if default == "WCS":
logging.info(compute_trans(expnums, ccd, version, prefix, default=default))
logging.info(util.exec_prog(jmp_args))
logging.info(util.exec_prog(matt_args))
## check that the shifts from step2 are rational
check_args = ['checktrans']
if os.access('proc-these-files', os.R_OK):
os.unlink('proc-these-files')
ptf = open('proc-these-files', 'w')
ptf.write("# A dummy file that is created so checktrans could run.\n")
ptf.write("# Frame FWHM PSF?\n")
for expnum in expnums:
filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0]
if not os.access(filename + ".bright.psf", os.R_OK):
os.link(filename + ".bright.jmp", filename + ".bright.psf")
if not os.access(filename + ".obj.psf", os.R_OK):
os.link(filename + ".obj.jmp", filename + ".obj.psf")
ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename,
_FWHM,
"NO"))
ptf.close()
if os.access('BAD_TRANS', os.F_OK):
os.unlink('BAD_TRANS')
logging.info(util.exec_prog(check_args))
if os.access('BAD_TRANS', os.F_OK):
raise OSError(errno.EBADMSG, 'BAD_TRANS')
if os.access('proc-these-files', os.F_OK):
os.unlink('proc-these-files')
if dry_run:
return
for expnum in expnums:
for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']:
uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix)
filename = os.path.basename(uri)
storage.copy(filename, uri)
return |
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) | Notifies each user with a specified command. | Below is the instruction that describes the task:
### Input:
Notifies each user with a specified command.
### Response:
def broadcast(self, command, *args, **kwargs):
"""
Notifies each user with a specified command.
"""
criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
for index, user in items(self.users()):
if criterion(user, command, *args, **kwargs):
self.notify(user, command, *args, **kwargs) |
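A self-contained harness sketch for the `broadcast` method above. Everything here is invented for illustration: `items()` is assumed to behave like `dict.items()` (the real helper is not shown in the row), and `ChatRoom`/`User` only provide the `users()`, `notify()` and `BROADCAST_FILTER_ALL` members that `broadcast` touches. The point is the shape of the `criterion` callable.

```python
def items(mapping):                     # assumed equivalent of the helper used by broadcast
    return mapping.items()

class User:
    def __init__(self, name, is_admin=False):
        self.name = name
        self.is_admin = is_admin

class ChatRoom:
    BROADCAST_FILTER_ALL = staticmethod(lambda user, command, *a, **kw: True)
    def __init__(self, users):
        self._users = users
    def users(self):
        return self._users              # dict of index -> User
    def notify(self, user, command, *args, **kwargs):
        print("notify:", user.name, command)

ChatRoom.broadcast = broadcast          # attach the method from the row above

room = ChatRoom({0: User("alice", is_admin=True), 1: User("bob")})
room.broadcast("shutdown", criterion=lambda u, cmd, *a, **kw: u.is_admin)
# notify: alice shutdown
```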
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
'''
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
# if there is no suggestion or search results, the page doesn't exist
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified") | Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization | Below is the instruction that describes the task:
### Input:
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
### Response:
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
'''
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
# if there is no suggestion or search results, the page doesn't exist
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified") |
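A usage sketch assuming this is the module-level `page()` function from the `wikipedia` package (it relies on `search`, `WikipediaPage` and the exception classes from that package, and the title lookup goes over the network).

```python
import wikipedia

ny = wikipedia.page("New York City")      # auto_suggest runs search() to resolve the title
print(ny.title)

try:
    wikipedia.page()                      # neither title nor pageid supplied
except ValueError as exc:
    print(exc)                            # Either a title or a pageid must be specified
```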
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter):
"""Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
if order:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by(
order),
query_type)
else:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type)
try:
vpcs = query_method()
if vpcs:
return vpcs
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound. | Below is the instruction that describes the task:
### Input:
Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
### Response:
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter):
"""Look up 'query_type' Nexus VPC Allocs matching the filter.
:param query_type: 'all', 'one' or 'first'
:param session: db session
:param order: select what field to order data
:param bfilter: filter for mappings query
:returns: VPCs if query gave a result, else
raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
if order:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by(
order),
query_type)
else:
query_method = getattr(session.query(
nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type)
try:
vpcs = query_method()
if vpcs:
return vpcs
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) |
def system_find_affiliates(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findAffiliates API method.
"""
return DXHTTPRequest('/system/findAffiliates', input_params, always_retry=always_retry, **kwargs) | Invokes the /system/findAffiliates API method. | Below is the instruction that describes the task:
### Input:
Invokes the /system/findAffiliates API method.
### Response:
def system_find_affiliates(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findAffiliates API method.
"""
return DXHTTPRequest('/system/findAffiliates', input_params, always_retry=always_retry, **kwargs) |
def echo(text, **kwargs):
""" Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
"""
if shakedown.cli.quiet:
return
if not 'n' in kwargs:
kwargs['n'] = True
if 'd' in kwargs:
text = decorate(text, kwargs['d'])
if 'TERM' in os.environ and os.environ['TERM'] == 'velocity':
if text:
print(text, end="", flush=True)
if kwargs.get('n'):
print()
else:
click.echo(text, nl=kwargs.get('n')) | Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str | Below is the instruction that describes the task:
### Input:
Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
### Response:
def echo(text, **kwargs):
""" Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
"""
if shakedown.cli.quiet:
return
if not 'n' in kwargs:
kwargs['n'] = True
if 'd' in kwargs:
text = decorate(text, kwargs['d'])
if 'TERM' in os.environ and os.environ['TERM'] == 'velocity':
if text:
print(text, end="", flush=True)
if kwargs.get('n'):
print()
else:
click.echo(text, nl=kwargs.get('n')) |
def _base_type(self):
"""Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
"""
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return "enum.%s" % subclass
raise NotImplementedError("unexpected dimension type class '%s'" % type_class) | Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present. | Below is the instruction that describes the task:
### Input:
Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
### Response:
def _base_type(self):
"""Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
"""
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return "enum.%s" % subclass
raise NotImplementedError("unexpected dimension type class '%s'" % type_class) |
def tpx(mt, x, t):
""" tpx : Returns the probability that x will survive within t years """
""" npx : Returns n years survival probability at age x """
return mt.lx[x + t] / mt.lx[x] | tpx : Returns the probability that x will survive within t years | Below is the instruction that describes the task:
### Input:
tpx : Returns the probability that x will survive within t years
### Response:
def tpx(mt, x, t):
""" tpx : Returns the probability that x will survive within t years """
""" npx : Returns n years survival probability at age x """
return mt.lx[x + t] / mt.lx[x] |
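A worked example for the survival probability above. The `lx` column is a made-up toy mortality table, not real data; any object exposing an `lx` sequence indexed by age works.

```python
class ToyTable:
    lx = [100000, 99000, 97900, 96700, 95400]   # survivors at ages 0..4 (illustrative numbers)

# 2p1 = l(3) / l(1)
print(tpx(ToyTable, x=1, t=2))   # 96700 / 99000 ≈ 0.97677
```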
def parse_field(field: str) -> Tuple[str, Optional[str]]:
"""Parses fields with underscores, and return field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo
"""
_field = field.split('.')
_field = [f.strip() for f in _field]
if len(_field) == 1 and _field[0]:
return _field[0], None
elif len(_field) == 2 and _field[0] and _field[1]:
return _field[0], _field[1]
raise QueryParserException('Query field must be either a single value,'
'possibly with single underscores, '
'or a prefix double underscore field. '
'Received `{}`'.format(field)) | Parses fields with underscores, and return field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo | Below is the instruction that describes the task:
### Input:
Parses fields with underscores, and return field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo
### Response:
def parse_field(field: str) -> Tuple[str, Optional[str]]:
"""Parses fields with underscores, and return field and suffix.
Example:
foo => foo, None
metric.foo => metric, foo
"""
_field = field.split('.')
_field = [f.strip() for f in _field]
if len(_field) == 1 and _field[0]:
return _field[0], None
elif len(_field) == 2 and _field[0] and _field[1]:
return _field[0], _field[1]
raise QueryParserException('Query field must be either a single value,'
'possibly with single underscores, '
'or a prefix double underscore field. '
'Received `{}`'.format(field)) |
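Behaviour sketch for the field parser above, assuming `parse_field`, its typing imports, and the `QueryParserException` class from the same module are all in scope.

```python
print(parse_field("foo"))           # ('foo', None)
print(parse_field("metric.foo"))    # ('metric', 'foo')

try:
    parse_field("a.b.c")            # more than one dot is rejected
except QueryParserException as exc:
    print(exc)
```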
def _pwl_gen_costs(self, generators, base_mva):
""" Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
"""
ng = len(generators)
gpwl = [g for g in generators if g.pcost_model == PW_LINEAR]
# nq = len([g for g in gpwl if g.qcost_model is not None])
if self.dc:
pgbas = 0 # starting index within x for active sources
nq = 0 # number of Qg vars
# qgbas = None # index of 1st Qg column in Ay
ybas = ng # starting index within x for y variables
else:
pgbas = 0
nq = ng
# qgbas = ng + 1 # index of 1st Qg column in Ay
ybas = ng + nq
# Number of extra y variables.
ny = len(gpwl)
if ny == 0:
return None, None
# Total number of cost points.
nc = len([co for gn in gpwl for co in gn.p_cost])
# Ay = lil_matrix((nc - ny, ybas + ny))
# Fill rows and then transpose.
Ay = lil_matrix((ybas + ny, nc - ny))
by = array([])
j = 0
k = 0
for i, g in enumerate(gpwl):
# Number of cost points: segments = ns-1
ns = len(g.p_cost)
p = array([x / base_mva for x, c in g.p_cost])
c = array([c for x, c in g.p_cost])
m = diff(c) / diff(p) # Slopes for Pg (or Qg).
if 0.0 in diff(p):
raise ValueError, "Bad Pcost data: %s (%s)" % (p, g.name)
logger.error("Bad Pcost data: %s" % p)
b = m * p[:ns-1] - c[:ns-1] # rhs
by = r_[by, b.T]
# if i > ng:
# sidx = qgbas + (i-ng) - 1 # this was for a q cost
# else:
# sidx = pgbas + i - 1 # this was for a p cost
Ay[pgbas + i, k:k + ns - 1] = m
# FIXME: Repeat for Q costs.
# Now fill the y rows with -1's
Ay[ybas + j, k:k + ns - 1] = -ones(ns-1)
k += (ns - 1)
j += 1
y = Variable("y", ny)
# Transpose Ay since lil_matrix stores in rows.
if self.dc:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "y"])
else:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "Qg","y"])
return y, ycon | Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information. | Below is the instruction that describes the task:
### Input:
Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
### Response:
def _pwl_gen_costs(self, generators, base_mva):
""" Returns the basin constraints for piece-wise linear gen cost
variables. CCV cost formulation expressed as Ay * x <= by.
Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
"""
ng = len(generators)
gpwl = [g for g in generators if g.pcost_model == PW_LINEAR]
# nq = len([g for g in gpwl if g.qcost_model is not None])
if self.dc:
pgbas = 0 # starting index within x for active sources
nq = 0 # number of Qg vars
# qgbas = None # index of 1st Qg column in Ay
ybas = ng # starting index within x for y variables
else:
pgbas = 0
nq = ng
# qgbas = ng + 1 # index of 1st Qg column in Ay
ybas = ng + nq
# Number of extra y variables.
ny = len(gpwl)
if ny == 0:
return None, None
# Total number of cost points.
nc = len([co for gn in gpwl for co in gn.p_cost])
# Ay = lil_matrix((nc - ny, ybas + ny))
# Fill rows and then transpose.
Ay = lil_matrix((ybas + ny, nc - ny))
by = array([])
j = 0
k = 0
for i, g in enumerate(gpwl):
# Number of cost points: segments = ns-1
ns = len(g.p_cost)
p = array([x / base_mva for x, c in g.p_cost])
c = array([c for x, c in g.p_cost])
m = diff(c) / diff(p) # Slopes for Pg (or Qg).
if 0.0 in diff(p):
raise ValueError, "Bad Pcost data: %s (%s)" % (p, g.name)
logger.error("Bad Pcost data: %s" % p)
b = m * p[:ns-1] - c[:ns-1] # rhs
by = r_[by, b.T]
# if i > ng:
# sidx = qgbas + (i-ng) - 1 # this was for a q cost
# else:
# sidx = pgbas + i - 1 # this was for a p cost
Ay[pgbas + i, k:k + ns - 1] = m
# FIXME: Repeat for Q costs.
# Now fill the y rows with -1's
Ay[ybas + j, k:k + ns - 1] = -ones(ns-1)
k += (ns - 1)
j += 1
y = Variable("y", ny)
# Transpose Ay since lil_matrix stores in rows.
if self.dc:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "y"])
else:
ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "Qg","y"])
return y, ycon |
def index_of(self, name):
"""
Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int
"""
result = -1
for index, actor in enumerate(self.actors):
if actor.name == name:
result = index
break
return result | Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int | Below is the instruction that describes the task:
### Input:
Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int
### Response:
def index_of(self, name):
"""
Returns the index of the actor with the given name.
:param name: the name of the Actor to find
:type name: str
:return: the index, -1 if not found
:rtype: int
"""
result = -1
for index, actor in enumerate(self.actors):
if actor.name == name:
result = index
break
return result |
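A self-contained sketch for `index_of`. `Actor` and `Flow` here are simplified stand-ins invented for the example; all the method needs is an `actors` list whose items carry a `name` attribute.

```python
class Actor:
    def __init__(self, name):
        self.name = name

class Flow:
    def __init__(self, actors):
        self.actors = actors

Flow.index_of = index_of            # attach the method shown in the row above

flow = Flow([Actor("load"), Actor("filter"), Actor("save")])
print(flow.index_of("filter"))      # 1
print(flow.index_of("missing"))     # -1
```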
def from_grpc_error(rpc_exc):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
if isinstance(rpc_exc, grpc.Call):
return from_grpc_status(
rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc
)
else:
return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc) | Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`. | Below is the instruction that describes the task:
### Input:
Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
### Response:
def from_grpc_error(rpc_exc):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
if isinstance(rpc_exc, grpc.Call):
return from_grpc_status(
rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc
)
else:
return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc) |
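A minimal sketch of the non-`grpc.Call` branch, assuming `grpc` and `google-api-core` are installed and `GoogleAPICallError` is in scope (as it is inside `google.api_core.exceptions`, where this helper lives). A bare `grpc.RpcError` is not a `grpc.Call`, so it falls through to the generic wrapper.

```python
import grpc

err = grpc.RpcError("connection reset")   # a bare RpcError, not a grpc.Call
wrapped = from_grpc_error(err)
print(type(wrapped).__name__)             # GoogleAPICallError
```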
def rpcexec(self, payload):
""" Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error
"""
log.debug(json.dumps(payload))
self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8")) | Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error | Below is the instruction that describes the task:
### Input:
Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error
### Response:
def rpcexec(self, payload):
""" Execute a call by sending the payload
:param dict payload: Payload data
:raises ValueError: if the server does not respond in proper JSON format
:raises RPCError: if the server returns an error
"""
log.debug(json.dumps(payload))
self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8")) |
def process_event(self, event, ipmicmd, seldata):
"""Modify an event according with OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions.
"""
event['oem_handler'] = None
evdata = event['event_data_bytes']
if evdata[0] & 0b11000000 == 0b10000000:
event['oem_byte2'] = evdata[1]
if evdata[0] & 0b110000 == 0b100000:
event['oem_byte3'] = evdata[2] | Modify an event according with OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions. | Below is the instruction that describes the task:
### Input:
Modify an event according with OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions.
### Response:
def process_event(self, event, ipmicmd, seldata):
"""Modify an event according with OEM understanding.
Given an event, allow an OEM module to augment it. For example,
event data fields can have OEM bytes. Other times an OEM may wish
to apply some transform to some field to suit their conventions.
"""
event['oem_handler'] = None
evdata = event['event_data_bytes']
if evdata[0] & 0b11000000 == 0b10000000:
event['oem_byte2'] = evdata[1]
if evdata[0] & 0b110000 == 0b100000:
event['oem_byte3'] = evdata[2] |
def set_pump_status(self, status):
"""
Updates pump status and logs update to console.
"""
self.pump_status = status
_logger.info("%r partition %r", status, self.lease.partition_id) | Updates pump status and logs update to console. | Below is the instruction that describes the task:
### Input:
Updates pump status and logs update to console.
### Response:
def set_pump_status(self, status):
"""
Updates pump status and logs update to console.
"""
self.pump_status = status
_logger.info("%r partition %r", status, self.lease.partition_id) |
async def connect(channel: discord.VoiceChannel):
"""
Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord.
"""
node_ = node.get_node(channel.guild.id)
p = await node_.player_manager.create_player(channel)
return p | Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord. | Below is the the instruction that describes the task:
### Input:
Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord.
### Response:
async def connect(channel: discord.VoiceChannel):
"""
Connects to a discord voice channel.
This is the publicly exposed way to connect to a discord voice channel.
The :py:func:`initialize` function must be called first!
Parameters
----------
channel
Returns
-------
Player
The created Player object.
Raises
------
IndexError
If there are no available lavalink nodes ready to connect to discord.
"""
node_ = node.get_node(channel.guild.id)
p = await node_.player_manager.create_player(channel)
return p |
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS | Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start(). | Below is the the instruction that describes the task:
### Input:
Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start().
### Response:
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS |
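For context, a minimal sketch of the external-loop pattern this method supports, assuming the public paho-mqtt Client API (socket(), want_write(), loop_read(), loop_write(), loop_misc()); the broker hostname is a placeholder:
import select
import paho.mqtt.client as mqtt
client = mqtt.Client()
client.connect("broker.example.com")                  # hypothetical broker
while True:
    sock = client.socket()
    wlist = [sock] if client.want_write() else []
    readable, writable, _ = select.select([sock], wlist, [], 1.0)
    if readable:
        client.loop_read()                            # the method shown above
    if writable:
        client.loop_write()
    client.loop_misc()                                # keepalive and retry housekeeping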
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer,
et):
"""
Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int
"""
target1 = stypes.stringToCharP(target1)
shape1 = stypes.stringToCharP(shape1)
frame1 = stypes.stringToCharP(frame1)
target2 = stypes.stringToCharP(target2)
shape2 = stypes.stringToCharP(shape2)
frame2 = stypes.stringToCharP(frame2)
abcorr = stypes.stringToCharP(abcorr)
observer = stypes.stringToCharP(observer)
et = ctypes.c_double(et)
occult_code = ctypes.c_int()
libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr,
observer, et, ctypes.byref(occult_code))
return occult_code.value | Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int
### Response:
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer,
et):
"""
Determines the occultation condition (not occulted, partially,
etc.) of one target relative to another target as seen by
an observer at a given time.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html
:param target1: Name or ID of first target.
:type target1: str
:param shape1: Type of shape model used for first target.
:type shape1: str
:param frame1: Body-fixed, body-centered frame for first body.
:type frame1: str
:param target2: Name or ID of second target.
:type target2: str
:param shape2: Type of shape model used for second target.
:type shape2: str
:param frame2: Body-fixed, body-centered frame for second body.
:type frame2: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param observer: Name or ID of the observer.
:type observer: str
:param et: Time of the observation (seconds past J2000).
:type et: float
:return: Occultation identification code.
:rtype: int
"""
target1 = stypes.stringToCharP(target1)
shape1 = stypes.stringToCharP(shape1)
frame1 = stypes.stringToCharP(frame1)
target2 = stypes.stringToCharP(target2)
shape2 = stypes.stringToCharP(shape2)
frame2 = stypes.stringToCharP(frame2)
abcorr = stypes.stringToCharP(abcorr)
observer = stypes.stringToCharP(observer)
et = ctypes.c_double(et)
occult_code = ctypes.c_int()
libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr,
observer, et, ctypes.byref(occult_code))
return occult_code.value |
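A hedged usage sketch via SpiceyPy (the meta-kernel path is a placeholder, and suitable SPK/PCK/LSK kernels must already be loaded for the named bodies):
import spiceypy as spice
spice.furnsh("meta_kernel.tm")                        # hypothetical meta-kernel listing the needed kernels
et = spice.str2et("2017-08-21T18:30:00")
code = spice.occult("SUN", "ELLIPSOID", "IAU_SUN",
                    "MOON", "ELLIPSOID", "IAU_MOON",
                    "LT", "EARTH", et)
print(code)   # 0 means no occultation; non-zero codes distinguish partial/annular/total cases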
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(KafkaConsumerLagCollector, self).get_default_config()
config.update({
'path': 'kafka.ConsumerLag',
'bin': '/opt/kafka/bin/kafka-run-class.sh',
'zookeeper': 'localhost:2181'
})
return config | Returns the default collector settings | Below is the the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(KafkaConsumerLagCollector, self).get_default_config()
config.update({
'path': 'kafka.ConsumerLag',
'bin': '/opt/kafka/bin/kafka-run-class.sh',
'zookeeper': 'localhost:2181'
})
return config |
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`. | Below is the the instruction that describes the task:
### Input:
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
### Response:
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) |
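To make the span-to-decay-rate mapping concrete, a small arithmetic check of the formula used above (the span value is arbitrary):
span = 15.0
decay_rate = 1.0 - (2.0 / (1.0 + span))
assert abs(decay_rate - 0.875) < 1e-12    # matches the 1 - 2/(1 + 15) example in the docstring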
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
"""Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
"""
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, 'Hz').value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
# parse filter (without digital conversions)
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
# calculate frequency response
w, mag, phase = lti.bode(w=frequencies)
# convert from decibels
if not dB:
mag = 10 ** (mag / 10.)
# draw
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline | Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter. | Below is the the instruction that describes the task:
### Input:
Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
### Response:
def add_filter(self, filter_, frequencies=None, dB=True,
analog=False, sample_rate=None, **kwargs):
"""Add a linear time-invariant filter to this BodePlot
Parameters
----------
filter_ : `~scipy.signal.lti`, `tuple`
the filter to plot, either as a `~scipy.signal.lti`, or a
`tuple` with the following number and meaning of elements
- 2: (numerator, denominator)
- 3: (zeros, poles, gain)
- 4: (A, B, C, D)
frequencies : `numpy.ndarray`, optional
list of frequencies (in Hertz) at which to plot
dB : `bool`, optional
if `True`, display magnitude in decibels, otherwise display
amplitude, default: `True`
**kwargs
any other keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.plot`
Returns
-------
mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>`
the lines drawn for the magnitude and phase of the filter.
"""
if not analog:
if not sample_rate:
raise ValueError("Must give sample_rate frequency to display "
"digital (analog=False) filter")
sample_rate = Quantity(sample_rate, 'Hz').value
dt = 2 * pi / sample_rate
if not isinstance(frequencies, (type(None), int)):
frequencies = numpy.atleast_1d(frequencies).copy()
frequencies *= dt
# parse filter (without digital conversions)
_, fcomp = parse_filter(filter_, analog=False)
if analog:
lti = signal.lti(*fcomp)
else:
lti = signal.dlti(*fcomp, dt=dt)
# calculate frequency response
w, mag, phase = lti.bode(w=frequencies)
# convert from decibels
if not dB:
mag = 10 ** (mag / 10.)
# draw
mline = self.maxes.plot(w, mag, **kwargs)[0]
pline = self.paxes.plot(w, phase, **kwargs)[0]
return mline, pline |
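Under the hood this leans on SciPy's LTI tooling; a standalone sketch of the same magnitude/phase computation, with arbitrary filter coefficients (not tied to gwpy):
import numpy
from scipy import signal
b, a = signal.butter(4, 2 * numpy.pi * 100, btype='low', analog=True)   # arbitrary analog low-pass
w, mag, phase = signal.lti(b, a).bode()    # mag in dB, phase in degrees
amplitude = 10 ** (mag / 10.)              # the same dB-to-amplitude conversion used above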
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass | Update offset tables in all 'stco' and 'co64' atoms. | Below is the the instruction that describes the task:
### Input:
Update offset tables in all 'stco' and 'co64' atoms.
### Response:
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass |
def from_hdf5_path(cls, hdf5_path):
"""
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
"""
from keras.models import load_model
hdf5_local_path = BCommon.get_local_file(hdf5_path)
kmodel = load_model(hdf5_local_path)
return kmodel, DefinitionLoader.from_kmodel(kmodel) | :param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model | Below is the the instruction that describes the task:
### Input:
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
### Response:
def from_hdf5_path(cls, hdf5_path):
"""
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
"""
from keras.models import load_model
hdf5_local_path = BCommon.get_local_file(hdf5_path)
kmodel = load_model(hdf5_local_path)
return kmodel, DefinitionLoader.from_kmodel(kmodel) |
def data(self):
"""
return (data_dict, key) tuple instead of models instances
"""
clone = copy.deepcopy(self)
clone._cfg['rtype'] = ReturnType.Object
return clone | return (data_dict, key) tuple instead of models instances | Below is the the instruction that describes the task:
### Input:
return (data_dict, key) tuple instead of models instances
### Response:
def data(self):
"""
return (data_dict, key) tuple instead of models instances
"""
clone = copy.deepcopy(self)
clone._cfg['rtype'] = ReturnType.Object
return clone |
def save(self):
"""POST the object to the JSS."""
try:
response = requests.post(self._upload_url,
auth=self.jss.session.auth,
verify=self.jss.session.verify,
files=self.resource)
except JSSPostError as error:
if error.status_code == 409:
raise JSSPostError(error)
else:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print "POST: Success"
print response.text.encode("utf-8")
elif response.status_code >= 400:
error_handler(JSSPostError, response) | POST the object to the JSS. | Below is the the instruction that describes the task:
### Input:
POST the object to the JSS.
### Response:
def save(self):
"""POST the object to the JSS."""
try:
response = requests.post(self._upload_url,
auth=self.jss.session.auth,
verify=self.jss.session.verify,
files=self.resource)
except JSSPostError as error:
if error.status_code == 409:
raise JSSPostError(error)
else:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print "POST: Success"
print response.text.encode("utf-8")
elif response.status_code >= 400:
error_handler(JSSPostError, response) |
def get_permission_request(parser, token):
"""
Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %}
"""
return PermissionForObjectNode.handle_token(
parser, token, approved=False, name='"permission_request"') | Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %} | Below is the the instruction that describes the task:
### Input:
Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %}
### Response:
def get_permission_request(parser, token):
"""
Performs a permission request check with the given signature, user and objects
and assigns the result to a context variable.
Syntax::
{% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll as "asked_for_permissio" %}
{% get_permission_request "poll_permission.change_poll"
for request.user and poll,second_poll as "asked_for_permissio" %}
{% if asked_for_permissio %}
Dude, you already asked for permission!
{% else %}
Oh, please fill out this 20 page form and sign here.
{% endif %}
"""
return PermissionForObjectNode.handle_token(
parser, token, approved=False, name='"permission_request"') |
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None,
language=consts.Language.ENGLISH):
"""
Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level.
"""
similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure()
text_titles = texts.keys()
text_collection = texts.values()
similarity_measure.set_text_collection(text_collection, language)
i = 0
keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase)
for keyphrase in keyphrases}
total_keyphrases = len(keyphrases)
total_scores = len(text_collection) * total_keyphrases
res = {}
for keyphrase in keyphrases:
if not keyphrase:
continue
res[keyphrase] = {}
for j in xrange(len(text_collection)):
i += 1
logging.progress("Calculating matching scores", i, total_scores)
res[keyphrase][text_titles[j]] = similarity_measure.relevance(
keyphrases_prepared[keyphrase],
text=j, synonimizer=synonimizer)
logging.clear()
return res | Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level. | Below is the the instruction that describes the task:
### Input:
Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level.
### Response:
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None,
language=consts.Language.ENGLISH):
"""
Constructs the keyphrases table, containing their matching scores in a set of texts.
The resulting table is stored as a dictionary of dictionaries,
where the entry table["keyphrase"]["text"] corresponds
to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
in the text named "text".
:param keyphrases: list of strings
:param texts: dictionary of form {text_name: text}
:param similarity_measure: similarity measure to use
:param synonimizer: SynonymExtractor object to be used
:param language: Language of the text collection / keyphrases
:returns: dictionary of dictionaries, having keyphrases on its first level and texts
on the second level.
"""
similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure()
text_titles = texts.keys()
text_collection = texts.values()
similarity_measure.set_text_collection(text_collection, language)
i = 0
keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase)
for keyphrase in keyphrases}
total_keyphrases = len(keyphrases)
total_scores = len(text_collection) * total_keyphrases
res = {}
for keyphrase in keyphrases:
if not keyphrase:
continue
res[keyphrase] = {}
for j in xrange(len(text_collection)):
i += 1
logging.progress("Calculating matching scores", i, total_scores)
res[keyphrase][text_titles[j]] = similarity_measure.relevance(
keyphrases_prepared[keyphrase],
text=j, synonimizer=synonimizer)
logging.clear()
return res |
def update(self, *fields):
"""
Update this document. Optionally a specific list of fields to update can
be specified.
"""
from mongoframes.queries import to_refs
assert '_id' in self._document, "Can't update documents without `_id`"
# Send update signal
signal('update').send(self.__class__, frames=[self])
# Check for selective updates
if len(fields) > 0:
document = {}
for field in fields:
document[field] = self._path_to_value(field, self._document)
else:
document = self._document
# Prepare the document to be updated
document = to_refs(document)
document.pop('_id', None)
# Update the document
self.get_collection().update_one({'_id': self._id}, {'$set': document})
# Send updated signal
signal('updated').send(self.__class__, frames=[self]) | Update this document. Optionally a specific list of fields to update can
be specified. | Below is the the instruction that describes the task:
### Input:
Update this document. Optionally a specific list of fields to update can
be specified.
### Response:
def update(self, *fields):
"""
Update this document. Optionally a specific list of fields to update can
be specified.
"""
from mongoframes.queries import to_refs
assert '_id' in self._document, "Can't update documents without `_id`"
# Send update signal
signal('update').send(self.__class__, frames=[self])
# Check for selective updates
if len(fields) > 0:
document = {}
for field in fields:
document[field] = self._path_to_value(field, self._document)
else:
document = self._document
# Prepare the document to be updated
document = to_refs(document)
document.pop('_id', None)
# Update the document
self.get_collection().update_one({'_id': self._id}, {'$set': document})
# Send updated signal
signal('updated').send(self.__class__, frames=[self]) |
def get_client():
"""Returns an ``InfluxDBClient`` instance."""
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASSWORD,
settings.INFLUXDB_DATABASE,
timeout=settings.INFLUXDB_TIMEOUT,
ssl=getattr(settings, 'INFLUXDB_SSL', False),
verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False),
) | Returns an ``InfluxDBClient`` instance. | Below is the the instruction that describes the task:
### Input:
Returns an ``InfluxDBClient`` instance.
### Response:
def get_client():
"""Returns an ``InfluxDBClient`` instance."""
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASSWORD,
settings.INFLUXDB_DATABASE,
timeout=settings.INFLUXDB_TIMEOUT,
ssl=getattr(settings, 'INFLUXDB_SSL', False),
verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False),
) |
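A hedged usage sketch of the returned client (assumes the Django settings above point at a reachable InfluxDB server; the measurement name and field are made up):
client = get_client()
client.write_points([{
    'measurement': 'cpu_load',             # hypothetical measurement
    'fields': {'value': 0.64},
}])
result = client.query('SELECT * FROM cpu_load LIMIT 1')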
def parse_clubs(self, clubs_page):
"""Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
"""
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
clubs_header = second_col.find(u'div', text=u'Related Clubs')
character_info[u'clubs'] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u'div':
link = curr_elt.find(u'a')
club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members}))
curr_elt = curr_elt.nextSibling
except:
if not self.session.suppress_parse_exceptions:
raise
return character_info | Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes. | Below is the the instruction that describes the task:
### Input:
Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
### Response:
def parse_clubs(self, clubs_page):
"""Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
"""
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
clubs_header = second_col.find(u'div', text=u'Related Clubs')
character_info[u'clubs'] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u'div':
link = curr_elt.find(u'a')
club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members}))
curr_elt = curr_elt.nextSibling
except:
if not self.session.suppress_parse_exceptions:
raise
return character_info |
def find_l50(contig_lengths_dict, genome_length_dict):
"""
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50
"""
# Initialise the dictionary
l50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
# Initialise a variable to count how many contigs have been added to the currentlength variable
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
# Increment :currentcontig each time a contig is added to the current length
currentcontig += 1
# Same logic as with the N50, but the contig number is added instead of the length of the contig
if currentlength >= genome_length_dict[file_name] * 0.5:
l50_dict[file_name] = currentcontig
break
return l50_dict | Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50 | Below is the the instruction that describes the task:
### Input:
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50
### Response:
def find_l50(contig_lengths_dict, genome_length_dict):
"""
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: l50_dict: dictionary of strain name: L50
"""
# Initialise the dictionary
l50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
# Initialise a variable to count how many contigs have been added to the currentlength variable
currentcontig = 0
for contig_length in contig_lengths:
currentlength += contig_length
# Increment :currentcontig each time a contig is added to the current length
currentcontig += 1
# Same logic as with the N50, but the contig number is added instead of the length of the contig
if currentlength >= genome_length_dict[file_name] * 0.5:
l50_dict[file_name] = currentcontig
break
return l50_dict |
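A worked example with made-up contig lengths, calling the function defined above: the 'genome' totals 110 bp, half of that is 55, and the running sum first reaches 55 at the second contig, so the L50 is 2.
contig_lengths_dict = {'strainA': [50, 30, 20, 10]}                      # already reverse-sorted
genome_length_dict = {'strainA': sum(contig_lengths_dict['strainA'])}    # 110
assert find_l50(contig_lengths_dict, genome_length_dict) == {'strainA': 2}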
def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
""" Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
        latter case and if there are several mrios in the zip file the
        parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
        'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
        if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file as the systemtype entry
'IOSystem'.
"""
def clean(varStr):
""" get valid python name from folder
"""
        return re.sub(r'\W|^(?=\d)', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8')
)['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
                raise ReadError('Multiple mrio archives found in {}. '
'Specify one by the '
'parameter "path_in_arc"'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
    return io | Loads a full IO system with all extensions in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
        latter case and if there are several mrios in the zip file the
        parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
        'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
        if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file as the systemtype entry
'IOSystem'. | Below is the the instruction that describes the task:
### Input:
Loads a full IO system with all extensions in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
    latter case and if there are several mrios in the zip file the
    parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
    'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
    if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file as the systemtype entry
'IOSystem'.
### Response:
def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
""" Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
        latter case and if there are several mrios in the zip file the
        parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the paramter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to an compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file as the systemtype entry
'IOSystem'.
"""
def clean(varStr):
""" get valid python name from folder
"""
        return re.sub(r'\W|^(?=\d)', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8')
)['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
                raise ReadError('Multiple mrio archives found in {}. '
'Specify one by the '
'parameter "path_in_arc"'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io |
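A hedged usage sketch (assumes the pymrio package layout, where this function is exposed at package level; the data path is a placeholder pointing at data written earlier by save_all):
import pymrio
io = pymrio.load_all('mrio_data/')     # directory or zip archive containing the core system and extensions
print(io.meta)                         # provenance log, including the 'Added satellite account' entries above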
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
"""Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
"""
secret = _lazysecret(secret) if lazy else secret
encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
try:
padded = ciphertext + ('=' * (len(ciphertext) % 4))
decoded = base64.urlsafe_b64decode(str(padded))
plaintext = encobj.decrypt(decoded)
except (TypeError, binascii.Error):
raise InvalidKeyError("invalid key")
if checksum:
try:
crc, plaintext = (base64.urlsafe_b64decode(
plaintext[-8:]), plaintext[:-8])
except (TypeError, binascii.Error):
raise CheckSumError("checksum mismatch")
if not crc == _pack_crc(plaintext):
raise CheckSumError("checksum mismatch")
return plaintext | Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext | Below is the the instruction that describes the task:
### Input:
Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
### Response:
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):
"""Decrypts ciphertext with secret
ciphertext - encrypted content to decrypt
secret - secret to decrypt ciphertext
inital_vector - initial vector
lazy - pad secret if less than legal blocksize (default: True)
checksum - verify crc32 byte encoded checksum (default: True)
returns plaintext
"""
secret = _lazysecret(secret) if lazy else secret
encobj = AES.new(secret, AES.MODE_CFB, inital_vector)
try:
padded = ciphertext + ('=' * (len(ciphertext) % 4))
decoded = base64.urlsafe_b64decode(str(padded))
plaintext = encobj.decrypt(decoded)
except (TypeError, binascii.Error):
raise InvalidKeyError("invalid key")
if checksum:
try:
crc, plaintext = (base64.urlsafe_b64decode(
plaintext[-8:]), plaintext[:-8])
except (TypeError, binascii.Error):
raise CheckSumError("checksum mismatch")
if not crc == _pack_crc(plaintext):
raise CheckSumError("checksum mismatch")
return plaintext |
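Since the matching encrypt helper and _pack_crc are not shown here, a minimal round-trip sketch of the checksum=False path only, assuming the legacy PyCrypto API and Python 2 string semantics this code targets (key, IV and message are arbitrary):
import base64
from Crypto.Cipher import AES
secret = '0123456789abcdef'            # 16-byte key
iv = 'fedcba9876543210'                # 16-byte initial vector
raw = AES.new(secret, AES.MODE_CFB, iv).encrypt('hello world')
ciphertext = base64.urlsafe_b64encode(raw)
assert decrypt(ciphertext, secret, iv, checksum=False) == 'hello world'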
def redfearn(lat, lon, false_easting=None, false_northing=None,
zone=None, central_meridian=None, scale_factor=None):
"""Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
    Note that zone and meridian cannot both be specified
"""
from math import pi, sqrt, sin, cos, tan
#GDA Specifications
a = 6378137.0 #Semi major axis
inverse_flattening = 298.257222101 #1/f
if scale_factor is None:
K0 = 0.9996 #Central scale factor
else:
K0 = scale_factor
#print('scale', K0)
zone_width = 6 #Degrees
longitude_of_central_meridian_zone0 = -183
longitude_of_western_edge_zone0 = -186
if false_easting is None:
false_easting = 500000
if false_northing is None:
if lat < 0:
false_northing = 10000000 #Southern hemisphere
else:
false_northing = 0 #Northern hemisphere)
#Derived constants
f = 1.0/inverse_flattening
b = a*(1-f) #Semi minor axis
    e2 = 2*f - f*f# = f*(2-f) = (a^2-b^2)/a^2 #Eccentricity
e = sqrt(e2)
e2_ = e2/(1-e2) # = (a^2-b^2)/b^2 #Second eccentricity
e_ = sqrt(e2_)
e4 = e2*e2
e6 = e2*e4
#Foot point latitude
n = (a-b)/(a+b) #Same as e2 - why ?
n2 = n*n
n3 = n*n2
n4 = n2*n2
G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180
phi = lat*pi/180 #Convert latitude to radians
sinphi = sin(phi)
sin2phi = sin(2*phi)
sin4phi = sin(4*phi)
sin6phi = sin(6*phi)
cosphi = cos(phi)
cosphi2 = cosphi*cosphi
cosphi3 = cosphi*cosphi2
cosphi4 = cosphi2*cosphi2
cosphi5 = cosphi*cosphi4
cosphi6 = cosphi2*cosphi4
cosphi7 = cosphi*cosphi6
cosphi8 = cosphi4*cosphi4
t = tan(phi)
t2 = t*t
t4 = t2*t2
t6 = t2*t4
#Radius of Curvature
rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5
nu = a/(1-e2*sinphi*sinphi)**0.5
psi = nu/rho
psi2 = psi*psi
psi3 = psi*psi2
psi4 = psi2*psi2
#Meridian distance
A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256
A2 = 3.0/8*(e2+e4/4+15*e6/128)
A4 = 15.0/256*(e4+3*e6/4)
A6 = 35*e6/3072
term1 = a*A0*phi
term2 = -a*A2*sin2phi
term3 = a*A4*sin4phi
term4 = -a*A6*sin6phi
m = term1 + term2 + term3 + term4 #OK
if zone is not None and central_meridian is not None:
msg = 'You specified both zone and central_meridian. Provide only one of them'
raise ValueError(msg)
# Zone
if zone is None:
zone = int((lon - longitude_of_western_edge_zone0)/zone_width)
# Central meridian
if central_meridian is None:
central_meridian = zone*zone_width+longitude_of_central_meridian_zone0
else:
zone = -1
omega = (lon-central_meridian)*pi/180 #Relative longitude (radians)
omega2 = omega*omega
omega3 = omega*omega2
omega4 = omega2*omega2
omega5 = omega*omega4
omega6 = omega3*omega3
omega7 = omega*omega6
omega8 = omega4*omega4
#Northing
term1 = nu*sinphi*cosphi*omega2/2
term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24
term3 = nu*sinphi*cosphi5*\
(8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\
psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720
term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320
northing = false_northing + K0*(m + term1 + term2 + term3 + term4)
#Easting
term1 = nu*omega*cosphi
term2 = nu*cosphi3*(psi-t2)*omega3/6
term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120
term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040
easting = false_easting + K0*(term1 + term2 + term3 + term4)
return zone, easting, northing | Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
    Note that zone and meridian cannot both be specified | Below is the the instruction that describes the task:
### Input:
Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
    Note that zone and meridian cannot both be specified
### Response:
def redfearn(lat, lon, false_easting=None, false_northing=None,
zone=None, central_meridian=None, scale_factor=None):
"""Compute UTM projection using Redfearn's formula
lat, lon is latitude and longitude in decimal degrees
If false easting and northing are specified they will override
the standard
If zone is specified reproject lat and long to specified zone instead of
standard zone
If meridian is specified, reproject lat and lon to that instead of zone. In this case
zone will be set to -1 to indicate non-UTM projection
    Note that zone and meridian cannot both be specified
"""
from math import pi, sqrt, sin, cos, tan
#GDA Specifications
a = 6378137.0 #Semi major axis
inverse_flattening = 298.257222101 #1/f
if scale_factor is None:
K0 = 0.9996 #Central scale factor
else:
K0 = scale_factor
#print('scale', K0)
zone_width = 6 #Degrees
longitude_of_central_meridian_zone0 = -183
longitude_of_western_edge_zone0 = -186
if false_easting is None:
false_easting = 500000
if false_northing is None:
if lat < 0:
false_northing = 10000000 #Southern hemisphere
else:
            false_northing = 0 #Northern hemisphere
#Derived constants
f = 1.0/inverse_flattening
b = a*(1-f) #Semi minor axis
    e2 = 2*f - f*f# = f*(2-f) = (a^2-b^2)/a^2 #Eccentricity
e = sqrt(e2)
e2_ = e2/(1-e2) # = (a^2-b^2)/b^2 #Second eccentricity
e_ = sqrt(e2_)
e4 = e2*e2
e6 = e2*e4
#Foot point latitude
n = (a-b)/(a+b) #Same as e2 - why ?
n2 = n*n
n3 = n*n2
n4 = n2*n2
G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180
phi = lat*pi/180 #Convert latitude to radians
sinphi = sin(phi)
sin2phi = sin(2*phi)
sin4phi = sin(4*phi)
sin6phi = sin(6*phi)
cosphi = cos(phi)
cosphi2 = cosphi*cosphi
cosphi3 = cosphi*cosphi2
cosphi4 = cosphi2*cosphi2
cosphi5 = cosphi*cosphi4
cosphi6 = cosphi2*cosphi4
cosphi7 = cosphi*cosphi6
cosphi8 = cosphi4*cosphi4
t = tan(phi)
t2 = t*t
t4 = t2*t2
t6 = t2*t4
#Radius of Curvature
rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5
nu = a/(1-e2*sinphi*sinphi)**0.5
psi = nu/rho
psi2 = psi*psi
psi3 = psi*psi2
psi4 = psi2*psi2
#Meridian distance
A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256
A2 = 3.0/8*(e2+e4/4+15*e6/128)
A4 = 15.0/256*(e4+3*e6/4)
A6 = 35*e6/3072
term1 = a*A0*phi
term2 = -a*A2*sin2phi
term3 = a*A4*sin4phi
term4 = -a*A6*sin6phi
m = term1 + term2 + term3 + term4 #OK
if zone is not None and central_meridian is not None:
msg = 'You specified both zone and central_meridian. Provide only one of them'
raise ValueError(msg)
# Zone
if zone is None:
zone = int((lon - longitude_of_western_edge_zone0)/zone_width)
# Central meridian
if central_meridian is None:
central_meridian = zone*zone_width+longitude_of_central_meridian_zone0
else:
zone = -1
omega = (lon-central_meridian)*pi/180 #Relative longitude (radians)
omega2 = omega*omega
omega3 = omega*omega2
omega4 = omega2*omega2
omega5 = omega*omega4
omega6 = omega3*omega3
omega7 = omega*omega6
omega8 = omega4*omega4
#Northing
term1 = nu*sinphi*cosphi*omega2/2
term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24
term3 = nu*sinphi*cosphi5*\
(8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\
psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720
term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320
northing = false_northing + K0*(m + term1 + term2 + term3 + term4)
#Easting
term1 = nu*omega*cosphi
term2 = nu*cosphi3*(psi-t2)*omega3/6
term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120
term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040
easting = false_easting + K0*(term1 + term2 + term3 + term4)
return zone, easting, northing |
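A minimal usage sketch of redfearn as defined above; the coordinates are illustrative placeholders (roughly Melbourne, 37.81 S / 144.96 E, which the zone formula above maps to zone 55):
zone, easting, northing = redfearn(-37.81, 144.96)
print(zone, easting, northing)  # zone 55, easting/northing in metres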
def post(self, url, data, params=None):
"""
Initiate a POST request
"""
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False) | Initiate a POST request | Below is the the instruction that describes the task:
### Input:
Initiate a POST request
### Response:
def post(self, url, data, params=None):
"""
Initiate a POST request
"""
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False) |
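A hypothetical usage sketch; `client` stands in for an instance of the class this method belongs to (it must provide `session` and `_response_parser`), and the URL and payload are placeholders:
result = client.post('https://example.com/api/items',
                     data={'name': 'demo'},
                     params={'token': 'abc123'})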
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
            # to be consistent across files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
        # When a local callable is created and immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out) | Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code. | Below is the the instruction that describes the task:
### Input:
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
### Response:
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
            # to be consistent across files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
        # When a local callable is created and immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out) |
def _prm_get_longest_stringsize(string_list):
""" Returns the longest string size for a string entry across data."""
maxlength = 1
for stringar in string_list:
if isinstance(stringar, np.ndarray):
if stringar.ndim > 0:
for string in stringar.ravel():
maxlength = max(len(string), maxlength)
else:
maxlength = max(len(stringar.tolist()), maxlength)
else:
maxlength = max(len(stringar), maxlength)
# Make the string Col longer than needed in order to allow later on slightly larger strings
return int(maxlength * 1.5) | Returns the longest string size for a string entry across data. | Below is the the instruction that describes the task:
### Input:
Returns the longest string size for a string entry across data.
### Response:
def _prm_get_longest_stringsize(string_list):
""" Returns the longest string size for a string entry across data."""
maxlength = 1
for stringar in string_list:
if isinstance(stringar, np.ndarray):
if stringar.ndim > 0:
for string in stringar.ravel():
maxlength = max(len(string), maxlength)
else:
maxlength = max(len(stringar.tolist()), maxlength)
else:
maxlength = max(len(stringar), maxlength)
# Make the string Col longer than needed in order to allow later on slightly larger strings
return int(maxlength * 1.5) |
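A minimal sketch of the sizing rule above; the longest entry is 'hello' (5 characters), so the padded column size is int(5 * 1.5) == 7:
import numpy as np
size = _prm_get_longest_stringsize(['foo', np.array(['hello', 'hi'])])
print(size)  # 7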
def line_spacing(self):
"""
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule) | |float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy. | Below is the the instruction that describes the task:
### Input:
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
### Response:
def line_spacing(self):
"""
|float| or |Length| value specifying the space between baselines in
successive lines of the paragraph. A value of |None| indicates line
spacing is inherited from the style hierarchy. A float value, e.g.
``2.0`` or ``1.75``, indicates spacing is applied in multiples of
line heights. A |Length| value such as ``Pt(12)`` indicates spacing
is a fixed height. The |Pt| value class is a convenient way to apply
line spacing in units of points. Assigning |None| resets line spacing
to inherit from the style hierarchy.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule) |
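A usage sketch assuming this property is exposed on python-docx's ParagraphFormat objects; the file name is a placeholder:
from docx import Document
doc = Document('example.docx')
fmt = doc.paragraphs[0].paragraph_format
print(fmt.line_spacing)  # None, a float multiple, or a fixed Length such as Pt(12)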
def info(self, msg):
""" Log Info Messages """
self._execActions('info', msg)
msg = self._execFilters('info', msg)
self._processMsg('info', msg)
self._sendMsg('info', msg) | Log Info Messages | Below is the the instruction that describes the task:
### Input:
Log Info Messages
### Response:
def info(self, msg):
""" Log Info Messages """
self._execActions('info', msg)
msg = self._execFilters('info', msg)
self._processMsg('info', msg)
self._sendMsg('info', msg) |
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
  return hparams | big 1d model for conditional image generation. 2.99 on cifar10. | Below is the the instruction that describes the task:
### Input:
big 1d model for conditional image generation. 2.99 on cifar10.
### Response:
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformerpp_sep_channels_8l_8h()
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_prepostprocess_dropout = 0.3
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.summarize_grads = True
hparams.learning_rate = 0.01
return hparams |
def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out | Add secondary files only if present locally or remotely. | Below is the the instruction that describes the task:
### Input:
Add secondary files only if present locally or remotely.
### Response:
def _add_secondary_if_exists(secondary, out, get_retriever):
"""Add secondary files only if present locally or remotely.
"""
secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
secondary = [z for z in secondary if z]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
return out |
def _produceIt(self, segments, thunk):
"""
        Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
"""
if not self.prefixURL:
needle = ()
else:
needle = tuple(self.prefixURL.split('/'))
S = len(needle)
if segments[:S] == needle:
if segments == JUST_SLASH:
# I *HATE* THE WEB
subsegments = segments
else:
subsegments = segments[S:]
res = thunk()
# Even though the URL matched up, sometimes we might still
# decide to not handle this request (eg, some prerequisite
# for our function is not met by the store). Allow None
# to be returned by createResource to indicate this case.
if res is not None:
                return res, subsegments | Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}. | Below is the the instruction that describes the task:
### Input:
        Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
### Response:
def _produceIt(self, segments, thunk):
"""
        Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.
@param segments: the URL segments to dispatch.
@param thunk: a 0-argument callable which returns an L{IResource}
provider, or None.
@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
"""
if not self.prefixURL:
needle = ()
else:
needle = tuple(self.prefixURL.split('/'))
S = len(needle)
if segments[:S] == needle:
if segments == JUST_SLASH:
# I *HATE* THE WEB
subsegments = segments
else:
subsegments = segments[S:]
res = thunk()
# Even though the URL matched up, sometimes we might still
# decide to not handle this request (eg, some prerequisite
# for our function is not met by the store). Allow None
# to be returned by createResource to indicate this case.
if res is not None:
return res, subsegments |
def generate_csr(self, basename='djangoafip'):
"""
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
"""
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr | Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP. | Below is the the instruction that describes the task:
### Input:
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
### Response:
def generate_csr(self, basename='djangoafip'):
"""
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
"""
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr |
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
"""
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
"""
self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
yield from self.synchroniser.wait(spec.BasicQosOK)
self.reader.ready() | Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel. | Below is the the instruction that describes the task:
### Input:
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
### Response:
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
"""
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
"""
self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
yield from self.synchroniser.wait(spec.BasicQosOK)
self.reader.ready() |
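A usage sketch in the same old-style coroutine idiom as the method above; `channel` is assumed to be an already-opened channel object exposing set_qos:
import asyncio

@asyncio.coroutine
def configure(channel):
    # ask the broker to push at most 10 unacknowledged messages at a time
    yield from channel.set_qos(prefetch_count=10)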
def find_channel_groups(chan):
"""Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
"""
labels = chan.return_label()
group_names = {match('([A-Za-z ]+)\d+', label).group(1) for label in labels}
groups = {}
for group_name in group_names:
groups[group_name] = [label for label in labels if label.startswith(group_name)]
return groups | Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels | Below is the the instruction that describes the task:
### Input:
Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
### Response:
def find_channel_groups(chan):
"""Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.
Parameters
----------
chan : instance of Channels
channels to group
Returns
-------
groups : dict
channel groups: key is the common string, and the item is a list of
labels
"""
labels = chan.return_label()
group_names = {match('([A-Za-z ]+)\d+', label).group(1) for label in labels}
groups = {}
for group_name in group_names:
groups[group_name] = [label for label in labels if label.startswith(group_name)]
return groups |
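A minimal sketch with a stand-in object; the real Channels class only needs to provide return_label() returning the label strings:
class FakeChannels:
    def return_label(self):
        return ['GR1', 'GR2', 'GR3', 'OF1', 'OF2']

print(find_channel_groups(FakeChannels()))
# {'GR': ['GR1', 'GR2', 'GR3'], 'OF': ['OF1', 'OF2']} (key order may vary)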
def _update_remote_children(remote_parent, children):
"""
    Update remote_ids based on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
"""
name_to_child = _name_to_child_map(children)
for remote_child in remote_parent.children:
local_child = name_to_child.get(remote_child.name)
if local_child:
            local_child.update_remote_ids(remote_child) | Update remote_ids based on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children | Below is the the instruction that describes the task:
### Input:
    Update remote_ids based on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
### Response:
def _update_remote_children(remote_parent, children):
"""
    Update remote_ids based on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
"""
name_to_child = _name_to_child_map(children)
for remote_child in remote_parent.children:
local_child = name_to_child.get(remote_child.name)
if local_child:
local_child.update_remote_ids(remote_child) |
def from_string(cls, address, case_sensitive=False):
"""Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
"""
assert isinstance(address, str), 'address must be str'
username, domainname = address.split('@')
return cls(username, domainname, case_sensitive=case_sensitive) | Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account` | Below is the the instruction that describes the task:
### Input:
Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
### Response:
def from_string(cls, address, case_sensitive=False):
"""Alternate constructor for building from a string.
:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
"""
assert isinstance(address, str), 'address must be str'
username, domainname = address.split('@')
return cls(username, domainname, case_sensitive=case_sensitive) |
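A minimal usage sketch; the address is a placeholder and the call relies only on the signature shown above:
account = Account.from_string('alice@example.com')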
def _xysxy2(date):
"""Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
"""
planets = _planets(date)
x_tab, y_tab, s_tab = _tab('X'), _tab('Y'), _tab('s')
ttt = date.change_scale('TT').julian_century
# Units: micro-arcsecond
X = -16616.99 + 2004191742.88 * ttt - 427219.05 * ttt ** 2 - 198620.54 * ttt ** 3\
- 46.05 * ttt ** 4 + 5.98 * ttt ** 5
Y = -6950.78 - 25381.99 * ttt - 22407250.99 * ttt ** 2 + 1842.28 * ttt ** 3\
+ 1113.06 * ttt ** 4 + 0.99 * ttt ** 5
s_xy2 = 94.0 + 3808.65 * ttt - 122.68 * ttt ** 2 - 72574.11 * ttt ** 3\
+ 27.98 * ttt ** 4 + 15.62 * ttt ** 5
for j in range(5):
_x, _y, _s = 0, 0, 0
for i in range(len(x_tab[j])):
Axs, Axc, *p_coefs = x_tab[j][i]
ax_p = np.dot(p_coefs, planets)
_x += Axs * np.sin(ax_p) + Axc * np.cos(ax_p)
for i in range(len(y_tab[j])):
Ays, Ayc, *p_coefs = y_tab[j][i]
ay_p = np.dot(p_coefs, planets)
_y += Ays * np.sin(ay_p) + Ayc * np.cos(ay_p)
for i in range(len(s_tab[j])):
Ass, Asc, *p_coefs = s_tab[j][i]
as_p = np.dot(p_coefs, planets)
_s += Ass * np.sin(as_p) + Asc * np.cos(as_p)
X += _x * ttt ** j
Y += _y * ttt ** j
s_xy2 += _s * ttt ** j
# Conversion to arcsecond
return X * 1e-6, Y * 1e-6, s_xy2 * 1e-6 | Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond | Below is the the instruction that describes the task:
### Input:
Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
### Response:
def _xysxy2(date):
"""Here we deviate from what has been done everywhere else. Instead of taking the formulas
available in the Vallado, we take those described in the files tab5.2{a,b,d}.txt.
The result should be equivalent, but they are the last iteration of the IAU2000A as of June 2016
Args:
date (Date)
Return:
3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
"""
planets = _planets(date)
x_tab, y_tab, s_tab = _tab('X'), _tab('Y'), _tab('s')
ttt = date.change_scale('TT').julian_century
# Units: micro-arcsecond
X = -16616.99 + 2004191742.88 * ttt - 427219.05 * ttt ** 2 - 198620.54 * ttt ** 3\
- 46.05 * ttt ** 4 + 5.98 * ttt ** 5
Y = -6950.78 - 25381.99 * ttt - 22407250.99 * ttt ** 2 + 1842.28 * ttt ** 3\
+ 1113.06 * ttt ** 4 + 0.99 * ttt ** 5
s_xy2 = 94.0 + 3808.65 * ttt - 122.68 * ttt ** 2 - 72574.11 * ttt ** 3\
+ 27.98 * ttt ** 4 + 15.62 * ttt ** 5
for j in range(5):
_x, _y, _s = 0, 0, 0
for i in range(len(x_tab[j])):
Axs, Axc, *p_coefs = x_tab[j][i]
ax_p = np.dot(p_coefs, planets)
_x += Axs * np.sin(ax_p) + Axc * np.cos(ax_p)
for i in range(len(y_tab[j])):
Ays, Ayc, *p_coefs = y_tab[j][i]
ay_p = np.dot(p_coefs, planets)
_y += Ays * np.sin(ay_p) + Ayc * np.cos(ay_p)
for i in range(len(s_tab[j])):
Ass, Asc, *p_coefs = s_tab[j][i]
as_p = np.dot(p_coefs, planets)
_s += Ass * np.sin(as_p) + Asc * np.cos(as_p)
X += _x * ttt ** j
Y += _y * ttt ** j
s_xy2 += _s * ttt ** j
# Conversion to arcsecond
return X * 1e-6, Y * 1e-6, s_xy2 * 1e-6 |
def get_factors_iterative2(n):
"""[summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n]
"""
ans, stack, x = [], [], 2
while True:
if x > n // x:
if not stack:
return ans
ans.append(stack + [n])
x = stack.pop()
n *= x
x += 1
elif n % x == 0:
stack.append(x)
n //= x
else:
x += 1 | [summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n] | Below is the the instruction that describes the task:
### Input:
[summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n]
### Response:
def get_factors_iterative2(n):
"""[summary]
analog as above
Arguments:
n {[int]} -- [description]
Returns:
[list of lists] -- [all factors of n]
"""
ans, stack, x = [], [], 2
while True:
if x > n // x:
if not stack:
return ans
ans.append(stack + [n])
x = stack.pop()
n *= x
x += 1
elif n % x == 0:
stack.append(x)
n //= x
else:
x += 1 |
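A quick worked example of the function above; for n = 12 the stack-based search returns every multiplicative partition apart from the trivial [12]:
print(get_factors_iterative2(12))
# [[2, 2, 3], [2, 6], [3, 4]]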
def incremental_a_value(bval, min_mag, mag_inc):
'''
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table
'''
a_cum = 10. ** (bval * min_mag)
a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) -
(10. ** (-bval * mag_inc)))
return a_inc | Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table | Below is the the instruction that describes the task:
### Input:
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table
### Response:
def incremental_a_value(bval, min_mag, mag_inc):
'''
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)
:param float bval:
Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
Minimum magnitude of completeness table
:param float mag_inc:
Magnitude increment of the completeness table
'''
a_cum = 10. ** (bval * min_mag)
a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) -
(10. ** (-bval * mag_inc)))
return a_inc |
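A minimal usage sketch; the b-value, completeness magnitudes and increment below are illustrative only:
import numpy as np
a_inc = incremental_a_value(1.0, np.array([4.5, 5.0]), 0.1)
# one incremental a-value per completeness magnitude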
def _make_input(self, action, old_quat):
"""
Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat.
"""
return {
"dpos": action[:3],
# IK controller takes an absolute orientation in robot base frame
"rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
} | Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat. | Below is the the instruction that describes the task:
### Input:
Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat.
### Response:
def _make_input(self, action, old_quat):
"""
Helper function that returns a dictionary with keys dpos, rotation from a raw input
array. The first three elements are taken to be displacement in position, and a
quaternion indicating the change in rotation with respect to @old_quat.
"""
return {
"dpos": action[:3],
# IK controller takes an absolute orientation in robot base frame
"rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
} |
def _elements(cls):
''' find the elements with controls '''
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
return elements | find the elements with controls | Below is the the instruction that describes the task:
### Input:
find the elements with controls
### Response:
def _elements(cls):
''' find the elements with controls '''
if not cls.__is_selector():
raise Exception("Invalid selector[%s]." %cls.__control["by"])
driver = Web.driver
try:
elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"]))
except:
raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"]))
return elements |
def hide_me(tb, g=globals()):
"""Hide stack traceback of given stack"""
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb | Hide stack traceback of given stack | Below is the the instruction that describes the task:
### Input:
Hide stack traceback of given stack
### Response:
def hide_me(tb, g=globals()):
"""Hide stack traceback of given stack"""
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb |
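A hypothetical usage sketch for re-raising an exception with the filtered traceback; do_work is a placeholder for the wrapped call:
import sys

try:
    do_work()
except Exception as exc:
    _, _, tb = sys.exc_info()
    raise exc.with_traceback(hide_me(tb))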
def namelist(self):
"""Return a list of file names in the archive."""
names = []
for member in self.filelist:
names.append(member.filename)
return names | Return a list of file names in the archive. | Below is the the instruction that describes the task:
### Input:
Return a list of file names in the archive.
### Response:
def namelist(self):
"""Return a list of file names in the archive."""
names = []
for member in self.filelist:
names.append(member.filename)
return names |
def namespace(self):
"""
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix) | Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name}) | Below is the the instruction that describes the task:
### Input:
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
### Response:
def namespace(self):
"""
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return self.defaultNamespace()
return self.resolvePrefix(self.prefix) |
async def delayNdefProps(self):
'''
Hold this during a series of renames to delay ndef
secondary property processing until the end....
'''
async with self.getTempSlab() as slab:
seqn = s_slabseqn.SlabSeqn(slab, 'ndef')
self.ndefdelay = seqn
yield
self.ndefdelay = None
logger.info(f'Processing {seqn.index()} delayed values.')
# process them all now...
for i, (oldv, newv) in seqn.iter(0):
await self.editNdefProps(oldv, newv)
if i and i % _progress == 0:
logger.info(f'Processed {i} delayed values.') | Hold this during a series of renames to delay ndef
secondary property processing until the end.... | Below is the the instruction that describes the task:
### Input:
Hold this during a series of renames to delay ndef
secondary property processing until the end....
### Response:
async def delayNdefProps(self):
'''
Hold this during a series of renames to delay ndef
secondary property processing until the end....
'''
async with self.getTempSlab() as slab:
seqn = s_slabseqn.SlabSeqn(slab, 'ndef')
self.ndefdelay = seqn
yield
self.ndefdelay = None
logger.info(f'Processing {seqn.index()} delayed values.')
# process them all now...
for i, (oldv, newv) in seqn.iter(0):
await self.editNdefProps(oldv, newv)
if i and i % _progress == 0:
logger.info(f'Processed {i} delayed values.') |
def p_FuncDef(p):
'''
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
'''
p[0] = FuncDef(p[2], p[3], p[5], p[8], p[9], p[10]) | FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block | Below is the the instruction that describes the task:
### Input:
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
### Response:
def p_FuncDef(p):
'''
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
'''
p[0] = FuncDef(p[2], p[3], p[5], p[8], p[9], p[10]) |
def get(self, rid, data_callback=None, raise_on_error=True):
"""Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
"""
cached_data = None
ds_data = self.ds.get(rid, raise_on_error=False)
if ds_data is not None:
expired = True
if ds_data.get('found') is True:
if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
cached_data = ds_data.get('_source', {}).get('cache-data')
expired = False
self.tcex.log.debug('Using cached data for ({}).'.format(rid))
else:
self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))
if expired or ds_data.get('found') is False:
# when cache is expired or does not exist use callback to get data if possible
if callable(data_callback):
cached_data = data_callback(rid)
self.tcex.log.debug('Using callback data for ({}).'.format(rid))
if cached_data:
self.update(rid, cached_data, raise_on_error) # update the cache data
return cached_data | Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response. | Below is the the instruction that describes the task:
### Input:
Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
### Response:
def get(self, rid, data_callback=None, raise_on_error=True):
"""Get cached data from the data store.
Args:
rid (str): The record identifier.
data_callback (callable): A method that will return the data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.
Returns:
object : Python request response.
"""
cached_data = None
ds_data = self.ds.get(rid, raise_on_error=False)
if ds_data is not None:
expired = True
if ds_data.get('found') is True:
if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
cached_data = ds_data.get('_source', {}).get('cache-data')
expired = False
self.tcex.log.debug('Using cached data for ({}).'.format(rid))
else:
self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))
if expired or ds_data.get('found') is False:
# when cache is expired or does not exist use callback to get data if possible
if callable(data_callback):
cached_data = data_callback(rid)
self.tcex.log.debug('Using callback data for ({}).'.format(rid))
if cached_data:
self.update(rid, cached_data, raise_on_error) # update the cache data
return cached_data |
def get_region_from_metadata():
'''
Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6
'''
global __Location__
if __Location__ == 'do-not-get-from-metadata':
log.debug('Previously failed to get AWS region from metadata. Not trying again.')
return None
# Cached region
if __Location__ != '':
return __Location__
try:
# Connections to instance meta-data must fail fast and never be proxied
result = requests.get(
"http://169.254.169.254/latest/dynamic/instance-identity/document",
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
except requests.exceptions.RequestException:
log.warning('Failed to get AWS region from instance metadata.', exc_info=True)
# Do not try again
__Location__ = 'do-not-get-from-metadata'
return None
try:
region = result.json()['region']
__Location__ = region
return __Location__
except (ValueError, KeyError):
log.warning('Failed to decode JSON from instance metadata.')
return None
return None | Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6 | Below is the the instruction that describes the task:
### Input:
Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6
### Response:
def get_region_from_metadata():
'''
Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6
'''
global __Location__
if __Location__ == 'do-not-get-from-metadata':
log.debug('Previously failed to get AWS region from metadata. Not trying again.')
return None
# Cached region
if __Location__ != '':
return __Location__
try:
# Connections to instance meta-data must fail fast and never be proxied
result = requests.get(
"http://169.254.169.254/latest/dynamic/instance-identity/document",
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
except requests.exceptions.RequestException:
log.warning('Failed to get AWS region from instance metadata.', exc_info=True)
# Do not try again
__Location__ = 'do-not-get-from-metadata'
return None
try:
region = result.json()['region']
__Location__ = region
return __Location__
except (ValueError, KeyError):
log.warning('Failed to decode JSON from instance metadata.')
return None
return None |
def _create_doc(self):
'''
Create document.
:return:
'''
root = etree.Element('image')
root.set('schemaversion', '6.3')
root.set('name', self.name)
return root | Create document.
:return: | Below is the the instruction that describes the task:
### Input:
Create document.
:return:
### Response:
def _create_doc(self):
'''
Create document.
:return:
'''
root = etree.Element('image')
root.set('schemaversion', '6.3')
root.set('name', self.name)
return root |
def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | OSI Approved license. | Below is the the instruction that describes the task:
### Input:
OSI Approved license.
### Response:
def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} |
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
) | Returns this shape as a `TensorShapeProto`. | Below is the the instruction that describes the task:
### Input:
Returns this shape as a `TensorShapeProto`.
### Response:
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
) |
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name) | Rollback migrations. | Below is the the instruction that describes the task:
### Input:
Rollback migrations.
### Response:
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name) |
def getPropAllSupers(self, aURI):
"""
note: requires SPARQL 1.1
        2015-06-04: currently not used, inferred from above
"""
aURI = aURI
try:
qres = self.rdflib_graph.query("""SELECT DISTINCT ?x
WHERE {
{ <%s> rdfs:subPropertyOf+ ?x }
FILTER (!isBlank(?x))
}
""" % (aURI))
except:
printDebug(
"... warning: the 'getPropAllSupers' query failed (maybe missing SPARQL 1.1 support?)"
)
qres = []
return list(qres) | note: requires SPARQL 1.1
        2015-06-04: currently not used, inferred from above | Below is the the instruction that describes the task:
### Input:
note: requires SPARQL 1.1
        2015-06-04: currently not used, inferred from above
### Response:
def getPropAllSupers(self, aURI):
"""
note: requires SPARQL 1.1
        2015-06-04: currently not used, inferred from above
"""
aURI = aURI
try:
qres = self.rdflib_graph.query("""SELECT DISTINCT ?x
WHERE {
{ <%s> rdfs:subPropertyOf+ ?x }
FILTER (!isBlank(?x))
}
""" % (aURI))
except:
printDebug(
"... warning: the 'getPropAllSupers' query failed (maybe missing SPARQL 1.1 support?)"
)
qres = []
return list(qres) |
def _value_format(self, value):
"""
Format value for map value display.
"""
return '%s: %s' % (
self.area_names.get(self.adapt_code(value[0]), '?'),
self._y_format(value[1])
) | Format value for map value display. | Below is the the instruction that describes the task:
### Input:
Format value for map value display.
### Response:
def _value_format(self, value):
"""
Format value for map value display.
"""
return '%s: %s' % (
self.area_names.get(self.adapt_code(value[0]), '?'),
self._y_format(value[1])
) |
def delete(self):
"""
Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None
"""
if self in self._parent.vlan_interface:
self._parent.data['vlanInterfaces'] = [
v for v in self._parent.vlan_interface
if v != self]
self.update()
for route in self._parent._engine.routing:
if route.to_delete:
route.delete() | Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None | Below is the the instruction that describes the task:
### Input:
Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None
### Response:
def delete(self):
"""
Delete this Vlan interface from the parent interface.
This will also remove stale routes if the interface has
networks associated with it.
:return: None
"""
if self in self._parent.vlan_interface:
self._parent.data['vlanInterfaces'] = [
v for v in self._parent.vlan_interface
if v != self]
self.update()
for route in self._parent._engine.routing:
if route.to_delete:
route.delete() |
def connect(self, deleteOldVersions=False, recreate=False):
""" Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
"""
# Initialize tables, if needed
with ConnectionFactory.get() as conn:
# Initialize tables
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
# Save our connection id
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return | Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists. | Below is the the instruction that describes the task:
### Input:
Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
### Response:
def connect(self, deleteOldVersions=False, recreate=False):
""" Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
"""
# Initialize tables, if needed
with ConnectionFactory.get() as conn:
# Initialize tables
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
# Save our connection id
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return |
def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found") | Iterate over the items to insert from an INSERT statement | Below is the the instruction that describes the task:
### Input:
Iterate over the items to insert from an INSERT statement
### Response:
def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found") |
def sha256(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha256(message)) | Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
### Response:
def sha256(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA256.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha256(message)) |
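
A minimal usage sketch for the hash helper above, assuming PyNaCl is installed (the message bytes are arbitrary):

import nacl.encoding
import nacl.hash

digest = nacl.hash.sha256(b"hello world", encoder=nacl.encoding.HexEncoder)
print(digest)   # 64 hex characters, returned as bytes
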
def _remove_list_item(self, beacon_config, label):
'''
Remove an item from a beacon config list
'''
index = self._get_index(beacon_config, label)
del beacon_config[index] | Remove an item from a beacon config list | Below is the the instruction that describes the task:
### Input:
Remove an item from a beacon config list
### Response:
def _remove_list_item(self, beacon_config, label):
'''
Remove an item from a beacon config list
'''
index = self._get_index(beacon_config, label)
del beacon_config[index] |
def extended_arg_patterns(self):
"""Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
"""
for arg in self._arg_iterator(self.args):
if isinstance(arg, Pattern):
if arg.mode > self.single:
while True:
yield arg
else:
yield arg
else:
yield arg | Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value | Below is the the instruction that describes the task:
### Input:
Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
### Response:
def extended_arg_patterns(self):
"""Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
"""
for arg in self._arg_iterator(self.args):
if isinstance(arg, Pattern):
if arg.mode > self.single:
while True:
yield arg
else:
yield arg
else:
yield arg |
def get_still_seg_belonged(dt_str, seg_duration, fmt='%Y-%m-%d %H:%M:%S'):
"""
    Get the still (non-sliding) time segment that the given moment belongs to
    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: time segment length, unit: minute
:param fmt: datetime string format
:return:
"""
dt = time_util.str_to_datetime(dt_str, fmt)
minutes_of_day = time_util.get_minutes_of_day(dt)
return time_util.minutes_to_time_str(
        minutes_of_day - minutes_of_day % seg_duration) | Get the still (non-sliding) time segment that the given moment belongs to
    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: time segment length, unit: minute
:param fmt: datetime string format
:return: | Below is the the instruction that describes the task:
### Input:
Get the still (non-sliding) time segment that the given moment belongs to
    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: time segment length, unit: minute
:param fmt: datetime string format
:return:
### Response:
def get_still_seg_belonged(dt_str, seg_duration, fmt='%Y-%m-%d %H:%M:%S'):
"""
    Get the still (non-sliding) time segment that the given moment belongs to
    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: time segment length, unit: minute
:param fmt: datetime string format
:return:
"""
dt = time_util.str_to_datetime(dt_str, fmt)
minutes_of_day = time_util.get_minutes_of_day(dt)
return time_util.minutes_to_time_str(
minutes_of_day - minutes_of_day % seg_duration) |
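
The time_util module used above is project-specific, so here is a self-contained sketch of just the bucketing arithmetic; the 15-minute segment length is an arbitrary choice for illustration:

from datetime import datetime

dt = datetime.strptime("2016-10-31 12:22:11", "%Y-%m-%d %H:%M:%S")
minutes_of_day = dt.hour * 60 + dt.minute           # 742
seg_start = minutes_of_day - minutes_of_day % 15    # 735, i.e. the 12:15 bucket
print("%02d:%02d" % divmod(seg_start, 60))          # 12:15
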
def check_calendar(self, ds):
'''
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
valid_calendars = [
'gregorian',
'standard',
'proleptic_gregorian',
'noleap',
'365_day',
'all_leap',
'366_day',
'360_day',
'julian',
'none'
]
ret_val = []
# if has a calendar, check that it is within the valid values
# otherwise no calendar is valid
for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
reasoning = None
valid_calendar = time_var.calendar in valid_calendars
if not valid_calendar:
reasoning = ["§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar" % (time_var.name, time_var.calendar)]
# passes if the calendar is valid, otherwise notify of invalid
# calendar
result = Result(BaseCheck.LOW,
valid_calendar,
self.section_titles['4.4'],
reasoning)
ret_val.append(result)
return ret_val | Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results | Below is the the instruction that describes the task:
### Input:
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
### Response:
def check_calendar(self, ds):
'''
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
valid_calendars = [
'gregorian',
'standard',
'proleptic_gregorian',
'noleap',
'365_day',
'all_leap',
'366_day',
'360_day',
'julian',
'none'
]
ret_val = []
# if has a calendar, check that it is within the valid values
# otherwise no calendar is valid
for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
reasoning = None
valid_calendar = time_var.calendar in valid_calendars
if not valid_calendar:
reasoning = ["§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar" % (time_var.name, time_var.calendar)]
# passes if the calendar is valid, otherwise notify of invalid
# calendar
result = Result(BaseCheck.LOW,
valid_calendar,
self.section_titles['4.4'],
reasoning)
ret_val.append(result)
return ret_val |
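
A rough sketch of the kind of file this check inspects, built as an in-memory netCDF4 dataset; the variable and attribute values are illustrative rather than taken from a real CF file:

import netCDF4

ds = netCDF4.Dataset("inmemory.nc", "w", diskless=True)
ds.createDimension("time", None)
time_var = ds.createVariable("time", "f8", ("time",))
time_var.units = "days since 2000-01-01"
time_var.calendar = "360_day"   # one of the CF-valid calendars listed above
# The same attribute lookup the checker uses to find candidate variables:
print(ds.get_variables_by_attributes(calendar=lambda c: c is not None))
ds.close()
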
def asserts(self, *args, **kwargs):
"""Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change())
"""
result = self.match(*args, **kwargs)
self.expect(result)
return result | Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change()) | Below is the the instruction that describes the task:
### Input:
Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change())
### Response:
def asserts(self, *args, **kwargs):
"""Wraps match method and places under an assertion. Override this for higher-level control,
such as returning a custom object for additional validation (e.g. expect().to.change())
"""
result = self.match(*args, **kwargs)
self.expect(result)
return result |
def tasks(self):
"""
Returns a list of all tasks known to the engine.
:return: A list of task names.
"""
task_input = {'taskName': 'QueryTaskCatalog'}
output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
return output['outputParameters']['TASKS'] | Returns a list of all tasks known to the engine.
:return: A list of task names. | Below is the the instruction that describes the task:
### Input:
Returns a list of all tasks known to the engine.
:return: A list of task names.
### Response:
def tasks(self):
"""
Returns a list of all tasks known to the engine.
:return: A list of task names.
"""
task_input = {'taskName': 'QueryTaskCatalog'}
output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
return output['outputParameters']['TASKS'] |
def unpitched_low(dur, idx):
"""
Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note.
"""
env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2
freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi)) # Hz
result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5
return list(result) | Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note. | Below is the the instruction that describes the task:
### Input:
Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note.
### Response:
def unpitched_low(dur, idx):
"""
Non-harmonic bass/lower frequency sound as a list (due to memoization).
Parameters
----------
dur:
Duration, in samples.
idx:
Zero or one (integer), for a small difference to the sound played.
Returns
-------
A list with the synthesized note.
"""
env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2
freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi)) # Hz
result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5
return list(result) |
def p_case_list(p):
'''case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list'''
if len(p) == 6:
p[0] = p[1] + [ast.Case(p[3], p[5], lineno=p.lineno(2))]
elif len(p) == 5:
p[0] = p[1] + [ast.Default(p[4], lineno=p.lineno(2))]
else:
p[0] = [] | case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list | Below is the the instruction that describes the task:
### Input:
case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list
### Response:
def p_case_list(p):
'''case_list : empty
| case_list CASE expr case_separator inner_statement_list
| case_list DEFAULT case_separator inner_statement_list'''
if len(p) == 6:
p[0] = p[1] + [ast.Case(p[3], p[5], lineno=p.lineno(2))]
elif len(p) == 5:
p[0] = p[1] + [ast.Default(p[4], lineno=p.lineno(2))]
else:
p[0] = [] |
def create_lv(self, name, length, units):
"""
Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised.
"""
if units != "%":
size = size_units[units] * length
else:
if not (0 < length <= 100) or type(length) is float:
raise ValueError("Length not supported.")
size = (self.size("B") / 100) * length
self.open()
lvh = lvm_vg_create_lv_linear(self.handle, name, c_ulonglong(size))
if not bool(lvh):
self.close()
raise CommitError("Failed to create LV.")
lv = LogicalVolume(self, lvh=lvh)
self.close()
return lv | Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised. | Below is the the instruction that describes the task:
### Input:
Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised.
### Response:
def create_lv(self, name, length, units):
"""
Creates a logical volume and returns the LogicalVolume instance associated with
the lv_t handle::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
lv = vg.create_lv("mylv", 40, "MiB")
*Args:*
* name (str): The desired logical volume name.
* length (int): The desired size.
* units (str): The size units.
*Raises:*
* HandleError, CommitError, ValueError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised.
"""
if units != "%":
size = size_units[units] * length
else:
if not (0 < length <= 100) or type(length) is float:
raise ValueError("Length not supported.")
size = (self.size("B") / 100) * length
self.open()
lvh = lvm_vg_create_lv_linear(self.handle, name, c_ulonglong(size))
if not bool(lvh):
self.close()
raise CommitError("Failed to create LV.")
lv = LogicalVolume(self, lvh=lvh)
self.close()
return lv |
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
) | Clean up data downloaded with the ingest command. | Below is the the instruction that describes the task:
### Input:
Clean up data downloaded with the ingest command.
### Response:
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
) |
def parse_child_elements(self, element):
'''parses all children of an etree element'''
for child in element.iterchildren():
self.parsers[child.tag](child) | parses all children of an etree element | Below is the the instruction that describes the task:
### Input:
parses all children of an etree element
### Response:
def parse_child_elements(self, element):
'''parses all children of an etree element'''
for child in element.iterchildren():
self.parsers[child.tag](child) |
def make_dependent(self, source, target, action):
'''
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
'''
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
# pylint: disable=protected-access
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators() | Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1) | Below is the the instruction that describes the task:
### Input:
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
### Response:
def make_dependent(self, source, target, action):
'''
Create a dependency between path 'source' and path 'target' via the
callable 'action'.
>>> permuter._generators
[IterValueGenerator(one), IterValueGenerator(two)]
>>> permuter.make_dependent('one', 'two', lambda x: x + 1)
Going forward, 'two' will only contain values that are (one+1)
'''
if not self._generators:
return
src_permuter, src = self._resolve_child(source)
dest = self._resolve_child(target)[1]
# pylint: disable=protected-access
container = src_permuter._generators
idx = container.index(src)
container[idx] = DependentValueGenerator(src.name(), dest, action)
self._update_independent_generators() |
def _update_dPrxy(self):
"""Update `dPrxy`."""
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1 | Update `dPrxy`. | Below is the the instruction that describes the task:
### Input:
Update `dPrxy`.
### Response:
def _update_dPrxy(self):
"""Update `dPrxy`."""
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1 |
def _param_callback(self, name, value):
"""Generic callback registered for all the groups"""
print('{0}: {1}'.format(name, value))
# Remove each parameter from the list and close the link when
# all are fetched
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
# First remove all the group callbacks
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g,
cb=self._param_callback)
# Create a new random value [0.00,1.00] for pid_attitude.pitch_kd
# and set it
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude',
name='pitch_kd',
cb=self._a_pitch_kd_callback)
# When setting a value the parameter is automatically read back
# and the registered callbacks will get the updated value
self._cf.param.set_value('pid_attitude.pitch_kd',
'{:.2f}'.format(pkd)) | Generic callback registered for all the groups | Below is the the instruction that describes the task:
### Input:
Generic callback registered for all the groups
### Response:
def _param_callback(self, name, value):
"""Generic callback registered for all the groups"""
print('{0}: {1}'.format(name, value))
# Remove each parameter from the list and close the link when
# all are fetched
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
# First remove all the group callbacks
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g,
cb=self._param_callback)
# Create a new random value [0.00,1.00] for pid_attitude.pitch_kd
# and set it
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude',
name='pitch_kd',
cb=self._a_pitch_kd_callback)
# When setting a value the parameter is automatically read back
# and the registered callbacks will get the updated value
self._cf.param.set_value('pid_attitude.pitch_kd',
'{:.2f}'.format(pkd)) |
def StringIO(*args, **kw):
"""Thunk to load the real StringIO on demand"""
global StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return StringIO(*args,**kw) | Thunk to load the real StringIO on demand | Below is the the instruction that describes the task:
### Input:
Thunk to load the real StringIO on demand
### Response:
def StringIO(*args, **kw):
"""Thunk to load the real StringIO on demand"""
global StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return StringIO(*args,**kw) |
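
The thunk above targets Python 2 module names; on Python 3 the equivalent lazy import would point at io.StringIO instead. A hedged sketch of that variant:

def StringIO(*args, **kw):
    """Thunk to load io.StringIO on demand (Python 3 variant)."""
    global StringIO
    from io import StringIO
    return StringIO(*args, **kw)
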
def _check_neg(self, level, *tokens):
"""Check that the different tokens were NOT logged in one record, assert by level."""
for record in self.records:
if level is not None and record.levelno != level:
continue
if all(token in record.message for token in tokens):
break
else:
return
# didn't exit, all tokens found in the same record
msg = "Tokens {} found in the following record: {} {!r}".format(
tokens, record.levelname, record.message)
self.test_instance.fail(msg) | Check that the different tokens were NOT logged in one record, assert by level. | Below is the the instruction that describes the task:
### Input:
Check that the different tokens were NOT logged in one record, assert by level.
### Response:
def _check_neg(self, level, *tokens):
"""Check that the different tokens were NOT logged in one record, assert by level."""
for record in self.records:
if level is not None and record.levelno != level:
continue
if all(token in record.message for token in tokens):
break
else:
return
# didn't exit, all tokens found in the same record
msg = "Tokens {} found in the following record: {} {!r}".format(
tokens, record.levelname, record.message)
self.test_instance.fail(msg) |
def _set_get_vnetwork_hosts(self, v, load=False):
"""
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_hosts must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_hosts = t
if hasattr(self, '_set'):
self._set() | Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts | Below is the the instruction that describes the task:
### Input:
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
### Response:
def _set_get_vnetwork_hosts(self, v, load=False):
"""
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_hosts must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_hosts = t
if hasattr(self, '_set'):
self._set() |
def assert_reset(self, asserted):
"""Assert or de-assert target reset line"""
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | Assert or de-assert target reset line | Below is the the instruction that describes the task:
### Input:
Assert or de-assert target reset line
### Response:
def assert_reset(self, asserted):
"""Assert or de-assert target reset line"""
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) |