code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
| text
stringlengths 164
112k
|
---|---|---|
def from_config(config):
"""
Generate a matrix from a configuration dictionary.
"""
matrix = {}
variables = config.keys()
for entries in product(*config.values()):
combination = dict(zip(variables, entries))
include = True
for value in combination.values():
for reducer in value.reducers:
if reducer.pattern == '-':
match = not combination[reducer.variable].value
else:
match = fnmatch(combination[reducer.variable].value, reducer.pattern)
if match if reducer.is_exclude else not match:
include = False
if include:
key = '-'.join(entry.alias for entry in entries if entry.alias)
data = dict(
zip(variables, (entry.value for entry in entries))
)
if key in matrix and data != matrix[key]:
raise DuplicateEnvironment(key, data, matrix[key])
matrix[key] = data
return matrix | Generate a matrix from a configuration dictionary. | Below is the the instruction that describes the task:
### Input:
Generate a matrix from a configuration dictionary.
### Response:
def from_config(config):
"""
Generate a matrix from a configuration dictionary.
"""
matrix = {}
variables = config.keys()
for entries in product(*config.values()):
combination = dict(zip(variables, entries))
include = True
for value in combination.values():
for reducer in value.reducers:
if reducer.pattern == '-':
match = not combination[reducer.variable].value
else:
match = fnmatch(combination[reducer.variable].value, reducer.pattern)
if match if reducer.is_exclude else not match:
include = False
if include:
key = '-'.join(entry.alias for entry in entries if entry.alias)
data = dict(
zip(variables, (entry.value for entry in entries))
)
if key in matrix and data != matrix[key]:
raise DuplicateEnvironment(key, data, matrix[key])
matrix[key] = data
return matrix |
def path_dispatch_rename(rename_like_method):
"""
decorator for rename-like function, that need dispatch on 2 arguments
"""
def _wrapper_method(self, old_path, new_path):
old_path, _old_path, old_sentinel = _split_path(old_path);
new_path, _new_path, new_sentinel = _split_path(new_path);
if old_sentinel != new_sentinel:
raise ValueError('Does not know how to move things across contents manager mountpoints')
else:
sentinel = new_sentinel
man = self.managers.get(sentinel, None)
if man is not None:
rename_meth = getattr(man, rename_like_method.__name__)
sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
return sub
else :
return rename_meth(self, old_path, new_path)
return _wrapper_method | decorator for rename-like function, that need dispatch on 2 arguments | Below is the the instruction that describes the task:
### Input:
decorator for rename-like function, that need dispatch on 2 arguments
### Response:
def path_dispatch_rename(rename_like_method):
"""
decorator for rename-like function, that need dispatch on 2 arguments
"""
def _wrapper_method(self, old_path, new_path):
old_path, _old_path, old_sentinel = _split_path(old_path);
new_path, _new_path, new_sentinel = _split_path(new_path);
if old_sentinel != new_sentinel:
raise ValueError('Does not know how to move things across contents manager mountpoints')
else:
sentinel = new_sentinel
man = self.managers.get(sentinel, None)
if man is not None:
rename_meth = getattr(man, rename_like_method.__name__)
sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
return sub
else :
return rename_meth(self, old_path, new_path)
return _wrapper_method |
def insert_rule(self, chain, src=None, dest=None, target=None):
"""Insert a new rule in the chain
"""
if not chain:
raise ValueError("Invalid chain")
if not target:
raise ValueError("Invalid target")
if not (src or dest):
raise ValueError("Need src, dest, or both")
args = ["-I", chain]
if src:
args += ["-s", src]
if dest:
args += ["-d", dest]
args += ["-j", target]
self.call(*args) | Insert a new rule in the chain | Below is the the instruction that describes the task:
### Input:
Insert a new rule in the chain
### Response:
def insert_rule(self, chain, src=None, dest=None, target=None):
"""Insert a new rule in the chain
"""
if not chain:
raise ValueError("Invalid chain")
if not target:
raise ValueError("Invalid target")
if not (src or dest):
raise ValueError("Need src, dest, or both")
args = ["-I", chain]
if src:
args += ["-s", src]
if dest:
args += ["-d", dest]
args += ["-j", target]
self.call(*args) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'collections') and self.collections is not None:
_dict['collections'] = [x._to_dict() for x in self.collections]
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'collections') and self.collections is not None:
_dict['collections'] = [x._to_dict() for x in self.collections]
return _dict |
def destroy(self, deal_id, contact_id) :
"""
Remove an associated contact
Remove a deal's associated contact
If a deal with the supplied unique identifier does not exist, it returns an error
This operation cannot be undone
:calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}``
:param int deal_id: Unique identifier of a Deal.
:param int contact_id: Unique identifier of a Contact.
:return: True if the operation succeeded.
:rtype: bool
"""
status_code, _, _ = self.http_client.delete("/deals/{deal_id}/associated_contacts/{contact_id}".format(deal_id=deal_id, contact_id=contact_id))
return status_code == 204 | Remove an associated contact
Remove a deal's associated contact
If a deal with the supplied unique identifier does not exist, it returns an error
This operation cannot be undone
:calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}``
:param int deal_id: Unique identifier of a Deal.
:param int contact_id: Unique identifier of a Contact.
:return: True if the operation succeeded.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Remove an associated contact
Remove a deal's associated contact
If a deal with the supplied unique identifier does not exist, it returns an error
This operation cannot be undone
:calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}``
:param int deal_id: Unique identifier of a Deal.
:param int contact_id: Unique identifier of a Contact.
:return: True if the operation succeeded.
:rtype: bool
### Response:
def destroy(self, deal_id, contact_id) :
"""
Remove an associated contact
Remove a deal's associated contact
If a deal with the supplied unique identifier does not exist, it returns an error
This operation cannot be undone
:calls: ``delete /deals/{deal_id}/associated_contacts/{contact_id}``
:param int deal_id: Unique identifier of a Deal.
:param int contact_id: Unique identifier of a Contact.
:return: True if the operation succeeded.
:rtype: bool
"""
status_code, _, _ = self.http_client.delete("/deals/{deal_id}/associated_contacts/{contact_id}".format(deal_id=deal_id, contact_id=contact_id))
return status_code == 204 |
def main(
output_file: str,
entry_point: Optional[str],
console_script: Optional[str],
python: Optional[str],
site_packages: Optional[str],
compressed: bool,
compile_pyc: bool,
extend_pythonpath: bool,
pip_args: List[str],
) -> None:
"""
Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included!
"""
if not pip_args and not site_packages:
sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES)
if output_file is None:
sys.exit(NO_OUTFILE)
# check for disallowed pip arguments
for disallowed in DISALLOWED_ARGS:
for supplied_arg in pip_args:
if supplied_arg in disallowed:
sys.exit(
DISALLOWED_PIP_ARGS.format(
arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed]
)
)
with TemporaryDirectory() as working_path:
tmp_site_packages = Path(working_path, "site-packages")
if site_packages:
shutil.copytree(site_packages, tmp_site_packages)
if pip_args:
# install deps into staged site-packages
pip.install(["--target", str(tmp_site_packages)] + list(pip_args))
# if entry_point is a console script, get the callable
if entry_point is None and console_script is not None:
try:
entry_point = find_entry_point(tmp_site_packages, console_script)
except KeyError:
if not Path(tmp_site_packages, "bin", console_script).exists():
sys.exit(NO_ENTRY_POINT.format(entry_point=console_script))
# create runtime environment metadata
env = Environment(
build_id=str(uuid.uuid4()),
entry_point=entry_point,
script=console_script,
compile_pyc=compile_pyc,
extend_pythonpath=extend_pythonpath,
)
Path(working_path, "environment.json").write_text(env.to_json())
# create bootstrapping directory in working path
bootstrap_target = Path(working_path, "_bootstrap")
bootstrap_target.mkdir(parents=True, exist_ok=True)
# copy bootstrap code
copy_bootstrap(bootstrap_target)
# create the zip
builder.create_archive(
Path(working_path),
target=Path(output_file).expanduser(),
interpreter=python or _interpreter_path(),
main="_bootstrap:bootstrap",
compressed=compressed,
) | Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included! | Below is the the instruction that describes the task:
### Input:
Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included!
### Response:
def main(
output_file: str,
entry_point: Optional[str],
console_script: Optional[str],
python: Optional[str],
site_packages: Optional[str],
compressed: bool,
compile_pyc: bool,
extend_pythonpath: bool,
pip_args: List[str],
) -> None:
"""
Shiv is a command line utility for building fully self-contained Python zipapps
as outlined in PEP 441, but with all their dependencies included!
"""
if not pip_args and not site_packages:
sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES)
if output_file is None:
sys.exit(NO_OUTFILE)
# check for disallowed pip arguments
for disallowed in DISALLOWED_ARGS:
for supplied_arg in pip_args:
if supplied_arg in disallowed:
sys.exit(
DISALLOWED_PIP_ARGS.format(
arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed]
)
)
with TemporaryDirectory() as working_path:
tmp_site_packages = Path(working_path, "site-packages")
if site_packages:
shutil.copytree(site_packages, tmp_site_packages)
if pip_args:
# install deps into staged site-packages
pip.install(["--target", str(tmp_site_packages)] + list(pip_args))
# if entry_point is a console script, get the callable
if entry_point is None and console_script is not None:
try:
entry_point = find_entry_point(tmp_site_packages, console_script)
except KeyError:
if not Path(tmp_site_packages, "bin", console_script).exists():
sys.exit(NO_ENTRY_POINT.format(entry_point=console_script))
# create runtime environment metadata
env = Environment(
build_id=str(uuid.uuid4()),
entry_point=entry_point,
script=console_script,
compile_pyc=compile_pyc,
extend_pythonpath=extend_pythonpath,
)
Path(working_path, "environment.json").write_text(env.to_json())
# create bootstrapping directory in working path
bootstrap_target = Path(working_path, "_bootstrap")
bootstrap_target.mkdir(parents=True, exist_ok=True)
# copy bootstrap code
copy_bootstrap(bootstrap_target)
# create the zip
builder.create_archive(
Path(working_path),
target=Path(output_file).expanduser(),
interpreter=python or _interpreter_path(),
main="_bootstrap:bootstrap",
compressed=compressed,
) |
def get_data(self):
"Get the data for this blob"
array = ctypes.POINTER(ctypes.c_char * len(self))
return ctypes.cast(self.data, array).contents.raw | Get the data for this blob | Below is the the instruction that describes the task:
### Input:
Get the data for this blob
### Response:
def get_data(self):
"Get the data for this blob"
array = ctypes.POINTER(ctypes.c_char * len(self))
return ctypes.cast(self.data, array).contents.raw |
def get_default(self, *args, **kwargs):
"""Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
"""
def retrieve_param(i):
try:
return self.__getattribute__(i)
except AttributeError:
if i == "device":
return self.default_device
else:
return self.__getattribute__(i.upper())
if len(args) == 0:
if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None:
return kwargs[list(kwargs.keys())[0]]
elif len(kwargs) == 1:
return retrieve_param(list(kwargs.keys())[0])
else:
raise TypeError("As dict is unordered, it is impossible to give"
"the parameters in the correct order.")
else:
out = []
for i in args:
if i[1] is None:
out.append(retrieve_param(i[0]))
else:
out.append(i[1])
return out | Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once. | Below is the the instruction that describes the task:
### Input:
Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
### Response:
def get_default(self, *args, **kwargs):
"""Get the default parameters as defined in the Settings instance.
This function proceeds to seamlessly retrieve the argument to pass
through, depending on either it was overidden or not: If no argument
was overridden in a function of the toolbox, the default argument will
be set to ``None``, and this function will retrieve the default
parameters as defined by the ``cdt.SETTINGS`` 's attributes.
It has two modes of processing:
1. **kwargs for retrieving a single argument: ``get_default(argument_name=value)``.
2. *args through a list of tuples of the shape ``('argument_name', value)`` to retrieve multiple values at once.
"""
def retrieve_param(i):
try:
return self.__getattribute__(i)
except AttributeError:
if i == "device":
return self.default_device
else:
return self.__getattribute__(i.upper())
if len(args) == 0:
if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None:
return kwargs[list(kwargs.keys())[0]]
elif len(kwargs) == 1:
return retrieve_param(list(kwargs.keys())[0])
else:
raise TypeError("As dict is unordered, it is impossible to give"
"the parameters in the correct order.")
else:
out = []
for i in args:
if i[1] is None:
out.append(retrieve_param(i[0]))
else:
out.append(i[1])
return out |
def apply(self, func, keep_attrs=None, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0
""" # noqa
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in self.data_vars.items())
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs) | Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0 | Below is the the instruction that describes the task:
### Input:
Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0
### Response:
def apply(self, func, keep_attrs=None, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948
bar (x) int64 -1 2
>>> ds.apply(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
bar (x) float64 1.0 2.0
""" # noqa
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in self.data_vars.items())
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs) |
def get_manuelles(self, site, code_parametre, debut, fin, court=False):
"""
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
"""
condition = "WHERE MESLA.NOPOL='%s' " % code_parametre
condition += "AND SITMETH.NSIT=%s " % site
condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut
condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin
if court == False:
select = """SELECT
MESLA.LIBELLE AS MESURE,
METH.LIBELLE AS METHODE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
ANA.DATE_ANA AS DATE_ANALYSE,
ANA.ID_LABO AS LABO,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
ANA.COMMENTAIRE AS COMMENTAIRE,
SITE.LIBELLE AS SITE,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
else:
select = """SELECT
MESLA.LIBELLE AS MESURE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
_sql = """%s
FROM ANALYSE ANA
INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)
INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)
INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)
INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)
INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)
INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)
%s
ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition)
return psql.read_sql(_sql, self.conn) | Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes) | Below is the the instruction that describes the task:
### Input:
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
### Response:
def get_manuelles(self, site, code_parametre, debut, fin, court=False):
"""
Recupération des mesures manuelles (labo) pour un site
site: numéro du site (voir fonction liste_sites_prelevement)
code_parametre: code ISO du paramètre à rechercher (C6H6=V4)
debut: date de début du premier prélèvement
fin: date de fin du dernier prélèvement
court: Renvoie un tableau au format court ou long (colonnes)
"""
condition = "WHERE MESLA.NOPOL='%s' " % code_parametre
condition += "AND SITMETH.NSIT=%s " % site
condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut
condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin
if court == False:
select = """SELECT
MESLA.LIBELLE AS MESURE,
METH.LIBELLE AS METHODE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
ANA.DATE_ANA AS DATE_ANALYSE,
ANA.ID_LABO AS LABO,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
ANA.COMMENTAIRE AS COMMENTAIRE,
SITE.LIBELLE AS SITE,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
else:
select = """SELECT
MESLA.LIBELLE AS MESURE,
ANA.VALEUR AS VALEUR,
MESLA.UNITE AS UNITE,
ANA.CODE_QUALITE AS CODE_QUALITE,
PRELEV.DATE_DEB AS DEBUT,
PRELEV.DATE_FIN AS FIN,
SITE.AXE AS ADRESSE,
COM.NOM_COMMUNE AS COMMUNE"""
_sql = """%s
FROM ANALYSE ANA
INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)
INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)
INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)
INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)
INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)
INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)
%s
ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition)
return psql.read_sql(_sql, self.conn) |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
dynamic_info_size_error_reported = False
tasks_key = registry_key.GetSubkeyByName('Tasks')
tree_key = registry_key.GetSubkeyByName('Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceExtractionWarning(
'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# TODO: improve this check to a regex.
# The GUID is in the form {%GUID%} and stored an UTF-16 little-endian
# string and should be 78 bytes in size.
id_value_data_size = len(id_value.data)
if id_value_data_size != 78:
parser_mediator.ProduceExtractionWarning(
'unsupported Id value data size: {0:d}.'.format(
id_value_data_size))
continue
guid_string = id_value.GetDataAsObject()
task_guids[guid_string] = value_key.name
dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
dynamic_info_size = dynamic_info_map.GetByteSize()
dynamic_info2_size = dynamic_info2_map.GetByteSize()
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
if not dynamic_info_value:
continue
dynamic_info_record_map = None
dynamic_info_value_data_size = len(dynamic_info_value.data)
if dynamic_info_value_data_size == dynamic_info_size:
dynamic_info_record_map = dynamic_info_map
elif dynamic_info_value_data_size == dynamic_info2_size:
dynamic_info_record_map = dynamic_info2_map
else:
if not dynamic_info_size_error_reported:
parser_mediator.ProduceExtractionWarning(
'unsupported DynamicInfo value data size: {0:d}.'.format(
dynamic_info_value_data_size))
dynamic_info_size_error_reported = True
continue
try:
dynamic_info_record = self._ReadStructureFromByteStream(
dynamic_info_value.data, 0, dynamic_info_record_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse DynamicInfo record with error: {0!s}.'.format(
exception))
name = task_guids.get(sub_key.name, sub_key.name)
values_dict = {}
values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format(
sub_key.name)
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = TaskCacheEventData()
event_data.task_name = name
event_data.task_identifier = sub_key.name
last_registered_time = dynamic_info_record.last_registered_time
if last_registered_time:
# Note this is likely either the last registered time or
# the update time.
date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Last registered time')
parser_mediator.ProduceEventWithEventData(event, event_data)
launch_time = dynamic_info_record.launch_time
if launch_time:
# Note this is likely the launch time.
date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Launch time')
parser_mediator.ProduceEventWithEventData(event, event_data)
unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
if unknown_time:
date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | Below is the the instruction that describes the task:
### Input:
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
### Response:
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
dynamic_info_size_error_reported = False
tasks_key = registry_key.GetSubkeyByName('Tasks')
tree_key = registry_key.GetSubkeyByName('Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceExtractionWarning(
'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# TODO: improve this check to a regex.
# The GUID is in the form {%GUID%} and stored an UTF-16 little-endian
# string and should be 78 bytes in size.
id_value_data_size = len(id_value.data)
if id_value_data_size != 78:
parser_mediator.ProduceExtractionWarning(
'unsupported Id value data size: {0:d}.'.format(
id_value_data_size))
continue
guid_string = id_value.GetDataAsObject()
task_guids[guid_string] = value_key.name
dynamic_info_map = self._GetDataTypeMap('dynamic_info_record')
dynamic_info2_map = self._GetDataTypeMap('dynamic_info2_record')
dynamic_info_size = dynamic_info_map.GetByteSize()
dynamic_info2_size = dynamic_info2_map.GetByteSize()
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValueByName('DynamicInfo')
if not dynamic_info_value:
continue
dynamic_info_record_map = None
dynamic_info_value_data_size = len(dynamic_info_value.data)
if dynamic_info_value_data_size == dynamic_info_size:
dynamic_info_record_map = dynamic_info_map
elif dynamic_info_value_data_size == dynamic_info2_size:
dynamic_info_record_map = dynamic_info2_map
else:
if not dynamic_info_size_error_reported:
parser_mediator.ProduceExtractionWarning(
'unsupported DynamicInfo value data size: {0:d}.'.format(
dynamic_info_value_data_size))
dynamic_info_size_error_reported = True
continue
try:
dynamic_info_record = self._ReadStructureFromByteStream(
dynamic_info_value.data, 0, dynamic_info_record_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse DynamicInfo record with error: {0!s}.'.format(
exception))
name = task_guids.get(sub_key.name, sub_key.name)
values_dict = {}
values_dict['Task: {0:s}'.format(name)] = '[ID: {0:s}]'.format(
sub_key.name)
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = TaskCacheEventData()
event_data.task_name = name
event_data.task_identifier = sub_key.name
last_registered_time = dynamic_info_record.last_registered_time
if last_registered_time:
# Note this is likely either the last registered time or
# the update time.
date_time = dfdatetime_filetime.Filetime(timestamp=last_registered_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Last registered time')
parser_mediator.ProduceEventWithEventData(event, event_data)
launch_time = dynamic_info_record.launch_time
if launch_time:
# Note this is likely the launch time.
date_time = dfdatetime_filetime.Filetime(timestamp=launch_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Launch time')
parser_mediator.ProduceEventWithEventData(event, event_data)
unknown_time = getattr(dynamic_info_record, 'unknown_time', None)
if unknown_time:
date_time = dfdatetime_filetime.Filetime(timestamp=unknown_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def main(global_config, **settings):
"""
This function returns a Pyramid WSGI application.
"""
from pyramid.config import Configurator
config = Configurator(settings=settings)
# include twitcher components
config.include('twitcher.config')
config.include('twitcher.frontpage')
config.include('twitcher.rpcinterface')
config.include('twitcher.owsproxy')
# tweens/middleware
# TODO: maybe add tween for exception handling or use unknown_failure view
config.include('twitcher.tweens')
config.scan()
return config.make_wsgi_app() | This function returns a Pyramid WSGI application. | Below is the the instruction that describes the task:
### Input:
This function returns a Pyramid WSGI application.
### Response:
def main(global_config, **settings):
"""
This function returns a Pyramid WSGI application.
"""
from pyramid.config import Configurator
config = Configurator(settings=settings)
# include twitcher components
config.include('twitcher.config')
config.include('twitcher.frontpage')
config.include('twitcher.rpcinterface')
config.include('twitcher.owsproxy')
# tweens/middleware
# TODO: maybe add tween for exception handling or use unknown_failure view
config.include('twitcher.tweens')
config.scan()
return config.make_wsgi_app() |
def post(self, text, attachments=None):
"""Post a message as the bot.
:param str text: the text of the message
:param attachments: a list of attachments
:type attachments: :class:`list`
:return: ``True`` if successful
:rtype: bool
"""
return self.manager.post(self.bot_id, text, attachments) | Post a message as the bot.
:param str text: the text of the message
:param attachments: a list of attachments
:type attachments: :class:`list`
:return: ``True`` if successful
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Post a message as the bot.
:param str text: the text of the message
:param attachments: a list of attachments
:type attachments: :class:`list`
:return: ``True`` if successful
:rtype: bool
### Response:
def post(self, text, attachments=None):
"""Post a message as the bot.
:param str text: the text of the message
:param attachments: a list of attachments
:type attachments: :class:`list`
:return: ``True`` if successful
:rtype: bool
"""
return self.manager.post(self.bot_id, text, attachments) |
def main():
"""Open B6/M8 file, filter entries by E-Value, and write said entries"""
for entry in b6_evalue_filter(args.b6, args.e_value):
args.output.write(entry.write()) | Open B6/M8 file, filter entries by E-Value, and write said entries | Below is the the instruction that describes the task:
### Input:
Open B6/M8 file, filter entries by E-Value, and write said entries
### Response:
def main():
"""Open B6/M8 file, filter entries by E-Value, and write said entries"""
for entry in b6_evalue_filter(args.b6, args.e_value):
args.output.write(entry.write()) |
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if d.project_name == pkgname) | Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
### Response:
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if d.project_name == pkgname) |
def normalize(self, mode="max", value=1):
"""
Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1.
"""
if mode.lower() == "sum":
factor = np.sum(self.y, axis=0)
elif mode.lower() == "max":
factor = np.max(self.y, axis=0)
else:
raise ValueError("Unsupported normalization mode %s!" % mode)
self.y /= factor / value | Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1. | Below is the the instruction that describes the task:
### Input:
Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1.
### Response:
def normalize(self, mode="max", value=1):
"""
Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1.
"""
if mode.lower() == "sum":
factor = np.sum(self.y, axis=0)
elif mode.lower() == "max":
factor = np.max(self.y, axis=0)
else:
raise ValueError("Unsupported normalization mode %s!" % mode)
self.y /= factor / value |
def simxCopyPasteObjects(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (ct.c_int*len(objectHandles))(*objectHandles)
c_objectHandles = ct.cast(c_objectHandles,ct.POINTER(ct.c_int)) # IronPython needs this
newObjectCount = ct.c_int()
newObjectHandles = ct.POINTER(ct.c_int)()
ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), ct.byref(newObjectHandles), ct.byref(newObjectCount), operationMode)
newobj = []
if ret == 0:
for i in range(newObjectCount.value):
newobj.append(newObjectHandles[i])
return ret, newobj | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxCopyPasteObjects(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (ct.c_int*len(objectHandles))(*objectHandles)
c_objectHandles = ct.cast(c_objectHandles,ct.POINTER(ct.c_int)) # IronPython needs this
newObjectCount = ct.c_int()
newObjectHandles = ct.POINTER(ct.c_int)()
ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), ct.byref(newObjectHandles), ct.byref(newObjectCount), operationMode)
newobj = []
if ret == 0:
for i in range(newObjectCount.value):
newobj.append(newObjectHandles[i])
return ret, newobj |
def list_subdomains(self, domain, limit=None, offset=None):
"""
Returns a list of all subdomains for the specified domain.
"""
return domain.list_subdomains(limit=limit, offset=offset) | Returns a list of all subdomains for the specified domain. | Below is the the instruction that describes the task:
### Input:
Returns a list of all subdomains for the specified domain.
### Response:
def list_subdomains(self, domain, limit=None, offset=None):
"""
Returns a list of all subdomains for the specified domain.
"""
return domain.list_subdomains(limit=limit, offset=offset) |
def dump_dataflow_images(df, index=0, batched=True,
number=1000, output_dir=None,
scale=1, resize=None, viz=None,
flipRGB=False):
"""
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not.
"""
if output_dir:
mkdir_p(output_dir)
if viz is not None:
viz = shape2d(viz)
vizsize = viz[0] * viz[1]
if resize is not None:
resize = tuple(shape2d(resize))
vizlist = []
df.reset_state()
cnt = 0
while True:
for dp in df:
if not batched:
imgbatch = [dp[index]]
else:
imgbatch = dp[index]
for img in imgbatch:
cnt += 1
if cnt == number:
return
if scale != 1:
img = img * scale
if resize is not None:
img = cv2.resize(img, resize)
if flipRGB:
img = img[:, :, ::-1]
if output_dir:
fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
cv2.imwrite(fname, img)
if viz is not None:
vizlist.append(img)
if viz is not None and len(vizlist) >= vizsize:
stack_patches(
vizlist[:vizsize],
nr_row=viz[0], nr_col=viz[1], viz=True)
vizlist = vizlist[vizsize:] | Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not. | Below is the the instruction that describes the task:
### Input:
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not.
### Response:
def dump_dataflow_images(df, index=0, batched=True,
number=1000, output_dir=None,
scale=1, resize=None, viz=None,
flipRGB=False):
"""
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not.
"""
if output_dir:
mkdir_p(output_dir)
if viz is not None:
viz = shape2d(viz)
vizsize = viz[0] * viz[1]
if resize is not None:
resize = tuple(shape2d(resize))
vizlist = []
df.reset_state()
cnt = 0
while True:
for dp in df:
if not batched:
imgbatch = [dp[index]]
else:
imgbatch = dp[index]
for img in imgbatch:
cnt += 1
if cnt == number:
return
if scale != 1:
img = img * scale
if resize is not None:
img = cv2.resize(img, resize)
if flipRGB:
img = img[:, :, ::-1]
if output_dir:
fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
cv2.imwrite(fname, img)
if viz is not None:
vizlist.append(img)
if viz is not None and len(vizlist) >= vizsize:
stack_patches(
vizlist[:vizsize],
nr_row=viz[0], nr_col=viz[1], viz=True)
vizlist = vizlist[vizsize:] |
def to_rst(cls) -> str:
"""Output the registry as reStructuredText, for documentation."""
sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
blank_line = '|' + 78 * ' ' + '|\n'
table = ''
for group in cls.groups:
table += sep_line
table += blank_line
table += '|' + '**{}**'.format(group.name).center(78) + '|\n'
table += blank_line
for error in group.errors:
table += sep_line
table += ('|' + error.code.center(6) + '| ' +
error.short_desc.ljust(70) + '|\n')
table += sep_line
return table | Output the registry as reStructuredText, for documentation. | Below is the the instruction that describes the task:
### Input:
Output the registry as reStructuredText, for documentation.
### Response:
def to_rst(cls) -> str:
"""Output the registry as reStructuredText, for documentation."""
sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
blank_line = '|' + 78 * ' ' + '|\n'
table = ''
for group in cls.groups:
table += sep_line
table += blank_line
table += '|' + '**{}**'.format(group.name).center(78) + '|\n'
table += blank_line
for error in group.errors:
table += sep_line
table += ('|' + error.code.center(6) + '| ' +
error.short_desc.ljust(70) + '|\n')
table += sep_line
return table |
def _run_check(self):
"""Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
"""
cmd = shlex.split(self.config['check_cmd'])
self.log.info("running %s", ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
try:
outs, errs = proc.communicate(timeout=self.config['check_timeout'])
except subprocess.TimeoutExpired:
self.log.error("check timed out")
if proc.poll() is None:
try:
proc.kill()
except PermissionError:
self.log.warning("failed to kill check due to adequate "
"access rights, check could be running "
"under another user(root) via sudo")
return False
else:
msg = "check duration {t:.3f}ms".format(
t=(time.time() - start_time) * 1000)
self.log.info(msg)
if proc.returncode != 0:
self.log.info("stderr from the check %s", errs)
self.log.info("stdout from the check %s", outs)
return proc.returncode == 0 | Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False. | Below is the the instruction that describes the task:
### Input:
Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
### Response:
def _run_check(self):
"""Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
"""
cmd = shlex.split(self.config['check_cmd'])
self.log.info("running %s", ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
try:
outs, errs = proc.communicate(timeout=self.config['check_timeout'])
except subprocess.TimeoutExpired:
self.log.error("check timed out")
if proc.poll() is None:
try:
proc.kill()
except PermissionError:
self.log.warning("failed to kill check due to adequate "
"access rights, check could be running "
"under another user(root) via sudo")
return False
else:
msg = "check duration {t:.3f}ms".format(
t=(time.time() - start_time) * 1000)
self.log.info(msg)
if proc.returncode != 0:
self.log.info("stderr from the check %s", errs)
self.log.info("stdout from the check %s", outs)
return proc.returncode == 0 |
def is_unlocked(self):
"""``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``"""
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked | ``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True`` | Below is the the instruction that describes the task:
### Input:
``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``
### Response:
def is_unlocked(self):
"""``False`` if this is a private key that is protected with a passphrase and has not yet been unlocked, otherwise ``True``"""
if self.is_public:
return True
if not self.is_protected:
return True
return self._key.unlocked |
def members(self):
"""Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.members?all=true')
if resp.is_fail():
raise RTMServiceError(
'Failed to get members of current team',
resp
)
return resp.data['result'] | Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed | Below is the the instruction that describes the task:
### Input:
Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
### Response:
def members(self):
"""Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.members?all=true')
if resp.is_fail():
raise RTMServiceError(
'Failed to get members of current team',
resp
)
return resp.data['result'] |
def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and not padding:
raise ValueError('padding must be specified')
if _backend == 'winlegacy':
return _advapi32_encrypt(cipher, key, data, iv, padding)
return _bcrypt_encrypt(cipher, key, data, iv, padding) | Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext | Below is the the instruction that describes the task:
### Input:
Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
### Response:
def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and not padding:
raise ValueError('padding must be specified')
if _backend == 'winlegacy':
return _advapi32_encrypt(cipher, key, data, iv, padding)
return _bcrypt_encrypt(cipher, key, data, iv, padding) |
def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs) | Set a console text attribute. | Below is the the instruction that describes the task:
### Input:
Set a console text attribute.
### Response:
def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs) |
def system(self, cmd):
"""Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus
"""
# Get likely encoding for the output.
enc = DEFAULT_ENCODING
# Patterns to match on the output, for pexpect. We read input and
# allow either a short timeout or EOF
patterns = [pexpect.TIMEOUT, pexpect.EOF]
# the index of the EOF pattern in the list.
# even though we know it's 1, this call means we don't have to worry if
# we change the above list, and forget to change this value:
EOF_index = patterns.index(pexpect.EOF)
# The size of the output stored so far in the process output buffer.
# Since pexpect only appends to this buffer, each time we print we
# record how far we've printed, so that next time we only print *new*
# content from the buffer.
out_size = 0
try:
# Since we're not really searching the buffer for text patterns, we
# can set pexpect's search window to be tiny and it won't matter.
# We only search for the 'patterns' timeout or EOF, which aren't in
# the text itself.
#child = pexpect.spawn(pcmd, searchwindowsize=1)
if hasattr(pexpect, 'spawnb'):
child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
else:
child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
flush = sys.stdout.flush
while True:
# res is the index of the pattern that caused the match, so we
# know whether we've finished (if we matched EOF) or not
res_idx = child.expect_list(patterns, self.read_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
flush()
if res_idx==EOF_index:
break
# Update the pointer to what we've already printed
out_size = len(child.before)
except KeyboardInterrupt:
# We need to send ^C to the process. The ascii code for '^C' is 3
# (the character is known as ETX for 'End of Text', see
# curses.ascii.ETX).
child.sendline(chr(3))
# Read and print any more output the program might produce on its
# way out.
try:
out_size = len(child.before)
child.expect_list(patterns, self.terminate_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
sys.stdout.flush()
except KeyboardInterrupt:
# Impatient users tend to type it multiple times
pass
finally:
# Ensure the subprocess really is terminated
child.terminate(force=True)
# add isalive check, to ensure exitstatus is set:
child.isalive()
return child.exitstatus | Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus | Below is the the instruction that describes the task:
### Input:
Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus
### Response:
def system(self, cmd):
"""Execute a command in a subshell.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
int : child's exitstatus
"""
# Get likely encoding for the output.
enc = DEFAULT_ENCODING
# Patterns to match on the output, for pexpect. We read input and
# allow either a short timeout or EOF
patterns = [pexpect.TIMEOUT, pexpect.EOF]
# the index of the EOF pattern in the list.
# even though we know it's 1, this call means we don't have to worry if
# we change the above list, and forget to change this value:
EOF_index = patterns.index(pexpect.EOF)
# The size of the output stored so far in the process output buffer.
# Since pexpect only appends to this buffer, each time we print we
# record how far we've printed, so that next time we only print *new*
# content from the buffer.
out_size = 0
try:
# Since we're not really searching the buffer for text patterns, we
# can set pexpect's search window to be tiny and it won't matter.
# We only search for the 'patterns' timeout or EOF, which aren't in
# the text itself.
#child = pexpect.spawn(pcmd, searchwindowsize=1)
if hasattr(pexpect, 'spawnb'):
child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
else:
child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
flush = sys.stdout.flush
while True:
# res is the index of the pattern that caused the match, so we
# know whether we've finished (if we matched EOF) or not
res_idx = child.expect_list(patterns, self.read_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
flush()
if res_idx==EOF_index:
break
# Update the pointer to what we've already printed
out_size = len(child.before)
except KeyboardInterrupt:
# We need to send ^C to the process. The ascii code for '^C' is 3
# (the character is known as ETX for 'End of Text', see
# curses.ascii.ETX).
child.sendline(chr(3))
# Read and print any more output the program might produce on its
# way out.
try:
out_size = len(child.before)
child.expect_list(patterns, self.terminate_timeout)
print(child.before[out_size:].decode(enc, 'replace'), end='')
sys.stdout.flush()
except KeyboardInterrupt:
# Impatient users tend to type it multiple times
pass
finally:
# Ensure the subprocess really is terminated
child.terminate(force=True)
# add isalive check, to ensure exitstatus is set:
child.isalive()
return child.exitstatus |
def _handle_login(self, event):
"""Manual password based login"""
# TODO: Refactor to simplify
self.log("Auth request for ", event.username, 'client:',
event.clientuuid)
# TODO: Define the requirements for secure passwords etc.
# They're also required in the Enrol module..!
if (len(event.username) < 1) or (len(event.password) < 5):
self.log("Illegal username or password received, login cancelled", lvl=warn)
self._fail(event, 'Password or username too short')
return
client_config = None
try:
user_account = objectmodels['user'].find_one({
'name': event.username
})
# self.log("Account: %s" % user_account._fields, lvl=debug)
if user_account is None:
raise AuthenticationError
except Exception as e:
self.log("No userobject due to error: ", e, type(e),
lvl=error)
self._fail(event)
return
self.log("User found.", lvl=debug)
if user_account.active is False:
self.log("Account deactivated.")
self._fail(event, 'Account deactivated.')
return
if not std_hash(event.password, self.salt) == user_account.passhash:
self.log("Password was wrong!", lvl=warn)
self._fail(event)
return
self.log("Passhash matches, checking client and profile.",
lvl=debug)
requested_client_uuid = event.requestedclientuuid
if requested_client_uuid is not None:
client_config = objectmodels['client'].find_one({
'uuid': requested_client_uuid
})
if client_config:
self.log("Checking client configuration permissions",
lvl=debug)
# TODO: Shareable client configurations?
if client_config.owner != user_account.uuid:
client_config = None
self.log("Unauthorized client configuration "
"requested",
lvl=warn)
else:
self.log("Unknown client configuration requested: ",
requested_client_uuid, event.__dict__,
lvl=warn)
if not client_config:
self.log("Creating new default client configuration")
# Either no configuration was found or not requested
# -> Create a new client configuration
uuid = event.clientuuid if event.clientuuid is not None else str(uuid4())
client_config = objectmodels['client']({'uuid': uuid})
client_config.name = std_human_uid(kind='place')
client_config.description = "New client configuration from " + user_account.name
client_config.owner = user_account.uuid
# TODO: Get client configuration storage done right, this one is too simple
client_config.save()
user_profile = self._get_profile(user_account)
self._login(event, user_account, user_profile, client_config)
self.log("Done with Login request", lvl=debug) | Manual password based login | Below is the the instruction that describes the task:
### Input:
Manual password based login
### Response:
def _handle_login(self, event):
"""Manual password based login"""
# TODO: Refactor to simplify
self.log("Auth request for ", event.username, 'client:',
event.clientuuid)
# TODO: Define the requirements for secure passwords etc.
# They're also required in the Enrol module..!
if (len(event.username) < 1) or (len(event.password) < 5):
self.log("Illegal username or password received, login cancelled", lvl=warn)
self._fail(event, 'Password or username too short')
return
client_config = None
try:
user_account = objectmodels['user'].find_one({
'name': event.username
})
# self.log("Account: %s" % user_account._fields, lvl=debug)
if user_account is None:
raise AuthenticationError
except Exception as e:
self.log("No userobject due to error: ", e, type(e),
lvl=error)
self._fail(event)
return
self.log("User found.", lvl=debug)
if user_account.active is False:
self.log("Account deactivated.")
self._fail(event, 'Account deactivated.')
return
if not std_hash(event.password, self.salt) == user_account.passhash:
self.log("Password was wrong!", lvl=warn)
self._fail(event)
return
self.log("Passhash matches, checking client and profile.",
lvl=debug)
requested_client_uuid = event.requestedclientuuid
if requested_client_uuid is not None:
client_config = objectmodels['client'].find_one({
'uuid': requested_client_uuid
})
if client_config:
self.log("Checking client configuration permissions",
lvl=debug)
# TODO: Shareable client configurations?
if client_config.owner != user_account.uuid:
client_config = None
self.log("Unauthorized client configuration "
"requested",
lvl=warn)
else:
self.log("Unknown client configuration requested: ",
requested_client_uuid, event.__dict__,
lvl=warn)
if not client_config:
self.log("Creating new default client configuration")
# Either no configuration was found or not requested
# -> Create a new client configuration
uuid = event.clientuuid if event.clientuuid is not None else str(uuid4())
client_config = objectmodels['client']({'uuid': uuid})
client_config.name = std_human_uid(kind='place')
client_config.description = "New client configuration from " + user_account.name
client_config.owner = user_account.uuid
# TODO: Get client configuration storage done right, this one is too simple
client_config.save()
user_profile = self._get_profile(user_account)
self._login(event, user_account, user_profile, client_config)
self.log("Done with Login request", lvl=debug) |
def add(self, host_value):
"""Add the given value to the collection.
:param host: an ip address or a hostname
:raises InvalidHostError: raised when the given value
is not a valid ip address nor a hostname
"""
host_obj = self._host_factory(host_value)
if self._get_match(host_obj) is not None:
return
self._add_new(host_obj) | Add the given value to the collection.
:param host: an ip address or a hostname
:raises InvalidHostError: raised when the given value
is not a valid ip address nor a hostname | Below is the the instruction that describes the task:
### Input:
Add the given value to the collection.
:param host: an ip address or a hostname
:raises InvalidHostError: raised when the given value
is not a valid ip address nor a hostname
### Response:
def add(self, host_value):
"""Add the given value to the collection.
:param host: an ip address or a hostname
:raises InvalidHostError: raised when the given value
is not a valid ip address nor a hostname
"""
host_obj = self._host_factory(host_value)
if self._get_match(host_obj) is not None:
return
self._add_new(host_obj) |
def instance_class(self):
"""Instance class."""
return Class(self._env, lib.EnvGetInstanceClass(self._env, self._ist)) | Instance class. | Below is the the instruction that describes the task:
### Input:
Instance class.
### Response:
def instance_class(self):
"""Instance class."""
return Class(self._env, lib.EnvGetInstanceClass(self._env, self._ist)) |
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
with _AcquireFutures(fs):
finished = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
timer = Timeout(timeout)
timer.start()
try:
for future in finished:
yield future
while pending:
waiter.event.wait()
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
except Timeout as e:
if timer is not e:
raise
raise TimeoutError('%d (of %d) futures unfinished' % (len(pending), len(fs)))
finally:
timer.cancel()
for f in fs:
f._waiters.remove(waiter) | An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout. | Below is the the instruction that describes the task:
### Input:
An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
### Response:
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
with _AcquireFutures(fs):
finished = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
timer = Timeout(timeout)
timer.start()
try:
for future in finished:
yield future
while pending:
waiter.event.wait()
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
except Timeout as e:
if timer is not e:
raise
raise TimeoutError('%d (of %d) futures unfinished' % (len(pending), len(fs)))
finally:
timer.cancel()
for f in fs:
f._waiters.remove(waiter) |
def warp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', warptype=memwarp, outdir=None, dst_ndv=None, verbose=True, debug=False):
"""This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
Parameters
----------
src_ds_list : list of gdal.Dataset objects
List of original datasets to be warped
res : arbitrary type
Desired output resolution
extent : arbitrary type
Desired output extent
t_srs : arbitrary type
Desired output spatial reference
r : str
Desired resampling algorithm
warptype : function
Desired warp type (write to memory or disk)
outdir : str
Desired output directory (for disk warp)
dst_ndv : float
Desired output NoData Value
verbose : bool
Print warp parameters
debug : bool
Print extra information for debugging purposes
Returns
-------
out_ds_list : list of gdal.Dataset objects
List of warped datasets (either in memory or on disk)
"""
#Type cast arguments as str for evaluation
#Avoid path errors
#res = str(res)
#extent = str(extent)
#t_srs = str(t_srs)
#Parse the input
t_srs = parse_srs(t_srs, src_ds_list)
res = parse_res(res, src_ds_list, t_srs)
extent = parse_extent(extent, src_ds_list, t_srs)
if verbose:
print("\nWarping all inputs to the following:")
print("Resolution: %s" % res)
print("Extent: %s" % str(extent))
print("Projection: '%s'" % t_srs.ExportToProj4())
print("Resampling alg: %s\n" % r)
out_ds_list = []
for i, ds in enumerate(src_ds_list):
fn_list = ds.GetFileList()
fn = '[memory]'
if fn_list is not None:
fn = fn_list[0]
if verbose:
print("%i of %i: %s" % (i+1, len(src_ds_list), fn))
#If input srs are different, must warp
ds_t_srs = geolib.get_ds_srs(ds)
srscheck = bool(t_srs.IsSame(ds_t_srs))
if debug:
print('\n%s' % ds_t_srs.ExportToWkt())
print('%s\n' % t_srs.ExportToWkt())
print('srscheck: %s\n' % srscheck)
rescheck = False
extentcheck = False
#if srscheck:
#Extract info from ds to see if warp is necessary
ds_res = geolib.get_res(ds, square=True)[0]
ds_extent = geolib.ds_extent(ds)
#Note: these checks necessary to handle rounding and precision issues
#Round extent and res to nearest mm
precision = 1E-3
#Or if t_srs has units of degrees
if ds_t_srs.IsGeographic():
precision = 1E-8
rescheck = (res is None) or geolib.res_compare(res, ds_res, precision=precision)
extentcheck = (extent is None) or geolib.extent_compare(extent, ds_extent, precision=precision)
if debug:
print('\n%s, %s\n' % (ds_res, res))
print('%s' % ds_extent)
print('%s\n' % extent)
print('rescheck: %s' % rescheck)
print('extentcheck: %s\n' % extentcheck)
#If the ds passes all three, it is identical to desired output, short circuit
if rescheck and extentcheck and srscheck:
out_ds_list.append(ds)
else:
dst_ds = warptype(ds, res, extent, t_srs, r, outdir, dst_ndv=dst_ndv, verbose=verbose)
out_ds_list.append(dst_ds)
return out_ds_list | This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
Parameters
----------
src_ds_list : list of gdal.Dataset objects
List of original datasets to be warped
res : arbitrary type
Desired output resolution
extent : arbitrary type
Desired output extent
t_srs : arbitrary type
Desired output spatial reference
r : str
Desired resampling algorithm
warptype : function
Desired warp type (write to memory or disk)
outdir : str
Desired output directory (for disk warp)
dst_ndv : float
Desired output NoData Value
verbose : bool
Print warp parameters
debug : bool
Print extra information for debugging purposes
Returns
-------
out_ds_list : list of gdal.Dataset objects
List of warped datasets (either in memory or on disk) | Below is the the instruction that describes the task:
### Input:
This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
Parameters
----------
src_ds_list : list of gdal.Dataset objects
List of original datasets to be warped
res : arbitrary type
Desired output resolution
extent : arbitrary type
Desired output extent
t_srs : arbitrary type
Desired output spatial reference
r : str
Desired resampling algorithm
warptype : function
Desired warp type (write to memory or disk)
outdir : str
Desired output directory (for disk warp)
dst_ndv : float
Desired output NoData Value
verbose : bool
Print warp parameters
debug : bool
Print extra information for debugging purposes
Returns
-------
out_ds_list : list of gdal.Dataset objects
List of warped datasets (either in memory or on disk)
### Response:
def warp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', warptype=memwarp, outdir=None, dst_ndv=None, verbose=True, debug=False):
"""This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
Parameters
----------
src_ds_list : list of gdal.Dataset objects
List of original datasets to be warped
res : arbitrary type
Desired output resolution
extent : arbitrary type
Desired output extent
t_srs : arbitrary type
Desired output spatial reference
r : str
Desired resampling algorithm
warptype : function
Desired warp type (write to memory or disk)
outdir : str
Desired output directory (for disk warp)
dst_ndv : float
Desired output NoData Value
verbose : bool
Print warp parameters
debug : bool
Print extra information for debugging purposes
Returns
-------
out_ds_list : list of gdal.Dataset objects
List of warped datasets (either in memory or on disk)
"""
#Type cast arguments as str for evaluation
#Avoid path errors
#res = str(res)
#extent = str(extent)
#t_srs = str(t_srs)
#Parse the input
t_srs = parse_srs(t_srs, src_ds_list)
res = parse_res(res, src_ds_list, t_srs)
extent = parse_extent(extent, src_ds_list, t_srs)
if verbose:
print("\nWarping all inputs to the following:")
print("Resolution: %s" % res)
print("Extent: %s" % str(extent))
print("Projection: '%s'" % t_srs.ExportToProj4())
print("Resampling alg: %s\n" % r)
out_ds_list = []
for i, ds in enumerate(src_ds_list):
fn_list = ds.GetFileList()
fn = '[memory]'
if fn_list is not None:
fn = fn_list[0]
if verbose:
print("%i of %i: %s" % (i+1, len(src_ds_list), fn))
#If input srs are different, must warp
ds_t_srs = geolib.get_ds_srs(ds)
srscheck = bool(t_srs.IsSame(ds_t_srs))
if debug:
print('\n%s' % ds_t_srs.ExportToWkt())
print('%s\n' % t_srs.ExportToWkt())
print('srscheck: %s\n' % srscheck)
rescheck = False
extentcheck = False
#if srscheck:
#Extract info from ds to see if warp is necessary
ds_res = geolib.get_res(ds, square=True)[0]
ds_extent = geolib.ds_extent(ds)
#Note: these checks necessary to handle rounding and precision issues
#Round extent and res to nearest mm
precision = 1E-3
#Or if t_srs has units of degrees
if ds_t_srs.IsGeographic():
precision = 1E-8
rescheck = (res is None) or geolib.res_compare(res, ds_res, precision=precision)
extentcheck = (extent is None) or geolib.extent_compare(extent, ds_extent, precision=precision)
if debug:
print('\n%s, %s\n' % (ds_res, res))
print('%s' % ds_extent)
print('%s\n' % extent)
print('rescheck: %s' % rescheck)
print('extentcheck: %s\n' % extentcheck)
#If the ds passes all three, it is identical to desired output, short circuit
if rescheck and extentcheck and srscheck:
out_ds_list.append(ds)
else:
dst_ds = warptype(ds, res, extent, t_srs, r, outdir, dst_ndv=dst_ndv, verbose=verbose)
out_ds_list.append(dst_ds)
return out_ds_list |
def create_unclaimed_draft(self, test_mode=False, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False):
''' Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'allow_decline': allow_decline
}
return self._create_unclaimed_draft(**params) | Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object | Below is the the instruction that describes the task:
### Input:
Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object
### Response:
def create_unclaimed_draft(self, test_mode=False, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False):
''' Creates a new Draft that can be claimed using the claim URL
Creates a new Draft that can be claimed using the claim URL. The first
authenticated user to access the URL will claim the Draft and will be
shown either the "Sign and send" or the "Request signature" page with
the Draft loaded. Subsequent access to the claim URL will result in a
404. If the type is "send_document" then only the file parameter is
required. If the type is "request_signature", then the identities of the
signers and optionally the location of signing elements on the page are
also required.
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
files (list of str): The uploaded file(s) to send for signature
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object
'''
self._check_required_fields({
'draft_type': draft_type
}, [{
"files": files,
"file_urls": file_urls
}]
)
params = {
'test_mode': test_mode,
'files': files,
'file_urls': file_urls,
'draft_type': draft_type,
'subject': subject,
'message': message,
'signing_redirect_url': signing_redirect_url,
'signers': signers,
'cc_email_addresses': cc_email_addresses,
'form_fields_per_document': form_fields_per_document,
'metadata': metadata,
'use_preexisting_fields': use_preexisting_fields,
'allow_decline': allow_decline
}
return self._create_unclaimed_draft(**params) |
def update_ext(self, path, id, body=None):
"""Client extension hook for update."""
return self.put(path % id, body=body) | Client extension hook for update. | Below is the the instruction that describes the task:
### Input:
Client extension hook for update.
### Response:
def update_ext(self, path, id, body=None):
"""Client extension hook for update."""
return self.put(path % id, body=body) |
async def _multipart(self, files_dict):
'''
Forms multipart requests from a dict with name, path k/vs. Name
does not have to be the actual file name.
Args:
files_dict (dict): A dict of `filename:filepath`s, to be sent
as multipart files.
Returns:
multip_pkg (str): The strings representation of the content body,
multipart formatted.
'''
boundary = bytes(_BOUNDARY, self.encoding)
hder_format = 'Content-Disposition: form-data; name="{}"'
hder_format_io = '; filename="{}"'
multip_pkg = b''
num_of_parts = len(files_dict)
for index, kv in enumerate(files_dict.items(), start=1):
multip_pkg += (b'--' + boundary + b'\r\n')
k, v = kv
try:
pkg_body = await self._file_manager(v)
multip_pkg += bytes(hder_format.format(k) +
hder_format_io.format(basename(v)),
self.encoding)
mime_type = mimetypes.guess_type(basename(v))
if not mime_type[1]:
mime_type = 'application/octet-stream'
else:
mime_type = '/'.join(mime_type)
multip_pkg += bytes('; Content-Type: ' + mime_type,
self.encoding)
multip_pkg += b'\r\n'*2 + pkg_body
except (TypeError, FileNotFoundError):
pkg_body = bytes(v, self.encoding) + b'\r\n'
multip_pkg += bytes(hder_format.format(k) +
'\r\n'*2, self.encoding)
multip_pkg += pkg_body
if index == num_of_parts:
multip_pkg += b'--' + boundary + b'--\r\n'
return multip_pkg | Forms multipart requests from a dict with name, path k/vs. Name
does not have to be the actual file name.
Args:
files_dict (dict): A dict of `filename:filepath`s, to be sent
as multipart files.
Returns:
multip_pkg (str): The strings representation of the content body,
multipart formatted. | Below is the the instruction that describes the task:
### Input:
Forms multipart requests from a dict with name, path k/vs. Name
does not have to be the actual file name.
Args:
files_dict (dict): A dict of `filename:filepath`s, to be sent
as multipart files.
Returns:
multip_pkg (str): The strings representation of the content body,
multipart formatted.
### Response:
async def _multipart(self, files_dict):
'''
Forms multipart requests from a dict with name, path k/vs. Name
does not have to be the actual file name.
Args:
files_dict (dict): A dict of `filename:filepath`s, to be sent
as multipart files.
Returns:
multip_pkg (str): The strings representation of the content body,
multipart formatted.
'''
boundary = bytes(_BOUNDARY, self.encoding)
hder_format = 'Content-Disposition: form-data; name="{}"'
hder_format_io = '; filename="{}"'
multip_pkg = b''
num_of_parts = len(files_dict)
for index, kv in enumerate(files_dict.items(), start=1):
multip_pkg += (b'--' + boundary + b'\r\n')
k, v = kv
try:
pkg_body = await self._file_manager(v)
multip_pkg += bytes(hder_format.format(k) +
hder_format_io.format(basename(v)),
self.encoding)
mime_type = mimetypes.guess_type(basename(v))
if not mime_type[1]:
mime_type = 'application/octet-stream'
else:
mime_type = '/'.join(mime_type)
multip_pkg += bytes('; Content-Type: ' + mime_type,
self.encoding)
multip_pkg += b'\r\n'*2 + pkg_body
except (TypeError, FileNotFoundError):
pkg_body = bytes(v, self.encoding) + b'\r\n'
multip_pkg += bytes(hder_format.format(k) +
'\r\n'*2, self.encoding)
multip_pkg += pkg_body
if index == num_of_parts:
multip_pkg += b'--' + boundary + b'--\r\n'
return multip_pkg |
def create_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False):
"""Create the DCNM OUT partition and update the result. """
res = fw_const.DCNM_OUT_PART_CREATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._create_out_partition(tenant_id, tenant_name)
except Exception as exc:
LOG.error("Create of Out Partition failed for tenant "
"%(tenant)s ,Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_OUT_PART_CREATE_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition created")
return ret | Create the DCNM OUT partition and update the result. | Below is the the instruction that describes the task:
### Input:
Create the DCNM OUT partition and update the result.
### Response:
def create_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False):
"""Create the DCNM OUT partition and update the result. """
res = fw_const.DCNM_OUT_PART_CREATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._create_out_partition(tenant_id, tenant_name)
except Exception as exc:
LOG.error("Create of Out Partition failed for tenant "
"%(tenant)s ,Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_OUT_PART_CREATE_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition created")
return ret |
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
"""
Class method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab
"""
# For now just use the species of the
# surface atom as the element to add
# Get the index of the corresponding site at the bottom
point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)
self.append(specie, point, coords_are_cartesian=coords_are_cartesian)
self.append(specie, point2, coords_are_cartesian=coords_are_cartesian) | Class method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab | Below is the the instruction that describes the task:
### Input:
Class method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab
### Response:
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
"""
Class method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab
"""
# For now just use the species of the
# surface atom as the element to add
# Get the index of the corresponding site at the bottom
point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)
self.append(specie, point, coords_are_cartesian=coords_are_cartesian)
self.append(specie, point2, coords_are_cartesian=coords_are_cartesian) |
def _read_para_r1_counter(self, code, cbit, clen, *, desc, length, version):
"""Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved, 4 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| R1 generation counter, 8 bytes |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ri_counter.type Parameter Type
1 15 ri_counter.critical Critical Bit
2 16 ri_counter.length Length of Contents
4 32 - Reserved
8 64 ri_counter.count Generation of Valid Puzzles
"""
if clen != 12:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
if code == 128 and version != 1:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid parameter')
_resv = self._read_fileng(4)
_genc = self._read_unpack(8)
r1_counter = dict(
type=desc,
critical=cbit,
length=clen,
count=_genc,
)
return r1_counter | Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved, 4 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| R1 generation counter, 8 bytes |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ri_counter.type Parameter Type
1 15 ri_counter.critical Critical Bit
2 16 ri_counter.length Length of Contents
4 32 - Reserved
8 64 ri_counter.count Generation of Valid Puzzles | Below is the the instruction that describes the task:
### Input:
Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved, 4 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| R1 generation counter, 8 bytes |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ri_counter.type Parameter Type
1 15 ri_counter.critical Critical Bit
2 16 ri_counter.length Length of Contents
4 32 - Reserved
8 64 ri_counter.count Generation of Valid Puzzles
### Response:
def _read_para_r1_counter(self, code, cbit, clen, *, desc, length, version):
"""Read HIP R1_COUNTER parameter.
Structure of HIP R1_COUNTER parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved, 4 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| R1 generation counter, 8 bytes |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 ri_counter.type Parameter Type
1 15 ri_counter.critical Critical Bit
2 16 ri_counter.length Length of Contents
4 32 - Reserved
8 64 ri_counter.count Generation of Valid Puzzles
"""
if clen != 12:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
if code == 128 and version != 1:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid parameter')
_resv = self._read_fileng(4)
_genc = self._read_unpack(8)
r1_counter = dict(
type=desc,
critical=cbit,
length=clen,
count=_genc,
)
return r1_counter |
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict | Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data. | Below is the the instruction that describes the task:
### Input:
Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
### Response:
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict |
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data | ``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data. | Below is the the instruction that describes the task:
### Input:
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
### Response:
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data |
def refraction(alt_degrees, temperature_C, pressure_mbar):
"""Given an observed altitude, return how much the image is refracted.
Zero refraction is returned both for objects very near the zenith,
as well as for objects more than one degree below the horizon.
"""
r = 0.016667 / tan((alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD)
d = r * (0.28 * pressure_mbar / (temperature_C + 273.0))
return where((-1.0 <= alt_degrees) & (alt_degrees <= 89.9), d, 0.0) | Given an observed altitude, return how much the image is refracted.
Zero refraction is returned both for objects very near the zenith,
as well as for objects more than one degree below the horizon. | Below is the the instruction that describes the task:
### Input:
Given an observed altitude, return how much the image is refracted.
Zero refraction is returned both for objects very near the zenith,
as well as for objects more than one degree below the horizon.
### Response:
def refraction(alt_degrees, temperature_C, pressure_mbar):
"""Given an observed altitude, return how much the image is refracted.
Zero refraction is returned both for objects very near the zenith,
as well as for objects more than one degree below the horizon.
"""
r = 0.016667 / tan((alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD)
d = r * (0.28 * pressure_mbar / (temperature_C + 273.0))
return where((-1.0 <= alt_degrees) & (alt_degrees <= 89.9), d, 0.0) |
def utc_datetime(dt=None, local_value=True):
""" Convert local datetime and/or datetime without timezone information to UTC datetime with timezone
information.
:param dt: local datetime to convert. If is None, then system datetime value is used
:param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information
:return: datetime in UTC with tz set
"""
# TODO: rename local_value to local_tz or in_local_tz
if dt is None:
return datetime.now(tz=timezone.utc)
result = dt
if result.utcoffset() is None:
if local_value is False:
return result.replace(tzinfo=timezone.utc)
else:
result = result.replace(tzinfo=local_tz())
return result.astimezone(timezone.utc) | Convert local datetime and/or datetime without timezone information to UTC datetime with timezone
information.
:param dt: local datetime to convert. If is None, then system datetime value is used
:param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information
:return: datetime in UTC with tz set | Below is the the instruction that describes the task:
### Input:
Convert local datetime and/or datetime without timezone information to UTC datetime with timezone
information.
:param dt: local datetime to convert. If is None, then system datetime value is used
:param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information
:return: datetime in UTC with tz set
### Response:
def utc_datetime(dt=None, local_value=True):
""" Convert local datetime and/or datetime without timezone information to UTC datetime with timezone
information.
:param dt: local datetime to convert. If is None, then system datetime value is used
:param local_value: whether dt is a datetime in system timezone or UTC datetime without timezone information
:return: datetime in UTC with tz set
"""
# TODO: rename local_value to local_tz or in_local_tz
if dt is None:
return datetime.now(tz=timezone.utc)
result = dt
if result.utcoffset() is None:
if local_value is False:
return result.replace(tzinfo=timezone.utc)
else:
result = result.replace(tzinfo=local_tz())
return result.astimezone(timezone.utc) |
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace) | The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary. | Below is the the instruction that describes the task:
### Input:
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
### Response:
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace) |
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index | Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering. | Below is the the instruction that describes the task:
### Input:
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
### Response:
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index |
def prep(self, wait, args, env=None):
"""
Given the return value of a preparefunc, prepare this
CompatStarter.
"""
self.pattern = wait
self.env = env
self.args = args
# wait is a function, supersedes the default behavior
if callable(wait):
self.wait = lambda lines: wait() | Given the return value of a preparefunc, prepare this
CompatStarter. | Below is the the instruction that describes the task:
### Input:
Given the return value of a preparefunc, prepare this
CompatStarter.
### Response:
def prep(self, wait, args, env=None):
"""
Given the return value of a preparefunc, prepare this
CompatStarter.
"""
self.pattern = wait
self.env = env
self.args = args
# wait is a function, supersedes the default behavior
if callable(wait):
self.wait = lambda lines: wait() |
def _load_custom_config(run_config):
"""Load custom configuration input HOCON file for cromwell.
"""
from pyhocon import ConfigFactory, HOCONConverter, ConfigTree
conf = ConfigFactory.parse_file(run_config)
out = {}
if "database" in conf:
out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")}))
return out | Load custom configuration input HOCON file for cromwell. | Below is the the instruction that describes the task:
### Input:
Load custom configuration input HOCON file for cromwell.
### Response:
def _load_custom_config(run_config):
"""Load custom configuration input HOCON file for cromwell.
"""
from pyhocon import ConfigFactory, HOCONConverter, ConfigTree
conf = ConfigFactory.parse_file(run_config)
out = {}
if "database" in conf:
out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")}))
return out |
def html2data(html_string):
"""
Convert an html table to a data table and spans.
Parameters
----------
html_string : str
The string containing the html table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define what cells
are merged in a table.
use_headers : bool
"""
spans = extract_spans(html_string)
column_count = get_html_column_count(html_string)
row_count = get_html_row_count(spans)
count = 0
while count < len(spans):
if len(spans[count]) == 1:
spans.pop(count)
else:
count += 1
table = extract_table(html_string, row_count, column_count)
use_headers = headers_present(html_string)
return table, spans, use_headers | Convert an html table to a data table and spans.
Parameters
----------
html_string : str
The string containing the html table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define what cells
are merged in a table.
use_headers : bool | Below is the the instruction that describes the task:
### Input:
Convert an html table to a data table and spans.
Parameters
----------
html_string : str
The string containing the html table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define what cells
are merged in a table.
use_headers : bool
### Response:
def html2data(html_string):
"""
Convert an html table to a data table and spans.
Parameters
----------
html_string : str
The string containing the html table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a list of [row, column] pairs that define what cells
are merged in a table.
use_headers : bool
"""
spans = extract_spans(html_string)
column_count = get_html_column_count(html_string)
row_count = get_html_row_count(spans)
count = 0
while count < len(spans):
if len(spans[count]) == 1:
spans.pop(count)
else:
count += 1
table = extract_table(html_string, row_count, column_count)
use_headers = headers_present(html_string)
return table, spans, use_headers |
def _parse_keyvals(self, line_iter):
"""Generate dictionary from key/value pairs.
"""
out = None
line = None
for line in line_iter:
if len(line) == 1 and line[0].upper() == line[0]:
break
else:
# setup output dictionaries, trimming off blank columns
if out is None:
while not line[-1]:
line = line[:-1]
out = [{} for _ in line[1:]]
# add blank values if the line is stripped
while len(line) < len(out) + 1:
line.append("")
for i in range(len(out)):
out[i][line[0]] = line[i+1].strip()
line = None
return out, line | Generate dictionary from key/value pairs. | Below is the the instruction that describes the task:
### Input:
Generate dictionary from key/value pairs.
### Response:
def _parse_keyvals(self, line_iter):
"""Generate dictionary from key/value pairs.
"""
out = None
line = None
for line in line_iter:
if len(line) == 1 and line[0].upper() == line[0]:
break
else:
# setup output dictionaries, trimming off blank columns
if out is None:
while not line[-1]:
line = line[:-1]
out = [{} for _ in line[1:]]
# add blank values if the line is stripped
while len(line) < len(out) + 1:
line.append("")
for i in range(len(out)):
out[i][line[0]] = line[i+1].strip()
line = None
return out, line |
def invoke(
src, event_file='event.json',
config_file='config.yaml', profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str alt_event:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ['AWS_PROFILE'] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get('environment_variables')
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get('handler')
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get('timeout')
if timeout:
context = LambdaContext(cfg.get('function_name'),timeout)
else:
context = LambdaContext(cfg.get('function_name'))
start = time.time()
results = fn(event, context)
end = time.time()
print('{0}'.format(results))
if verbose:
print('\nexecution time: {:.8f}s\nfunction execution '
'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15))) | Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str alt_event:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details. | Below is the the instruction that describes the task:
### Input:
Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str alt_event:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
### Response:
def invoke(
src, event_file='event.json',
config_file='config.yaml', profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda ready project (folder must contain a valid
config.yaml and handler module (e.g.: service.py).
:param str alt_event:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ['AWS_PROFILE'] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get('environment_variables')
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get('handler')
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get('timeout')
if timeout:
context = LambdaContext(cfg.get('function_name'),timeout)
else:
context = LambdaContext(cfg.get('function_name'))
start = time.time()
results = fn(event, context)
end = time.time()
print('{0}'.format(results))
if verbose:
print('\nexecution time: {:.8f}s\nfunction execution '
'timeout: {:2}s'.format(end - start, cfg.get('timeout', 15))) |
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example) | Add an example to the list of examples, checking it first. | Below is the the instruction that describes the task:
### Input:
Add an example to the list of examples, checking it first.
### Response:
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example) |
def verification_start(self, phone_number, country_code, via='sms',
locale=None, code_length=4):
"""
:param string phone_number: stored in your databse or you provided while creating new user.
:param string country_code: stored in your databse or you provided while creating new user.
:param string via: verification method either sms or call
:param string locale: optional default none
:param number code_length: optional default 4
:return:
"""
if via != 'sms' and via != 'call':
raise AuthyFormatException("Invalid Via. Expected 'sms' or 'call'.")
options = {
'phone_number': phone_number,
'country_code': country_code,
'via': via
}
if locale:
options['locale'] = locale
try:
cl = int(code_length)
if cl < 4 or cl > 10:
raise ValueError
options['code_length'] = cl
except ValueError:
raise AuthyFormatException(
"Invalid code_length. Expected numeric value from 4-10.")
resp = self.post("/protected/json/phones/verification/start", options)
return Phone(self, resp) | :param string phone_number: stored in your databse or you provided while creating new user.
:param string country_code: stored in your databse or you provided while creating new user.
:param string via: verification method either sms or call
:param string locale: optional default none
:param number code_length: optional default 4
:return: | Below is the the instruction that describes the task:
### Input:
:param string phone_number: stored in your databse or you provided while creating new user.
:param string country_code: stored in your databse or you provided while creating new user.
:param string via: verification method either sms or call
:param string locale: optional default none
:param number code_length: optional default 4
:return:
### Response:
def verification_start(self, phone_number, country_code, via='sms',
locale=None, code_length=4):
"""
:param string phone_number: stored in your databse or you provided while creating new user.
:param string country_code: stored in your databse or you provided while creating new user.
:param string via: verification method either sms or call
:param string locale: optional default none
:param number code_length: optional default 4
:return:
"""
if via != 'sms' and via != 'call':
raise AuthyFormatException("Invalid Via. Expected 'sms' or 'call'.")
options = {
'phone_number': phone_number,
'country_code': country_code,
'via': via
}
if locale:
options['locale'] = locale
try:
cl = int(code_length)
if cl < 4 or cl > 10:
raise ValueError
options['code_length'] = cl
except ValueError:
raise AuthyFormatException(
"Invalid code_length. Expected numeric value from 4-10.")
resp = self.post("/protected/json/phones/verification/start", options)
return Phone(self, resp) |
def gatk_filter_rnaseq(vrn_file, data):
"""
this incorporates filters listed here, dropping clusters of variants
within a 35 nucleotide window, high fischer strand values and low
quality by depth
https://software.broadinstitute.org/gatk/guide/article?id=3891
java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V
input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0"
-filterName QD -filter "QD < 2.0" -o output.vcf
"""
out_file = "%s-filter%s" % utils.splitext_plus(vrn_file)
if not file_exists(out_file):
ref_file = dd.get_ref_file(data)
with file_transaction(data, out_file) as tx_out_file:
params = ["VariantFiltration",
"-R", ref_file,
"-V", vrn_file,
"--cluster-window-size", "35",
"--cluster-size", "3",
"--filter-expression", "'FS > 30.0'",
"--filter-name", "FS",
"--filter-expression", "'QD < 2.0'",
"--filter-name", "QD",
"--output", tx_out_file]
# Use GATK4 for filtering, tools_off is for variant calling
config = utils.deepish_copy(dd.get_config(data))
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
jvm_opts = broad.get_gatk_opts(config, os.path.dirname(tx_out_file))
do.run(broad.gatk_cmd("gatk", jvm_opts, params, config), "Filter RNA-seq variants.")
return out_file | this incorporates filters listed here, dropping clusters of variants
within a 35 nucleotide window, high fischer strand values and low
quality by depth
https://software.broadinstitute.org/gatk/guide/article?id=3891
java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V
input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0"
-filterName QD -filter "QD < 2.0" -o output.vcf | Below is the the instruction that describes the task:
### Input:
this incorporates filters listed here, dropping clusters of variants
within a 35 nucleotide window, high fischer strand values and low
quality by depth
https://software.broadinstitute.org/gatk/guide/article?id=3891
java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V
input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0"
-filterName QD -filter "QD < 2.0" -o output.vcf
### Response:
def gatk_filter_rnaseq(vrn_file, data):
"""
this incorporates filters listed here, dropping clusters of variants
within a 35 nucleotide window, high fischer strand values and low
quality by depth
https://software.broadinstitute.org/gatk/guide/article?id=3891
java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V
input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0"
-filterName QD -filter "QD < 2.0" -o output.vcf
"""
out_file = "%s-filter%s" % utils.splitext_plus(vrn_file)
if not file_exists(out_file):
ref_file = dd.get_ref_file(data)
with file_transaction(data, out_file) as tx_out_file:
params = ["VariantFiltration",
"-R", ref_file,
"-V", vrn_file,
"--cluster-window-size", "35",
"--cluster-size", "3",
"--filter-expression", "'FS > 30.0'",
"--filter-name", "FS",
"--filter-expression", "'QD < 2.0'",
"--filter-name", "QD",
"--output", tx_out_file]
# Use GATK4 for filtering, tools_off is for variant calling
config = utils.deepish_copy(dd.get_config(data))
if "gatk4" in dd.get_tools_off({"config": config}):
config["algorithm"]["tools_off"].remove("gatk4")
jvm_opts = broad.get_gatk_opts(config, os.path.dirname(tx_out_file))
do.run(broad.gatk_cmd("gatk", jvm_opts, params, config), "Filter RNA-seq variants.")
return out_file |
def find_single_file_project(self): # type: () -> List[str]
"""
Find well formed singler file project
:return:
"""
files = [f for f in os.listdir(".") if os.path.isfile(f)]
candidates = []
setup_source = self.setup_py_source()
for file in files:
if file.endswith("setup.py") or not file.endswith(".py"):
continue # duh
if setup_source:
if file.replace(".py", "") in setup_source:
candidate = file.replace(".py", "")
if candidate != "setup":
candidates.append(candidate)
return candidates | Find well formed singler file project
:return: | Below is the the instruction that describes the task:
### Input:
Find well formed singler file project
:return:
### Response:
def find_single_file_project(self): # type: () -> List[str]
"""
Find well formed singler file project
:return:
"""
files = [f for f in os.listdir(".") if os.path.isfile(f)]
candidates = []
setup_source = self.setup_py_source()
for file in files:
if file.endswith("setup.py") or not file.endswith(".py"):
continue # duh
if setup_source:
if file.replace(".py", "") in setup_source:
candidate = file.replace(".py", "")
if candidate != "setup":
candidates.append(candidate)
return candidates |
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values()) | Transform the records with the patch. May fail if the records do not
match those expected in the patch. | Below is the the instruction that describes the task:
### Input:
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
### Response:
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values()) |
def create_endpoint_folder(self, endpoint_id, folder):
'''create an endpoint folder, catching the error if it exists.
Parameters
==========
endpoint_id: the endpoint id parameters
folder: the relative path of the folder to create
'''
try:
res = self.transfer_client.operation_mkdir(endpoint_id, folder)
bot.info("%s --> %s" %(res['message'], folder))
except TransferAPIError:
bot.info('%s already exists at endpoint' %folder) | create an endpoint folder, catching the error if it exists.
Parameters
==========
endpoint_id: the endpoint id parameters
folder: the relative path of the folder to create | Below is the the instruction that describes the task:
### Input:
create an endpoint folder, catching the error if it exists.
Parameters
==========
endpoint_id: the endpoint id parameters
folder: the relative path of the folder to create
### Response:
def create_endpoint_folder(self, endpoint_id, folder):
'''create an endpoint folder, catching the error if it exists.
Parameters
==========
endpoint_id: the endpoint id parameters
folder: the relative path of the folder to create
'''
try:
res = self.transfer_client.operation_mkdir(endpoint_id, folder)
bot.info("%s --> %s" %(res['message'], folder))
except TransferAPIError:
bot.info('%s already exists at endpoint' %folder) |
def set_join_rule(self, room_id, join_rule):
"""Set the rule for users wishing to join the room.
Args:
room_id(str): The room to set the rules for.
join_rule(str): The chosen rule. One of: ["public", "knock",
"invite", "private"]
"""
content = {
"join_rule": join_rule
}
return self.send_state_event(room_id, "m.room.join_rules", content) | Set the rule for users wishing to join the room.
Args:
room_id(str): The room to set the rules for.
join_rule(str): The chosen rule. One of: ["public", "knock",
"invite", "private"] | Below is the the instruction that describes the task:
### Input:
Set the rule for users wishing to join the room.
Args:
room_id(str): The room to set the rules for.
join_rule(str): The chosen rule. One of: ["public", "knock",
"invite", "private"]
### Response:
def set_join_rule(self, room_id, join_rule):
"""Set the rule for users wishing to join the room.
Args:
room_id(str): The room to set the rules for.
join_rule(str): The chosen rule. One of: ["public", "knock",
"invite", "private"]
"""
content = {
"join_rule": join_rule
}
return self.send_state_event(room_id, "m.room.join_rules", content) |
def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data | The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed. | Below is the the instruction that describes the task:
### Input:
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
### Response:
def parse_fields_http(self, response, extra_org_map=None):
"""
The function for parsing ASN fields from a http response.
Args:
response (:obj:`str`): The response from the ASN http server.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic',
'afrinic'. Defaults to None.
Returns:
dict: The ASN lookup results
::
{
'asn' (None) - Cannot retrieve with this method.
'asn_date' (None) - Cannot retrieve with this method.
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (None) - Cannot retrieve with this method.
'asn_country_code' (None) - Cannot retrieve with this
method.
'asn_description' (None) - Cannot retrieve with this
method.
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
"""
# Set the org_map. Map the orgRef handle to an RIR.
org_map = self.org_map.copy()
try:
org_map.update(extra_org_map)
except (TypeError, ValueError, IndexError, KeyError):
pass
try:
asn_data = {
'asn_registry': None,
'asn': None,
'asn_cidr': None,
'asn_country_code': None,
'asn_date': None,
'asn_description': None
}
try:
net_list = response['nets']['net']
if not isinstance(net_list, list):
net_list = [net_list]
except (KeyError, TypeError):
log.debug('No networks found')
net_list = []
for n in reversed(net_list):
try:
asn_data['asn_registry'] = (
org_map[n['orgRef']['@handle'].upper()]
)
except KeyError as e:
log.debug('Could not parse ASN registry via HTTP: '
'{0}'.format(str(e)))
continue
break
if not asn_data['asn_registry']:
log.debug('Could not parse ASN registry via HTTP')
raise ASNRegistryError('ASN registry lookup failed.')
except ASNRegistryError:
raise
except Exception as e: # pragma: no cover
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'
''.format(response, e)[:100])
return asn_data |
def get_xy_steps(bbox, h_dim):
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
y_steps: (Y, ) ndarray
Number of grids in y dimension.
"""
x_range, y_range = get_xy_range(bbox)
x_steps = np.ceil(x_range / h_dim)
y_steps = np.ceil(y_range / h_dim)
return int(x_steps), int(y_steps) | r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
y_steps: (Y, ) ndarray
Number of grids in y dimension. | Below is the the instruction that describes the task:
### Input:
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
y_steps: (Y, ) ndarray
Number of grids in y dimension.
### Response:
def get_xy_steps(bbox, h_dim):
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
y_steps: (Y, ) ndarray
Number of grids in y dimension.
"""
x_range, y_range = get_xy_range(bbox)
x_steps = np.ceil(x_range / h_dim)
y_steps = np.ceil(y_range / h_dim)
return int(x_steps), int(y_steps) |
def sndwrite(samples:np.ndarray, sr:int, outfile:str, encoding:str='auto') -> None:
"""
samples --> Array-like. the actual samples, shape=(nframes, channels)
sr --> Sampling-rate
outfile --> The name of the outfile. the extension will determine
the file-format.
The formats supported depend on the available backends
Without additional backends, only uncompressed formats
are supported (wav, aif)
encoding --> one of:
- 'auto' or None: the encoding is determined from the format
given by the extension of outfile, and
from the data
- 'pcm16'
- 'pcm24'
- 'pcm32'
- 'flt32'
NB: not all file formats support all encodings.
Throws a SndfileError if the format does not support
the given encoding
If set to 'auto', an encoding will be selected based on the
file-format and on the data. The bitdepth of the data is
measured, and if the file-format supports it, it will be used.
For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used.
For a bitdepth of 32 bits, a FLOAT encoding will be used,
or the next lower supported encoding
"""
if encoding in ('auto', None):
encoding = _guessEncoding(samples, outfile)
# normalize in the case where there would be clipping
clipping = ((samples > 1).any() or (samples < -1).any())
if encoding.startswith('pcm') and clipping:
maxvalue = max(samples.max(), abs(samples.min()))
samples = samples / maxvalue
backend = _getWriteBackend(outfile, encoding)
if not backend:
raise SndfileError("No backend found to support the given format")
logger.debug(f"sndwrite: using backend {backend.name}")
return backend.write(samples, sr, outfile, encoding) | samples --> Array-like. the actual samples, shape=(nframes, channels)
sr --> Sampling-rate
outfile --> The name of the outfile. the extension will determine
the file-format.
The formats supported depend on the available backends
Without additional backends, only uncompressed formats
are supported (wav, aif)
encoding --> one of:
- 'auto' or None: the encoding is determined from the format
given by the extension of outfile, and
from the data
- 'pcm16'
- 'pcm24'
- 'pcm32'
- 'flt32'
NB: not all file formats support all encodings.
Throws a SndfileError if the format does not support
the given encoding
If set to 'auto', an encoding will be selected based on the
file-format and on the data. The bitdepth of the data is
measured, and if the file-format supports it, it will be used.
For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used.
For a bitdepth of 32 bits, a FLOAT encoding will be used,
or the next lower supported encoding | Below is the the instruction that describes the task:
### Input:
samples --> Array-like. the actual samples, shape=(nframes, channels)
sr --> Sampling-rate
outfile --> The name of the outfile. the extension will determine
the file-format.
The formats supported depend on the available backends
Without additional backends, only uncompressed formats
are supported (wav, aif)
encoding --> one of:
- 'auto' or None: the encoding is determined from the format
given by the extension of outfile, and
from the data
- 'pcm16'
- 'pcm24'
- 'pcm32'
- 'flt32'
NB: not all file formats support all encodings.
Throws a SndfileError if the format does not support
the given encoding
If set to 'auto', an encoding will be selected based on the
file-format and on the data. The bitdepth of the data is
measured, and if the file-format supports it, it will be used.
For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used.
For a bitdepth of 32 bits, a FLOAT encoding will be used,
or the next lower supported encoding
### Response:
def sndwrite(samples:np.ndarray, sr:int, outfile:str, encoding:str='auto') -> None:
"""
samples --> Array-like. the actual samples, shape=(nframes, channels)
sr --> Sampling-rate
outfile --> The name of the outfile. the extension will determine
the file-format.
The formats supported depend on the available backends
Without additional backends, only uncompressed formats
are supported (wav, aif)
encoding --> one of:
- 'auto' or None: the encoding is determined from the format
given by the extension of outfile, and
from the data
- 'pcm16'
- 'pcm24'
- 'pcm32'
- 'flt32'
NB: not all file formats support all encodings.
Throws a SndfileError if the format does not support
the given encoding
If set to 'auto', an encoding will be selected based on the
file-format and on the data. The bitdepth of the data is
measured, and if the file-format supports it, it will be used.
For bitdepths of 8, 16 and 24 bits, a PCM encoding will be used.
For a bitdepth of 32 bits, a FLOAT encoding will be used,
or the next lower supported encoding
"""
if encoding in ('auto', None):
encoding = _guessEncoding(samples, outfile)
# normalize in the case where there would be clipping
clipping = ((samples > 1).any() or (samples < -1).any())
if encoding.startswith('pcm') and clipping:
maxvalue = max(samples.max(), abs(samples.min()))
samples = samples / maxvalue
backend = _getWriteBackend(outfile, encoding)
if not backend:
raise SndfileError("No backend found to support the given format")
logger.debug(f"sndwrite: using backend {backend.name}")
return backend.write(samples, sr, outfile, encoding) |
def cons(o, seq) -> ISeq:
"""Creates a new sequence where o is the first element and seq is the rest.
If seq is None, return a list containing o. If seq is not a ISeq, attempt
to coerce it to a ISeq and then cons o onto the resulting sequence."""
if seq is None:
return llist.l(o)
if isinstance(seq, ISeq):
return seq.cons(o)
return Maybe(to_seq(seq)).map(lambda s: s.cons(o)).or_else(lambda: llist.l(o)) | Creates a new sequence where o is the first element and seq is the rest.
If seq is None, return a list containing o. If seq is not a ISeq, attempt
to coerce it to a ISeq and then cons o onto the resulting sequence. | Below is the the instruction that describes the task:
### Input:
Creates a new sequence where o is the first element and seq is the rest.
If seq is None, return a list containing o. If seq is not a ISeq, attempt
to coerce it to a ISeq and then cons o onto the resulting sequence.
### Response:
def cons(o, seq) -> ISeq:
"""Creates a new sequence where o is the first element and seq is the rest.
If seq is None, return a list containing o. If seq is not a ISeq, attempt
to coerce it to a ISeq and then cons o onto the resulting sequence."""
if seq is None:
return llist.l(o)
if isinstance(seq, ISeq):
return seq.cons(o)
return Maybe(to_seq(seq)).map(lambda s: s.cons(o)).or_else(lambda: llist.l(o)) |
def freeze_subjects(self):
"""Converts variable data into numpy arrays.
This is required after all subjects have been added via the
add_subject function, since we don't know ahead of time who is
participating in the analysis due to various filtering possibilities.
"""
self.phenotype_data = numpy.array(self.phenotype_data)
self.covariate_data = numpy.array(self.covariate_data) | Converts variable data into numpy arrays.
This is required after all subjects have been added via the
add_subject function, since we don't know ahead of time who is
participating in the analysis due to various filtering possibilities. | Below is the the instruction that describes the task:
### Input:
Converts variable data into numpy arrays.
This is required after all subjects have been added via the
add_subject function, since we don't know ahead of time who is
participating in the analysis due to various filtering possibilities.
### Response:
def freeze_subjects(self):
"""Converts variable data into numpy arrays.
This is required after all subjects have been added via the
add_subject function, since we don't know ahead of time who is
participating in the analysis due to various filtering possibilities.
"""
self.phenotype_data = numpy.array(self.phenotype_data)
self.covariate_data = numpy.array(self.covariate_data) |
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False | Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source. | Below is the the instruction that describes the task:
### Input:
Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
### Response:
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False |
def hash_codeblocks(text, hashes):
"""Hashes codeblocks (<pre> elements).
Codeblocks are strictly defined to be (non-list) lines that are
indented at least 4 spaces from the newline. Exactly 4 spaces will
be stripped from the beginning of the line -- any leading
whitespace after that is preserved.
Codeblock lines that are separated only by blank lines will be
included in the same codeblock (as will the intermediate newlines).
Certain HTML entities (&, <, >, ", ') will always be escaped inside
code blocks.
Markdown defines code blocks to be <pre><code>, not just <pre>.
Certain highlighting packages (like highlight.js) are designed
to accomodate (and even look) for this type of conversion.
"""
def sub(match):
block = match.group(1).rstrip('\n')
block = re.sub(r'(?:(?<=\n)|(?<=\A)) {4}', '', block)
block = escape(block)
block = '<pre><code>{}</code></pre>'.format(block)
hashed = hash_text(block, 'pre')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_codeblock.sub(sub, text) | Hashes codeblocks (<pre> elements).
Codeblocks are strictly defined to be (non-list) lines that are
indented at least 4 spaces from the newline. Exactly 4 spaces will
be stripped from the beginning of the line -- any leading
whitespace after that is preserved.
Codeblock lines that are separated only by blank lines will be
included in the same codeblock (as will the intermediate newlines).
Certain HTML entities (&, <, >, ", ') will always be escaped inside
code blocks.
Markdown defines code blocks to be <pre><code>, not just <pre>.
Certain highlighting packages (like highlight.js) are designed
to accomodate (and even look) for this type of conversion. | Below is the the instruction that describes the task:
### Input:
Hashes codeblocks (<pre> elements).
Codeblocks are strictly defined to be (non-list) lines that are
indented at least 4 spaces from the newline. Exactly 4 spaces will
be stripped from the beginning of the line -- any leading
whitespace after that is preserved.
Codeblock lines that are separated only by blank lines will be
included in the same codeblock (as will the intermediate newlines).
Certain HTML entities (&, <, >, ", ') will always be escaped inside
code blocks.
Markdown defines code blocks to be <pre><code>, not just <pre>.
Certain highlighting packages (like highlight.js) are designed
to accomodate (and even look) for this type of conversion.
### Response:
def hash_codeblocks(text, hashes):
"""Hashes codeblocks (<pre> elements).
Codeblocks are strictly defined to be (non-list) lines that are
indented at least 4 spaces from the newline. Exactly 4 spaces will
be stripped from the beginning of the line -- any leading
whitespace after that is preserved.
Codeblock lines that are separated only by blank lines will be
included in the same codeblock (as will the intermediate newlines).
Certain HTML entities (&, <, >, ", ') will always be escaped inside
code blocks.
Markdown defines code blocks to be <pre><code>, not just <pre>.
Certain highlighting packages (like highlight.js) are designed
to accomodate (and even look) for this type of conversion.
"""
def sub(match):
block = match.group(1).rstrip('\n')
block = re.sub(r'(?:(?<=\n)|(?<=\A)) {4}', '', block)
block = escape(block)
block = '<pre><code>{}</code></pre>'.format(block)
hashed = hash_text(block, 'pre')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_codeblock.sub(sub, text) |
def close(self):
"""Close this adapter stream.
This method may only be called once in the lifetime of an
AdapterStream and it will shutdown the underlying device adapter,
disconnect all devices and stop all background activity.
If this stream is configured to save a record of all RPCs, the RPCs
will be logged to a file at this point.
"""
try:
self._loop.run_coroutine(self.adapter.stop())
finally:
self._save_recording() | Close this adapter stream.
This method may only be called once in the lifetime of an
AdapterStream and it will shutdown the underlying device adapter,
disconnect all devices and stop all background activity.
If this stream is configured to save a record of all RPCs, the RPCs
will be logged to a file at this point. | Below is the the instruction that describes the task:
### Input:
Close this adapter stream.
This method may only be called once in the lifetime of an
AdapterStream and it will shutdown the underlying device adapter,
disconnect all devices and stop all background activity.
If this stream is configured to save a record of all RPCs, the RPCs
will be logged to a file at this point.
### Response:
def close(self):
"""Close this adapter stream.
This method may only be called once in the lifetime of an
AdapterStream and it will shutdown the underlying device adapter,
disconnect all devices and stop all background activity.
If this stream is configured to save a record of all RPCs, the RPCs
will be logged to a file at this point.
"""
try:
self._loop.run_coroutine(self.adapter.stop())
finally:
self._save_recording() |
def get_inventory_by_name(nme, character):
"""
returns the inventory index by name
"""
for ndx, sk in enumerate(character["inventory"]):
#print("sk = ", sk, " , nme = ", nme)
if sk["name"] == nme:
return ndx
return 0 | returns the inventory index by name | Below is the the instruction that describes the task:
### Input:
returns the inventory index by name
### Response:
def get_inventory_by_name(nme, character):
"""
returns the inventory index by name
"""
for ndx, sk in enumerate(character["inventory"]):
#print("sk = ", sk, " , nme = ", nme)
if sk["name"] == nme:
return ndx
return 0 |
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol):
"""Some kind of netcat/ncat replacement.
The execution emulates the feeling of this popular tools.
Example:
\b
$ habu.nc --crlf www.portantier.com 80
Connected to 45.77.113.133 80
HEAD / HTTP/1.0
\b
HTTP/1.0 301 Moved Permanently
Date: Thu, 26 Jul 2018 21:10:51 GMT
Server: OpenBSD httpd
Connection: close
Content-Type: text/html
Content-Length: 443
Location: https://www.portantier.com/
"""
resolved = socket.getaddrinfo(host, port)
families = {
'4' : [ socket.AF_INET ],
'6' : [ socket.AF_INET6 ],
'46': [ socket.AF_INET, socket.AF_INET6]
}
address = None
for r in resolved:
if r[0] in families[family]:
address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))
if not address:
print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr)
sys.exit(1)
to_send = b''
if not source_ip:
source_ip = which_source_for(address[4][0])
if protocol == 'tcp':
s = socket.socket(address[0], socket.SOCK_STREAM)
else:
s = socket.socket(address[0], socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((source_ip, source_port))
if ssl_enable:
ssl_context = ssl.SSLContext()
s = ssl_context.wrap_socket(s, server_side=False)
try:
s.connect((address[4][0], port))
print('Connected to', address[4][0], port, file=sys.stderr)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
while True:
iready, oready, eready = select.select([sys.stdin, s], [], [s])
for i in iready:
if i == sys.stdin:
if crlf:
to_send += i.readline().replace('\n', '\r\n').encode()
else:
to_send += i.readline().encode()
else:
received = s.recv(4096)
if not received:
sys.exit(1)
os.write(sys.stdout.fileno(), received)
iready, oready, eready = select.select([], [s], [s])
for o in oready:
if to_send:
o.send(to_send)
to_send = b''
s.close() | Some kind of netcat/ncat replacement.
The execution emulates the feeling of this popular tools.
Example:
\b
$ habu.nc --crlf www.portantier.com 80
Connected to 45.77.113.133 80
HEAD / HTTP/1.0
\b
HTTP/1.0 301 Moved Permanently
Date: Thu, 26 Jul 2018 21:10:51 GMT
Server: OpenBSD httpd
Connection: close
Content-Type: text/html
Content-Length: 443
Location: https://www.portantier.com/ | Below is the the instruction that describes the task:
### Input:
Some kind of netcat/ncat replacement.
The execution emulates the feeling of this popular tools.
Example:
\b
$ habu.nc --crlf www.portantier.com 80
Connected to 45.77.113.133 80
HEAD / HTTP/1.0
\b
HTTP/1.0 301 Moved Permanently
Date: Thu, 26 Jul 2018 21:10:51 GMT
Server: OpenBSD httpd
Connection: close
Content-Type: text/html
Content-Length: 443
Location: https://www.portantier.com/
### Response:
def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol):
"""Some kind of netcat/ncat replacement.
The execution emulates the feeling of this popular tools.
Example:
\b
$ habu.nc --crlf www.portantier.com 80
Connected to 45.77.113.133 80
HEAD / HTTP/1.0
\b
HTTP/1.0 301 Moved Permanently
Date: Thu, 26 Jul 2018 21:10:51 GMT
Server: OpenBSD httpd
Connection: close
Content-Type: text/html
Content-Length: 443
Location: https://www.portantier.com/
"""
resolved = socket.getaddrinfo(host, port)
families = {
'4' : [ socket.AF_INET ],
'6' : [ socket.AF_INET6 ],
'46': [ socket.AF_INET, socket.AF_INET6]
}
address = None
for r in resolved:
if r[0] in families[family]:
address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))
if not address:
print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr)
sys.exit(1)
to_send = b''
if not source_ip:
source_ip = which_source_for(address[4][0])
if protocol == 'tcp':
s = socket.socket(address[0], socket.SOCK_STREAM)
else:
s = socket.socket(address[0], socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((source_ip, source_port))
if ssl_enable:
ssl_context = ssl.SSLContext()
s = ssl_context.wrap_socket(s, server_side=False)
try:
s.connect((address[4][0], port))
print('Connected to', address[4][0], port, file=sys.stderr)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
while True:
iready, oready, eready = select.select([sys.stdin, s], [], [s])
for i in iready:
if i == sys.stdin:
if crlf:
to_send += i.readline().replace('\n', '\r\n').encode()
else:
to_send += i.readline().encode()
else:
received = s.recv(4096)
if not received:
sys.exit(1)
os.write(sys.stdout.fileno(), received)
iready, oready, eready = select.select([], [s], [s])
for o in oready:
if to_send:
o.send(to_send)
to_send = b''
s.close() |
def _truncate(self, x, k):
''' given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0 '''
not_F = np.argsort(np.abs(x))[:-k]
x[not_F] = 0
return x | given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0 | Below is the the instruction that describes the task:
### Input:
given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0
### Response:
def _truncate(self, x, k):
''' given a vector x, leave its top-k absolute-value entries alone, and set the rest to 0 '''
not_F = np.argsort(np.abs(x))[:-k]
x[not_F] = 0
return x |
def strip_spaces(x):
"""
Strips spaces
:param x:
:return:
"""
x = x.replace(b' ', b'')
x = x.replace(b'\t', b'')
return x | Strips spaces
:param x:
:return: | Below is the the instruction that describes the task:
### Input:
Strips spaces
:param x:
:return:
### Response:
def strip_spaces(x):
"""
Strips spaces
:param x:
:return:
"""
x = x.replace(b' ', b'')
x = x.replace(b'\t', b'')
return x |
def stop(self, wait=False):
"""
Terminate the VM instance launched on the cloud for this specific node.
"""
if self.instance_id is not None:
log.info("Shutting down node `%s` (VM instance `%s`) ...",
self.name, self.instance_id)
self._cloud_provider.stop_instance(self.instance_id)
if wait:
while self.is_alive():
time.sleep(1)
# When an instance is terminated, the EC2 cloud provider will
# basically return it as "running" state. Setting the
# `instance_id` attribute to None will force `is_alive()`
# method not to check with the cloud provider, and forever
# forgetting about the instance id.
self.instance_id = None | Terminate the VM instance launched on the cloud for this specific node. | Below is the the instruction that describes the task:
### Input:
Terminate the VM instance launched on the cloud for this specific node.
### Response:
def stop(self, wait=False):
"""
Terminate the VM instance launched on the cloud for this specific node.
"""
if self.instance_id is not None:
log.info("Shutting down node `%s` (VM instance `%s`) ...",
self.name, self.instance_id)
self._cloud_provider.stop_instance(self.instance_id)
if wait:
while self.is_alive():
time.sleep(1)
# When an instance is terminated, the EC2 cloud provider will
# basically return it as "running" state. Setting the
# `instance_id` attribute to None will force `is_alive()`
# method not to check with the cloud provider, and forever
# forgetting about the instance id.
self.instance_id = None |
def chemical_formula(self):
"""the chemical formula of the molecule"""
counts = {}
for number in self.numbers:
counts[number] = counts.get(number, 0)+1
items = []
for number, count in sorted(counts.items(), reverse=True):
if count == 1:
items.append(periodic[number].symbol)
else:
items.append("%s%i" % (periodic[number].symbol, count))
return "".join(items) | the chemical formula of the molecule | Below is the the instruction that describes the task:
### Input:
the chemical formula of the molecule
### Response:
def chemical_formula(self):
"""the chemical formula of the molecule"""
counts = {}
for number in self.numbers:
counts[number] = counts.get(number, 0)+1
items = []
for number, count in sorted(counts.items(), reverse=True):
if count == 1:
items.append(periodic[number].symbol)
else:
items.append("%s%i" % (periodic[number].symbol, count))
return "".join(items) |
def setup(self):
"""Setup."""
self.blocks = self.config['block_comments']
self.lines = self.config['line_comments']
self.group_comments = self.config['group_comments']
# If the style isn't found, just go with CSS, then use the appropriate prefix.
self.stylesheets = STYLESHEET_TYPE.get(self.config['stylesheets'].lower(), CSS)
self.prefix = [k for k, v in STYLESHEET_TYPE.items() if v == SASS][0]
self.pattern = RE_CSS if self.stylesheets == CSS else RE_SCSS | Setup. | Below is the the instruction that describes the task:
### Input:
Setup.
### Response:
def setup(self):
"""Setup."""
self.blocks = self.config['block_comments']
self.lines = self.config['line_comments']
self.group_comments = self.config['group_comments']
# If the style isn't found, just go with CSS, then use the appropriate prefix.
self.stylesheets = STYLESHEET_TYPE.get(self.config['stylesheets'].lower(), CSS)
self.prefix = [k for k, v in STYLESHEET_TYPE.items() if v == SASS][0]
self.pattern = RE_CSS if self.stylesheets == CSS else RE_SCSS |
async def update_firmware(port: str,
firmware_file_path: str,
loop: Optional[asyncio.AbstractEventLoop])\
-> Tuple[str, Tuple[bool, str]]:
"""
Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude.
"""
ports_before_update = await _discover_ports()
config_file_path = os.path.join(package_root,
'config', 'modules', 'avrdude.conf')
kwargs: Dict[str, Any] = {
'stdout': asyncio.subprocess.PIPE,
'stderr': asyncio.subprocess.PIPE
}
if loop:
kwargs['loop'] = loop
proc = await asyncio.create_subprocess_exec(
'avrdude', '-C{}'.format(config_file_path), '-v',
'-p{}'.format(PART_NO),
'-c{}'.format(PROGRAMMER_ID),
'-P{}'.format(port),
'-b{}'.format(BAUDRATE), '-D',
'-Uflash:w:{}:i'.format(firmware_file_path),
**kwargs)
await proc.wait()
_result = await proc.communicate()
result = _result[1].decode()
avrdude_res = _format_avrdude_response(result)
if avrdude_res[0]:
log.debug(result)
else:
log.error("Failed to update module firmware for {}: {}"
.format(port, avrdude_res[1]))
new_port = await _port_on_mode_switch(ports_before_update)
log.info("New port: {}".format(new_port))
return new_port, avrdude_res | Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude. | Below is the the instruction that describes the task:
### Input:
Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude.
### Response:
async def update_firmware(port: str,
firmware_file_path: str,
loop: Optional[asyncio.AbstractEventLoop])\
-> Tuple[str, Tuple[bool, str]]:
"""
Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude.
"""
ports_before_update = await _discover_ports()
config_file_path = os.path.join(package_root,
'config', 'modules', 'avrdude.conf')
kwargs: Dict[str, Any] = {
'stdout': asyncio.subprocess.PIPE,
'stderr': asyncio.subprocess.PIPE
}
if loop:
kwargs['loop'] = loop
proc = await asyncio.create_subprocess_exec(
'avrdude', '-C{}'.format(config_file_path), '-v',
'-p{}'.format(PART_NO),
'-c{}'.format(PROGRAMMER_ID),
'-P{}'.format(port),
'-b{}'.format(BAUDRATE), '-D',
'-Uflash:w:{}:i'.format(firmware_file_path),
**kwargs)
await proc.wait()
_result = await proc.communicate()
result = _result[1].decode()
avrdude_res = _format_avrdude_response(result)
if avrdude_res[0]:
log.debug(result)
else:
log.error("Failed to update module firmware for {}: {}"
.format(port, avrdude_res[1]))
new_port = await _port_on_mode_switch(ports_before_update)
log.info("New port: {}".format(new_port))
return new_port, avrdude_res |
def deletion(args):
"""
%prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed
Find IES based on mapping MAC reads to MIC genome.
"""
p = OptionParser(deletion.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth to call a deletion")
p.add_option("--minspan", default=30, type="int",
help="Minimum span to call a deletion")
p.add_option("--split", default=False, action="store_true",
help="Break at cigar N into separate parts")
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, gapsbedfile = args
if bedfile.endswith(".bam"):
bamfile = bedfile
bedfile = bamfile.replace(".sorted.", ".").replace(".bam", ".bed")
if need_update(bamfile, bedfile):
cmd = "bamToBed -i {0}".format(bamfile)
if opts.split:
cmd += " -split"
cmd += " | cut -f1-4"
sh(cmd, outfile=bedfile)
sort_tmpdir = "--tmpdir={0}".format(opts.tmpdir)
if bedfile.endswith(".sorted.bed"):
pf = bedfile.rsplit(".", 2)[0]
sortedbedfile = bedfile
else:
pf = bedfile.rsplit(".", 1)[0]
sortedbedfile = pf + ".sorted.bed"
if need_update(bedfile, sortedbedfile):
sort([bedfile, "-u", "--accn", sort_tmpdir])
# Find reads that contain multiple matches
ibedfile = pf + ".d.bed"
if need_update(sortedbedfile, ibedfile):
bed = Bed(sortedbedfile, sorted=False)
fw = open(ibedfile, "w")
logging.debug("Write deletions to `{0}`.".format(ibedfile))
for accn, bb in groupby(bed, key=lambda x: x.accn):
bb = list(bb)
branges = [(x.seqid, x.start, x.end) for x in bb]
iranges = range_interleave(branges)
for seqid, start, end in iranges:
if end - start + 1 < opts.minspan:
continue
print("\t".join(str(x) for x in \
(seqid, start - 1, end, accn + '-d')), file=fw)
fw.close()
# Uniqify the insertions and count occurrences
countbedfile = pf + ".uniq.bed"
if need_update(ibedfile, countbedfile):
bed = Bed(ibedfile)
fw = open(countbedfile, "w")
logging.debug("Write counts to `{0}`.".format(countbedfile))
registry = Counter((x.seqid, x.start, x.end) for x in bed)
ies_id = 1
for (seqid, start, end), count in registry.items():
ies_name = "{0:05d}-r{1}".format(ies_id, count)
if count < opts.mindepth:
continue
print("\t".join(str(x) for x in \
(seqid, start - 1, end, ies_name)), file=fw)
ies_id += 1
fw.close()
sort([countbedfile, "-i", sort_tmpdir])
# Remove deletions that contain some read depth
depthbedfile = pf + ".depth.bed"
if need_update((sortedbedfile, countbedfile), depthbedfile):
depth([sortedbedfile, countbedfile, "--outfile={0}".format(depthbedfile)])
validbedfile = pf + ".valid.bed"
if need_update(depthbedfile, validbedfile):
fw = open(validbedfile, "w")
logging.debug("Filter valid deletions to `{0}`.".format(validbedfile))
bed = Bed(depthbedfile)
all_scores = [float(b.score) for b in bed]
lb, ub = outlier_cutoff(all_scores)
logging.debug("Bounds for depths: LB={0:.2f} (ignored) UB={1:.2f}".format(lb, ub))
for b in bed:
if float(b.score) > ub:
continue
print(b, file=fw)
fw.close()
# Remove deletions that contain sequencing gaps on its flanks
selectedbedfile = pf + ".selected.bed"
if need_update(validbedfile, selectedbedfile):
flanksbedfile = pf + ".flanks.bed"
fw = open(flanksbedfile, "w")
bed = Bed(validbedfile)
flank = 100
logging.debug("Write deletion flanks to `{0}`.".format(flanksbedfile))
for b in bed:
start, end = b.start, b.end
b.start, b.end = start, min(start + flank - 1, end)
print(b, file=fw)
b.start, b.end = max(start, end - flank + 1), end
print(b, file=fw)
fw.close()
intersectidsfile = pf + ".intersect.ids"
cmd = "intersectBed -a {0} -b {1}".format(flanksbedfile, gapsbedfile)
cmd += " | cut -f4 | sort -u"
sh(cmd, outfile=intersectidsfile)
some([validbedfile, intersectidsfile, "-v",
"--outfile={0}".format(selectedbedfile)])
# Find best-scoring non-overlapping set
iesbedfile = pf + ".ies.bed"
if need_update(selectedbedfile, iesbedfile):
bed = Bed(selectedbedfile)
fw = open(iesbedfile, "w")
logging.debug("Write IES to `{0}`.".format(iesbedfile))
branges = [Range(x.seqid, x.start, x.end, int(x.accn.rsplit("r")[-1]), i) \
for i, x in enumerate(bed)]
iranges, iscore = range_chain(branges)
logging.debug("Best chain score: {0} ({1} IES)".\
format(iscore, len(iranges)))
ies_id = 1
for seqid, start, end, score, id in iranges:
ies_name = "IES-{0:05d}-r{1}".format(ies_id, score)
span = end - start + 1
print("\t".join(str(x) for x in \
(seqid, start - 1, end, ies_name, span)), file=fw)
ies_id += 1
fw.close() | %prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed
Find IES based on mapping MAC reads to MIC genome. | Below is the the instruction that describes the task:
### Input:
%prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed
Find IES based on mapping MAC reads to MIC genome.
### Response:
def deletion(args):
"""
%prog deletion [mac.mic.bam|mac.mic.bed] mic.gaps.bed
Find IES based on mapping MAC reads to MIC genome.
"""
p = OptionParser(deletion.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth to call a deletion")
p.add_option("--minspan", default=30, type="int",
help="Minimum span to call a deletion")
p.add_option("--split", default=False, action="store_true",
help="Break at cigar N into separate parts")
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, gapsbedfile = args
if bedfile.endswith(".bam"):
bamfile = bedfile
bedfile = bamfile.replace(".sorted.", ".").replace(".bam", ".bed")
if need_update(bamfile, bedfile):
cmd = "bamToBed -i {0}".format(bamfile)
if opts.split:
cmd += " -split"
cmd += " | cut -f1-4"
sh(cmd, outfile=bedfile)
sort_tmpdir = "--tmpdir={0}".format(opts.tmpdir)
if bedfile.endswith(".sorted.bed"):
pf = bedfile.rsplit(".", 2)[0]
sortedbedfile = bedfile
else:
pf = bedfile.rsplit(".", 1)[0]
sortedbedfile = pf + ".sorted.bed"
if need_update(bedfile, sortedbedfile):
sort([bedfile, "-u", "--accn", sort_tmpdir])
# Find reads that contain multiple matches
ibedfile = pf + ".d.bed"
if need_update(sortedbedfile, ibedfile):
bed = Bed(sortedbedfile, sorted=False)
fw = open(ibedfile, "w")
logging.debug("Write deletions to `{0}`.".format(ibedfile))
for accn, bb in groupby(bed, key=lambda x: x.accn):
bb = list(bb)
branges = [(x.seqid, x.start, x.end) for x in bb]
iranges = range_interleave(branges)
for seqid, start, end in iranges:
if end - start + 1 < opts.minspan:
continue
print("\t".join(str(x) for x in \
(seqid, start - 1, end, accn + '-d')), file=fw)
fw.close()
# Uniqify the insertions and count occurrences
countbedfile = pf + ".uniq.bed"
if need_update(ibedfile, countbedfile):
bed = Bed(ibedfile)
fw = open(countbedfile, "w")
logging.debug("Write counts to `{0}`.".format(countbedfile))
registry = Counter((x.seqid, x.start, x.end) for x in bed)
ies_id = 1
for (seqid, start, end), count in registry.items():
ies_name = "{0:05d}-r{1}".format(ies_id, count)
if count < opts.mindepth:
continue
print("\t".join(str(x) for x in \
(seqid, start - 1, end, ies_name)), file=fw)
ies_id += 1
fw.close()
sort([countbedfile, "-i", sort_tmpdir])
# Remove deletions that contain some read depth
depthbedfile = pf + ".depth.bed"
if need_update((sortedbedfile, countbedfile), depthbedfile):
depth([sortedbedfile, countbedfile, "--outfile={0}".format(depthbedfile)])
validbedfile = pf + ".valid.bed"
if need_update(depthbedfile, validbedfile):
fw = open(validbedfile, "w")
logging.debug("Filter valid deletions to `{0}`.".format(validbedfile))
bed = Bed(depthbedfile)
all_scores = [float(b.score) for b in bed]
lb, ub = outlier_cutoff(all_scores)
logging.debug("Bounds for depths: LB={0:.2f} (ignored) UB={1:.2f}".format(lb, ub))
for b in bed:
if float(b.score) > ub:
continue
print(b, file=fw)
fw.close()
# Remove deletions that contain sequencing gaps on its flanks
selectedbedfile = pf + ".selected.bed"
if need_update(validbedfile, selectedbedfile):
flanksbedfile = pf + ".flanks.bed"
fw = open(flanksbedfile, "w")
bed = Bed(validbedfile)
flank = 100
logging.debug("Write deletion flanks to `{0}`.".format(flanksbedfile))
for b in bed:
start, end = b.start, b.end
b.start, b.end = start, min(start + flank - 1, end)
print(b, file=fw)
b.start, b.end = max(start, end - flank + 1), end
print(b, file=fw)
fw.close()
intersectidsfile = pf + ".intersect.ids"
cmd = "intersectBed -a {0} -b {1}".format(flanksbedfile, gapsbedfile)
cmd += " | cut -f4 | sort -u"
sh(cmd, outfile=intersectidsfile)
some([validbedfile, intersectidsfile, "-v",
"--outfile={0}".format(selectedbedfile)])
# Find best-scoring non-overlapping set
iesbedfile = pf + ".ies.bed"
if need_update(selectedbedfile, iesbedfile):
bed = Bed(selectedbedfile)
fw = open(iesbedfile, "w")
logging.debug("Write IES to `{0}`.".format(iesbedfile))
branges = [Range(x.seqid, x.start, x.end, int(x.accn.rsplit("r")[-1]), i) \
for i, x in enumerate(bed)]
iranges, iscore = range_chain(branges)
logging.debug("Best chain score: {0} ({1} IES)".\
format(iscore, len(iranges)))
ies_id = 1
for seqid, start, end, score, id in iranges:
ies_name = "IES-{0:05d}-r{1}".format(ies_id, score)
span = end - start + 1
print("\t".join(str(x) for x in \
(seqid, start - 1, end, ies_name, span)), file=fw)
ies_id += 1
fw.close() |
def _get_ann_labels_data(self, order_ann, bins_ann):
"""Generate ColumnDataSource dictionary for annular labels.
"""
if self.yticks is None:
return dict(x=[], y=[], text=[], angle=[])
mapping = self._compute_tick_mapping("radius", order_ann, bins_ann)
values = [(label, radius[0]) for label, radius in mapping.items()]
labels, radius = zip(*values)
radius = np.array(radius)
y_coord = np.sin(np.deg2rad(self.yrotation)) * radius + self.max_radius
x_coord = np.cos(np.deg2rad(self.yrotation)) * radius + self.max_radius
return dict(x=x_coord,
y=y_coord,
text=labels,
angle=[0]*len(labels)) | Generate ColumnDataSource dictionary for annular labels. | Below is the the instruction that describes the task:
### Input:
Generate ColumnDataSource dictionary for annular labels.
### Response:
def _get_ann_labels_data(self, order_ann, bins_ann):
"""Generate ColumnDataSource dictionary for annular labels.
"""
if self.yticks is None:
return dict(x=[], y=[], text=[], angle=[])
mapping = self._compute_tick_mapping("radius", order_ann, bins_ann)
values = [(label, radius[0]) for label, radius in mapping.items()]
labels, radius = zip(*values)
radius = np.array(radius)
y_coord = np.sin(np.deg2rad(self.yrotation)) * radius + self.max_radius
x_coord = np.cos(np.deg2rad(self.yrotation)) * radius + self.max_radius
return dict(x=x_coord,
y=y_coord,
text=labels,
angle=[0]*len(labels)) |
def tprint(text, font=DEFAULT_FONT, chr_ignore=True):
r"""
Print art text (support \n).
:param text: input text
:type text:str
:param font: input font
:type font:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:return: None
"""
result = text2art(text, font=font, chr_ignore=chr_ignore)
print(result) | r"""
Print art text (support \n).
:param text: input text
:type text:str
:param font: input font
:type font:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:return: None | Below is the the instruction that describes the task:
### Input:
r"""
Print art text (support \n).
:param text: input text
:type text:str
:param font: input font
:type font:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:return: None
### Response:
def tprint(text, font=DEFAULT_FONT, chr_ignore=True):
r"""
Print art text (support \n).
:param text: input text
:type text:str
:param font: input font
:type font:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:return: None
"""
result = text2art(text, font=font, chr_ignore=chr_ignore)
print(result) |
def request(self, method, params):
"""Send a JSONRPC request."""
identifier = random.randint(1, 1000)
self._transport.write(jsonrpc_request(method, identifier, params))
self._buffer[identifier] = {'flag': asyncio.Event()}
yield from self._buffer[identifier]['flag'].wait()
result = self._buffer[identifier]['data']
del self._buffer[identifier]['data']
return result | Send a JSONRPC request. | Below is the the instruction that describes the task:
### Input:
Send a JSONRPC request.
### Response:
def request(self, method, params):
"""Send a JSONRPC request."""
identifier = random.randint(1, 1000)
self._transport.write(jsonrpc_request(method, identifier, params))
self._buffer[identifier] = {'flag': asyncio.Event()}
yield from self._buffer[identifier]['flag'].wait()
result = self._buffer[identifier]['data']
del self._buffer[identifier]['data']
return result |
def import_ecdsa_privatekey_from_file(filepath, password=None):
"""
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the encrypted ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=False)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Store the encrypted contents of 'filepath' prior to calling the decryption
# routine.
encrypted_key = None
with open(filepath, 'rb') as file_object:
encrypted_key = file_object.read()
# Decrypt the loaded key file, calling the 'cryptography' library to generate
# the derived encryption key from 'password'. Raise
# 'securesystemslib.exceptions.CryptoError' if the decryption fails.
key_object = securesystemslib.keys.decrypt_key(encrypted_key.decode('utf-8'),
password)
# Raise an exception if an unexpected key type is imported.
if key_object['keytype'] != 'ecdsa-sha2-nistp256':
message = 'Invalid key type loaded: ' + repr(key_object['keytype'])
raise securesystemslib.exceptions.FormatError(message)
# Add "keyid_hash_algorithms" so that equal ecdsa keys with different keyids
# can be associated using supported keyid_hash_algorithms.
key_object['keyid_hash_algorithms'] = \
securesystemslib.settings.HASH_ALGORITHMS
return key_object | <Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'. | Below is the the instruction that describes the task:
### Input:
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
### Response:
def import_ecdsa_privatekey_from_file(filepath, password=None):
"""
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the encrypted ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=False)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Store the encrypted contents of 'filepath' prior to calling the decryption
# routine.
encrypted_key = None
with open(filepath, 'rb') as file_object:
encrypted_key = file_object.read()
# Decrypt the loaded key file, calling the 'cryptography' library to generate
# the derived encryption key from 'password'. Raise
# 'securesystemslib.exceptions.CryptoError' if the decryption fails.
key_object = securesystemslib.keys.decrypt_key(encrypted_key.decode('utf-8'),
password)
# Raise an exception if an unexpected key type is imported.
if key_object['keytype'] != 'ecdsa-sha2-nistp256':
message = 'Invalid key type loaded: ' + repr(key_object['keytype'])
raise securesystemslib.exceptions.FormatError(message)
# Add "keyid_hash_algorithms" so that equal ecdsa keys with different keyids
# can be associated using supported keyid_hash_algorithms.
key_object['keyid_hash_algorithms'] = \
securesystemslib.settings.HASH_ALGORITHMS
return key_object |
def calculate_P(self, T, P, method):
r'''Method to calculate pressure-dependent liquid viscosity at
temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
P : float
Pressure at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the liquid at T and P, [Pa*S]
'''
if method == LUCAS:
mu = self.T_dependent_property(T)
Psat = self.Psat(T) if hasattr(self.Psat, '__call__') else self.Psat
mu = Lucas(T, P, self.Tc, self.Pc, self.omega, Psat, mu)
elif method == COOLPROP:
mu = PropsSI('V', 'T', T, 'P', P, self.CASRN)
elif method in self.tabular_data:
mu = self.interpolate_P(T, P, method)
return mu | r'''Method to calculate pressure-dependent liquid viscosity at
temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
P : float
Pressure at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the liquid at T and P, [Pa*S] | Below is the the instruction that describes the task:
### Input:
r'''Method to calculate pressure-dependent liquid viscosity at
temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
P : float
Pressure at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the liquid at T and P, [Pa*S]
### Response:
def calculate_P(self, T, P, method):
r'''Method to calculate pressure-dependent liquid viscosity at
temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
P : float
Pressure at which to calculate viscosity, [K]
method : str
Name of the method to use
Returns
-------
mu : float
Viscosity of the liquid at T and P, [Pa*S]
'''
if method == LUCAS:
mu = self.T_dependent_property(T)
Psat = self.Psat(T) if hasattr(self.Psat, '__call__') else self.Psat
mu = Lucas(T, P, self.Tc, self.Pc, self.omega, Psat, mu)
elif method == COOLPROP:
mu = PropsSI('V', 'T', T, 'P', P, self.CASRN)
elif method in self.tabular_data:
mu = self.interpolate_P(T, P, method)
return mu |
def autolink(self, link, is_email=False):
"""Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not.
"""
text = link = escape(link)
if is_email:
link = 'mailto:%s' % link
return '<a href="%s">%s</a>' % (link, text) | Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not. | Below is the the instruction that describes the task:
### Input:
Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not.
### Response:
def autolink(self, link, is_email=False):
"""Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not.
"""
text = link = escape(link)
if is_email:
link = 'mailto:%s' % link
return '<a href="%s">%s</a>' % (link, text) |
def press_button(self, value):
"""
Click the button with the given label.
"""
button = find_button(world.browser, value)
if not button:
raise AssertionError(
"Cannot find a button named '{}'.".format(value))
button.click() | Click the button with the given label. | Below is the the instruction that describes the task:
### Input:
Click the button with the given label.
### Response:
def press_button(self, value):
"""
Click the button with the given label.
"""
button = find_button(world.browser, value)
if not button:
raise AssertionError(
"Cannot find a button named '{}'.".format(value))
button.click() |
def _on_timeline_update(self, event):
"""Timeline update broadcast from Abode SocketIO server."""
if isinstance(event, (tuple, list)):
event = event[0]
event_type = event.get('event_type')
event_code = event.get('event_code')
if not event_type or not event_code:
_LOGGER.warning("Invalid timeline update event: %s", event)
return
_LOGGER.debug("Timeline event received: %s - %s (%s)",
event.get('event_name'), event_type, event_code)
# Compress our callbacks into those that match this event_code
# or ones registered to get callbacks for all events
codes = (event_code, TIMELINE.ALL['event_code'])
all_callbacks = [self._timeline_callbacks[code] for code in codes]
for callbacks in all_callbacks:
for callback in callbacks:
_execute_callback(callback, event)
# Attempt to map the event code to a group and callback
event_group = TIMELINE.map_event_code(event_code)
if event_group:
for callback in self._event_callbacks.get(event_group, ()):
_execute_callback(callback, event) | Timeline update broadcast from Abode SocketIO server. | Below is the the instruction that describes the task:
### Input:
Timeline update broadcast from Abode SocketIO server.
### Response:
def _on_timeline_update(self, event):
"""Timeline update broadcast from Abode SocketIO server."""
if isinstance(event, (tuple, list)):
event = event[0]
event_type = event.get('event_type')
event_code = event.get('event_code')
if not event_type or not event_code:
_LOGGER.warning("Invalid timeline update event: %s", event)
return
_LOGGER.debug("Timeline event received: %s - %s (%s)",
event.get('event_name'), event_type, event_code)
# Compress our callbacks into those that match this event_code
# or ones registered to get callbacks for all events
codes = (event_code, TIMELINE.ALL['event_code'])
all_callbacks = [self._timeline_callbacks[code] for code in codes]
for callbacks in all_callbacks:
for callback in callbacks:
_execute_callback(callback, event)
# Attempt to map the event code to a group and callback
event_group = TIMELINE.map_event_code(event_code)
if event_group:
for callback in self._event_callbacks.get(event_group, ()):
_execute_callback(callback, event) |
def _limit_and_df(self, query, limit, as_df=False):
"""adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame
:param bool as_df: if is set to True results return as pandas.DataFrame
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param int limit: maximum number of results
:return: query result of pyctd.manager.models.XY objects
"""
if limit:
query = query.limit(limit)
if as_df:
results = read_sql(query.statement, self.engine)
else:
results = query.all()
return results | adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame
:param bool as_df: if is set to True results return as pandas.DataFrame
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param int limit: maximum number of results
:return: query result of pyctd.manager.models.XY objects | Below is the the instruction that describes the task:
### Input:
adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame
:param bool as_df: if is set to True results return as pandas.DataFrame
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param int limit: maximum number of results
:return: query result of pyctd.manager.models.XY objects
### Response:
def _limit_and_df(self, query, limit, as_df=False):
"""adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame
:param bool as_df: if is set to True results return as pandas.DataFrame
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param int limit: maximum number of results
:return: query result of pyctd.manager.models.XY objects
"""
if limit:
query = query.limit(limit)
if as_df:
results = read_sql(query.statement, self.engine)
else:
results = query.all()
return results |
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built | Combines all the given validator callables into one, running all the
validators in sequence on the given value. | Below is the the instruction that describes the task:
### Input:
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
### Response:
def All(*validators):
"""
Combines all the given validator callables into one, running all the
validators in sequence on the given value.
"""
@wraps(All)
def built(value):
for validator in validators:
value = validator(value)
return value
return built |
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds) | Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError | Below is the the instruction that describes the task:
### Input:
Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
### Response:
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds) |
def nurbs_to_bspline(obj, **kwargs):
""" Extracts the non-rational components from rational parametric shapes, if possible.
The possibility of converting a rational shape to a non-rational one depends on the weights vector.
:param obj: NURBS shape
:type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume
:return: B-Spline shape
:rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume
:raises: TypeError
"""
if not obj.rational:
raise TypeError("The input must be a rational shape")
# Get keyword arguments
tol = kwargs.get('tol', 10e-8)
# Test for non-rational component extraction
for w in obj.weights:
if abs(w - 1.0) > tol:
print("Cannot extract non-rational components")
return obj
# NURBS -> B-Spline
if isinstance(obj, NURBS.Curve):
return _convert.convert_curve(obj, BSpline)
elif isinstance(obj, NURBS.Surface):
return _convert.convert_surface(obj, BSpline)
elif isinstance(obj, NURBS.Volume):
return _convert.convert_volume(obj, BSpline)
else:
raise TypeError("Input must be an instance of NURBS curve, surface or volume") | Extracts the non-rational components from rational parametric shapes, if possible.
The possibility of converting a rational shape to a non-rational one depends on the weights vector.
:param obj: NURBS shape
:type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume
:return: B-Spline shape
:rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume
:raises: TypeError | Below is the the instruction that describes the task:
### Input:
Extracts the non-rational components from rational parametric shapes, if possible.
The possibility of converting a rational shape to a non-rational one depends on the weights vector.
:param obj: NURBS shape
:type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume
:return: B-Spline shape
:rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume
:raises: TypeError
### Response:
def nurbs_to_bspline(obj, **kwargs):
""" Extracts the non-rational components from rational parametric shapes, if possible.
The possibility of converting a rational shape to a non-rational one depends on the weights vector.
:param obj: NURBS shape
:type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume
:return: B-Spline shape
:rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume
:raises: TypeError
"""
if not obj.rational:
raise TypeError("The input must be a rational shape")
# Get keyword arguments
tol = kwargs.get('tol', 10e-8)
# Test for non-rational component extraction
for w in obj.weights:
if abs(w - 1.0) > tol:
print("Cannot extract non-rational components")
return obj
# NURBS -> B-Spline
if isinstance(obj, NURBS.Curve):
return _convert.convert_curve(obj, BSpline)
elif isinstance(obj, NURBS.Surface):
return _convert.convert_surface(obj, BSpline)
elif isinstance(obj, NURBS.Volume):
return _convert.convert_volume(obj, BSpline)
else:
raise TypeError("Input must be an instance of NURBS curve, surface or volume") |
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
"""
Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing
"""
ipshell = None
try:
# noinspection PyPackageRequirements
import IPython
have_ipython = True
except ImportError:
have_ipython = False
if not ipshell and traceback and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.core.debugger import Pdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.debugger import TerminalPdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.ipapp import TerminalIPythonApp
ipapp = TerminalIPythonApp.instance()
ipapp.interact = False # Avoid output (banner, prints)
ipapp.initialize(argv=[])
def_colors = ipapp.shell.colors
pdb_obj = TerminalPdb(def_colors)
pdb_obj.botframe = None # not sure. exception otherwise at quit
def ipshell():
"""
Run the IPython shell.
"""
pdb_obj.interaction(None, traceback=traceback)
except Exception:
print("IPython Pdb exception:")
better_exchook(*sys.exc_info(), autodebugshell=False)
if not ipshell and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython.terminal.embed
class DummyMod(object):
"""Dummy module"""
module = DummyMod()
module.__dict__ = user_global_ns
module.__name__ = "_DummyMod"
if "__name__" not in user_ns:
user_ns = user_ns.copy()
user_ns["__name__"] = "_DummyUserNsMod"
ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance(
user_ns=user_ns, user_module=module)
except Exception:
print("IPython not available:")
better_exchook(*sys.exc_info(), autodebugshell=False)
else:
if execWrapper:
old = ipshell.run_code
ipshell.run_code = lambda code: execWrapper(lambda: old(code))
if ipshell:
ipshell()
else:
print("Use simple debug shell:")
if traceback:
import pdb
pdb.post_mortem(traceback)
else:
simple_debug_shell(user_global_ns, user_ns) | Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing | Below is the the instruction that describes the task:
### Input:
Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing
### Response:
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
"""
Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing
"""
ipshell = None
try:
# noinspection PyPackageRequirements
import IPython
have_ipython = True
except ImportError:
have_ipython = False
if not ipshell and traceback and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.core.debugger import Pdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.debugger import TerminalPdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.ipapp import TerminalIPythonApp
ipapp = TerminalIPythonApp.instance()
ipapp.interact = False # Avoid output (banner, prints)
ipapp.initialize(argv=[])
def_colors = ipapp.shell.colors
pdb_obj = TerminalPdb(def_colors)
pdb_obj.botframe = None # not sure. exception otherwise at quit
def ipshell():
"""
Run the IPython shell.
"""
pdb_obj.interaction(None, traceback=traceback)
except Exception:
print("IPython Pdb exception:")
better_exchook(*sys.exc_info(), autodebugshell=False)
if not ipshell and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython.terminal.embed
class DummyMod(object):
"""Dummy module"""
module = DummyMod()
module.__dict__ = user_global_ns
module.__name__ = "_DummyMod"
if "__name__" not in user_ns:
user_ns = user_ns.copy()
user_ns["__name__"] = "_DummyUserNsMod"
ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance(
user_ns=user_ns, user_module=module)
except Exception:
print("IPython not available:")
better_exchook(*sys.exc_info(), autodebugshell=False)
else:
if execWrapper:
old = ipshell.run_code
ipshell.run_code = lambda code: execWrapper(lambda: old(code))
if ipshell:
ipshell()
else:
print("Use simple debug shell:")
if traceback:
import pdb
pdb.post_mortem(traceback)
else:
simple_debug_shell(user_global_ns, user_ns) |
def teardown(self):
"""
Clean up all resources when we're done with them.
"""
self.containers._teardown()
self.networks._teardown()
self.volumes._teardown()
# We need to close the underlying APIClient explicitly to avoid
# ResourceWarnings from unclosed HTTP connections.
self._client.api.close() | Clean up all resources when we're done with them. | Below is the the instruction that describes the task:
### Input:
Clean up all resources when we're done with them.
### Response:
def teardown(self):
"""
Clean up all resources when we're done with them.
"""
self.containers._teardown()
self.networks._teardown()
self.volumes._teardown()
# We need to close the underlying APIClient explicitly to avoid
# ResourceWarnings from unclosed HTTP connections.
self._client.api.close() |
def plotwrapper(f):
"""
This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function.
"""
def wrapper(pymc_obj, *args, **kwargs):
start = 0
if 'start' in kwargs:
start = kwargs.pop('start')
# Figure out what type of object it is
try:
# First try Model type
for variable in pymc_obj._variables_to_tally:
# Plot object
if variable._plot is not False:
data = pymc_obj.trace(variable.__name__)[start:]
if size(data[-1]) >= 10 and variable._plot != True:
continue
elif variable.dtype is dtype('object'):
continue
name = variable.__name__
if args:
name = '%s_%s' % (args[0], variable.__name__)
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
try:
# Then try Trace type
data = pymc_obj()[:]
name = pymc_obj.name
f(data, name, *args, **kwargs)
return
except (AttributeError, TypeError):
pass
try:
# Then try Node type
if pymc_obj._plot is not False:
data = pymc_obj.trace()[start:] # This is deprecated. DH
name = pymc_obj.__name__
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
if isinstance(pymc_obj, dict):
# Then try dictionary
for i in pymc_obj:
data = pymc_obj[i][start:]
if args:
i = '%s_%s' % (args[0], i)
elif 'name' in kwargs:
i = '%s_%s' % (kwargs.pop('name'), i)
f(data, i, *args, **kwargs)
return
# If others fail, assume that raw data is passed
f(pymc_obj, *args, **kwargs)
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
return wrapper | This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function. | Below is the the instruction that describes the task:
### Input:
This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function.
### Response:
def plotwrapper(f):
"""
This decorator allows for PyMC arguments of various types to be passed to
the plotting functions. It identifies the type of object and locates its
trace(s), then passes the data to the wrapped plotting function.
"""
def wrapper(pymc_obj, *args, **kwargs):
start = 0
if 'start' in kwargs:
start = kwargs.pop('start')
# Figure out what type of object it is
try:
# First try Model type
for variable in pymc_obj._variables_to_tally:
# Plot object
if variable._plot is not False:
data = pymc_obj.trace(variable.__name__)[start:]
if size(data[-1]) >= 10 and variable._plot != True:
continue
elif variable.dtype is dtype('object'):
continue
name = variable.__name__
if args:
name = '%s_%s' % (args[0], variable.__name__)
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
try:
# Then try Trace type
data = pymc_obj()[:]
name = pymc_obj.name
f(data, name, *args, **kwargs)
return
except (AttributeError, TypeError):
pass
try:
# Then try Node type
if pymc_obj._plot is not False:
data = pymc_obj.trace()[start:] # This is deprecated. DH
name = pymc_obj.__name__
f(data, name, *args, **kwargs)
return
except AttributeError:
pass
if isinstance(pymc_obj, dict):
# Then try dictionary
for i in pymc_obj:
data = pymc_obj[i][start:]
if args:
i = '%s_%s' % (args[0], i)
elif 'name' in kwargs:
i = '%s_%s' % (kwargs.pop('name'), i)
f(data, i, *args, **kwargs)
return
# If others fail, assume that raw data is passed
f(pymc_obj, *args, **kwargs)
wrapper.__doc__ = f.__doc__
wrapper.__name__ = f.__name__
return wrapper |
def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
template_directory = ''
if 'template' in self._command_args.keys():
template_directory = self._command_args['template']
else:
template_directory = 'role'
self._process_templates(template_directory, self._command_args,
role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg) | Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None | Below is the the instruction that describes the task:
### Input:
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
### Response:
def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
template_directory = ''
if 'template' in self._command_args.keys():
template_directory = self._command_args['template']
else:
template_directory = 'role'
self._process_templates(template_directory, self._command_args,
role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg) |
def get_vpnv4fs_table(self):
"""Returns global VPNv4 Flow Specification table.
Creates the table if it does not exist.
"""
vpnv4fs_table = self._global_tables.get(RF_VPNv4_FLOWSPEC)
# Lazy initialization of the table.
if not vpnv4fs_table:
vpnv4fs_table = VPNv4FlowSpecTable(self._core_service,
self._signal_bus)
self._global_tables[RF_VPNv4_FLOWSPEC] = vpnv4fs_table
self._tables[(None, RF_VPNv4_FLOWSPEC)] = vpnv4fs_table
return vpnv4fs_table | Returns global VPNv4 Flow Specification table.
Creates the table if it does not exist. | Below is the the instruction that describes the task:
### Input:
Returns global VPNv4 Flow Specification table.
Creates the table if it does not exist.
### Response:
def get_vpnv4fs_table(self):
"""Returns global VPNv4 Flow Specification table.
Creates the table if it does not exist.
"""
vpnv4fs_table = self._global_tables.get(RF_VPNv4_FLOWSPEC)
# Lazy initialization of the table.
if not vpnv4fs_table:
vpnv4fs_table = VPNv4FlowSpecTable(self._core_service,
self._signal_bus)
self._global_tables[RF_VPNv4_FLOWSPEC] = vpnv4fs_table
self._tables[(None, RF_VPNv4_FLOWSPEC)] = vpnv4fs_table
return vpnv4fs_table |
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()} | Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader. | Below is the the instruction that describes the task:
### Input:
Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
### Response:
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()} |
def connect_post_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
"""connect_post_namespaced_pod_proxy_with_path # noqa: E501
connect POST requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
else:
(data) = self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
return data | connect_post_namespaced_pod_proxy_with_path # noqa: E501
connect POST requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
connect_post_namespaced_pod_proxy_with_path # noqa: E501
connect POST requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
### Response:
def connect_post_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501
"""connect_post_namespaced_pod_proxy_with_path # noqa: E501
connect POST requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
else:
(data) = self.connect_post_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501
return data |
def join(*vectors):
r"""
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors.
"""
# check supplied arguments
if len(vectors) < 2:
return vectors[0]
# process supplied arguments
vectors = list(vectors)
for i in range(len(vectors)):
vectors[i] = numpy.array(vectors[i], copy=False)
if vectors[i].ndim == 1:
vectors[i] = numpy.array([vectors[i]], copy=False).T
# treat single-value cases special (no squeezing)
if 1 == len(vectors[0]):
return numpy.concatenate(vectors, 1)
return numpy.squeeze(numpy.concatenate(vectors, 1)) | r"""
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors. | Below is the the instruction that describes the task:
### Input:
r"""
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors.
### Response:
def join(*vectors):
r"""
Takes an arbitrary number of aligned vectors of the same length and combines
them into a single vector (vertically).
E.g. taking two 100-sample feature vectors of once 5 and once 7 features, a 100x12
feature vector is created and returned.
The feature vectors are expected to have the form samples*features i.e.::
s1 s2 s3 [...]
f1
f2
[...]
Parameters
----------
*vectors : sequences
A number of vectors with the same number of samples.
Returns
-------
vector : ndarray
The combined vectors.
"""
# check supplied arguments
if len(vectors) < 2:
return vectors[0]
# process supplied arguments
vectors = list(vectors)
for i in range(len(vectors)):
vectors[i] = numpy.array(vectors[i], copy=False)
if vectors[i].ndim == 1:
vectors[i] = numpy.array([vectors[i]], copy=False).T
# treat single-value cases special (no squeezing)
if 1 == len(vectors[0]):
return numpy.concatenate(vectors, 1)
return numpy.squeeze(numpy.concatenate(vectors, 1)) |
def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
if ligand_type:
self.LigandType = ligand_type.group(1) | This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type. | Below is the the instruction that describes the task:
### Input:
This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.
### Response:
def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
if ligand_type:
self.LigandType = ligand_type.group(1) |
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label) | Unregisters the entries in the encoder and decoder registries which
have the label ``label``. | Below is the the instruction that describes the task:
### Input:
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
### Response:
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label) |
def option(self, section, option):
""" Returns the value of the option """
if self.config.has_section(section):
if self.config.has_option(section, option):
return (True, self.config.get(section, option))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist') | Returns the value of the option | Below is the the instruction that describes the task:
### Input:
Returns the value of the option
### Response:
def option(self, section, option):
""" Returns the value of the option """
if self.config.has_section(section):
if self.config.has_option(section, option):
return (True, self.config.get(section, option))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist') |
def get(self, key, default=_sentinel):
"""
Gets the value from the key.
If the key doesn't exist, the default value is returned, otherwise None.
:param key: The key
:param default: The default value
:return: The value
"""
tup = self._data.get(key.lower())
if tup is not None:
return tup[1]
elif default is not _sentinel:
return default
else:
return None | Gets the value from the key.
If the key doesn't exist, the default value is returned, otherwise None.
:param key: The key
:param default: The default value
:return: The value | Below is the the instruction that describes the task:
### Input:
Gets the value from the key.
If the key doesn't exist, the default value is returned, otherwise None.
:param key: The key
:param default: The default value
:return: The value
### Response:
def get(self, key, default=_sentinel):
"""
Gets the value from the key.
If the key doesn't exist, the default value is returned, otherwise None.
:param key: The key
:param default: The default value
:return: The value
"""
tup = self._data.get(key.lower())
if tup is not None:
return tup[1]
elif default is not _sentinel:
return default
else:
return None |
def get_next_input(self):
"""
Returns the next line of input
:return: string of input
"""
# TODO: could override input if we get input coming in at the same time
all_input = Deployment.objects.get(pk=self.id).input or ''
lines = all_input.splitlines()
first_line = lines[0] if len(lines) else None
lines = lines[1:] if len(lines) > 1 else []
Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines))
return first_line | Returns the next line of input
:return: string of input | Below is the the instruction that describes the task:
### Input:
Returns the next line of input
:return: string of input
### Response:
def get_next_input(self):
"""
Returns the next line of input
:return: string of input
"""
# TODO: could override input if we get input coming in at the same time
all_input = Deployment.objects.get(pk=self.id).input or ''
lines = all_input.splitlines()
first_line = lines[0] if len(lines) else None
lines = lines[1:] if len(lines) > 1 else []
Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines))
return first_line |
def child(self,index):
"helper for __getitem__/__setitem__"
if isinstance(index,tuple):
attr,i = index
return getattr(self,attr)[i]
else: return getattr(self,index) | helper for __getitem__/__setitem__ | Below is the the instruction that describes the task:
### Input:
helper for __getitem__/__setitem__
### Response:
def child(self,index):
"helper for __getitem__/__setitem__"
if isinstance(index,tuple):
attr,i = index
return getattr(self,attr)[i]
else: return getattr(self,index) |
def _get_layer_converter_fn(layer, add_custom_layers = False):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == 'CUSTOM':
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) | Get the right converter function for Keras | Below is the the instruction that describes the task:
### Input:
Get the right converter function for Keras
### Response:
def _get_layer_converter_fn(layer, add_custom_layers = False):
"""Get the right converter function for Keras
"""
layer_type = type(layer)
if layer_type in _KERAS_LAYER_REGISTRY:
convert_func = _KERAS_LAYER_REGISTRY[layer_type]
if convert_func is _layers2.convert_activation:
act_name = _layers2._get_activation_name_from_keras_layer(layer)
if act_name == 'CUSTOM':
return None
return convert_func
elif add_custom_layers:
return None
else:
raise TypeError("Keras layer of type %s is not supported." % type(layer)) |
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self | reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post : | Below is the the instruction that describes the task:
### Input:
reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
### Response:
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self |
Subsets and Splits