code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
""" Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
string query_column
default: 'RIID'
list ids_to_delete
Returns list of DeleteResults
"""
profile_extension = profile_extension.get_soap_object(self.client)
result = self.call(
'deleteProfileExtensionMembers', profile_extension, query_column, ids_to_delete)
if hasattr(result, '__iter__'):
return [DeleteResult(delete_result) for delete_result in result]
return [DeleteResult(result)] | Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
string query_column
default: 'RIID'
list ids_to_delete
Returns list of DeleteResults | Below is the instruction that describes the task:
### Input:
Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
string query_column
default: 'RIID'
list ids_to_delete
Returns list of DeleteResults
### Response:
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
""" Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
string query_column
default: 'RIID'
list ids_to_delete
Returns list of DeleteResults
"""
profile_extension = profile_extension.get_soap_object(self.client)
result = self.call(
'deleteProfileExtensionMembers', profile_extension, query_column, ids_to_delete)
if hasattr(result, '__iter__'):
return [DeleteResult(delete_result) for delete_result in result]
return [DeleteResult(result)] |
def makeHandler(self, dialect):
"""create and cache the handler object for this dialect"""
if dialect not in self.dialects:
m = "The dialect specified, '{}', wasn't whitelisted in change_hook".format(dialect)
log.msg(m)
log.msg(
"Note: if dialect is 'base' then it's possible your URL is malformed and we didn't regex it properly")
raise ValueError(m)
if dialect not in self._dialect_handlers:
if dialect not in self._plugins:
m = "The dialect specified, '{}', is not registered as a buildbot.webhook plugin".format(dialect)
log.msg(m)
raise ValueError(m)
options = self.dialects[dialect]
if isinstance(options, dict) and 'custom_class' in options:
klass = options['custom_class']
else:
klass = self._plugins.get(dialect)
self._dialect_handlers[dialect] = klass(self.master, self.dialects[dialect])
return self._dialect_handlers[dialect] | create and cache the handler object for this dialect | Below is the instruction that describes the task:
### Input:
create and cache the handler object for this dialect
### Response:
def makeHandler(self, dialect):
"""create and cache the handler object for this dialect"""
if dialect not in self.dialects:
m = "The dialect specified, '{}', wasn't whitelisted in change_hook".format(dialect)
log.msg(m)
log.msg(
"Note: if dialect is 'base' then it's possible your URL is malformed and we didn't regex it properly")
raise ValueError(m)
if dialect not in self._dialect_handlers:
if dialect not in self._plugins:
m = "The dialect specified, '{}', is not registered as a buildbot.webhook plugin".format(dialect)
log.msg(m)
raise ValueError(m)
options = self.dialects[dialect]
if isinstance(options, dict) and 'custom_class' in options:
klass = options['custom_class']
else:
klass = self._plugins.get(dialect)
self._dialect_handlers[dialect] = klass(self.master, self.dialects[dialect])
return self._dialect_handlers[dialect] |
def format_value(self):
"""
Return the formatted (interpreted) data according to `data_type`.
"""
return format_value(
self.data_type,
self.data,
self.parent.stringpool_main.getString
) | Return the formatted (interpreted) data according to `data_type`. | Below is the instruction that describes the task:
### Input:
Return the formatted (interpreted) data according to `data_type`.
### Response:
def format_value(self):
"""
Return the formatted (interpreted) data according to `data_type`.
"""
return format_value(
self.data_type,
self.data,
self.parent.stringpool_main.getString
) |
def _normalize_to_unit(self, value, unit):
"""Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB).
"""
if unit == 'ms':
return value / 1000.0
if unit == 'us':
return value / 1000000.0
if unit == 'KB':
return value * 1024
if unit == 'MB':
return value * 1024 * 1024
if unit == 'GB':
return value * 1024 * 1024 * 1024
if unit == 'TB':
return value * 1024 * 1024 * 1024 * 1024
return value | Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB). | Below is the instruction that describes the task:
### Input:
Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB).
### Response:
def _normalize_to_unit(self, value, unit):
"""Normalize the value to the unit returned.
We use base-1000 for second-based units, and base-1024 for
byte-based units. Sadly, the Nagios-Plugins specification doesn't
disambiguate base-1000 (KB) and base-1024 (KiB).
"""
if unit == 'ms':
return value / 1000.0
if unit == 'us':
return value / 1000000.0
if unit == 'KB':
return value * 1024
if unit == 'MB':
return value * 1024 * 1024
if unit == 'GB':
return value * 1024 * 1024 * 1024
if unit == 'TB':
return value * 1024 * 1024 * 1024 * 1024
return value |
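A standalone sketch of the same conversion table, handy for sanity-checking the factors by hand (the method does not touch instance state, so `self` is dropped):

def normalize_to_unit(value, unit):
    # Sub-second units use base-1000; byte units use base-1024, mirroring the method above.
    if unit == 'ms':
        return value / 1000.0
    if unit == 'us':
        return value / 1000000.0
    byte_factors = {'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
    return value * byte_factors.get(unit, 1)

assert normalize_to_unit(1500, 'ms') == 1.5            # milliseconds -> seconds
assert normalize_to_unit(2, 'MB') == 2 * 1024 ** 2     # megabytes -> bytes
assert normalize_to_unit(7, 's') == 7                  # unrecognised unit: value passes through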
def addInternalLink(self, link):
'''Appends InternalLink
'''
if isinstance(link, InternalLink):
self.internalLinks.append(link)
else:
raise InternalLinkError(
'link Type should be InternalLink, not %s' % type(link)) | Appends InternalLink | Below is the instruction that describes the task:
### Input:
Appends InternalLink
### Response:
def addInternalLink(self, link):
'''Appends InternalLink
'''
if isinstance(link, InternalLink):
self.internalLinks.append(link)
else:
raise InternalLinkError(
'link Type should be InternalLink, not %s' % type(link)) |
def rotate(self, azimuth, axis=None):
"""Rotate the trackball about the "Up" axis by azimuth radians.
Parameters
----------
azimuth : float
The number of radians to rotate.
"""
target = self._target
y_axis = self._n_pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._n_pose = x_rot_mat.dot(self._n_pose)
y_axis = self._pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._pose = x_rot_mat.dot(self._pose) | Rotate the trackball about the "Up" axis by azimuth radians.
Parameters
----------
azimuth : float
The number of radians to rotate. | Below is the instruction that describes the task:
### Input:
Rotate the trackball about the "Up" axis by azimuth radians.
Parameters
----------
azimuth : float
The number of radians to rotate.
### Response:
def rotate(self, azimuth, axis=None):
"""Rotate the trackball about the "Up" axis by azimuth radians.
Parameters
----------
azimuth : float
The number of radians to rotate.
"""
target = self._target
y_axis = self._n_pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._n_pose = x_rot_mat.dot(self._n_pose)
y_axis = self._pose[:3, 1].flatten()
if axis is not None:
y_axis = axis
x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
self._pose = x_rot_mat.dot(self._pose) |
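The heavy lifting is delegated to `transformations.rotation_matrix(angle, direction, point)`. As a rough, self-contained sketch of the kind of matrix it is assumed to return (a homogeneous rotation about an axis through a fixed point), one can build it from Rodrigues' formula:

import numpy as np

def rotation_about_axis(angle, axis, point):
    # 3x3 rotation about a unit axis via Rodrigues' formula.
    u = np.asarray(axis, dtype=float)
    u /= np.linalg.norm(u)
    K = np.array([[0.0, -u[2], u[1]],
                  [u[2], 0.0, -u[0]],
                  [-u[1], u[0], 0.0]])
    R = np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)
    # Embed in a 4x4 homogeneous matrix whose rotation axis passes through `point`.
    M = np.eye(4)
    M[:3, :3] = R
    p = np.asarray(point, dtype=float)
    M[:3, 3] = p - R @ p
    return M

pose = np.eye(4)                                       # stand-in for the camera pose
rot = rotation_about_axis(np.pi / 4, [0.0, 1.0, 0.0], [0.0, 0.0, 0.0])
pose = rot @ pose                                      # same pre-multiplication as above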
def add_subgroups(self, subgroups):
"""
Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects.
"""
if subgroups is None:
subgroups = {}
_subgroups = {}
for sg in subgroups:
assert isinstance(sg, SubGroupDefinition)
_subgroups[sg.name] = sg
self.subgroups = _subgroups | Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects. | Below is the instruction that describes the task:
### Input:
Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects.
### Response:
def add_subgroups(self, subgroups):
"""
Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects.
"""
if subgroups is None:
subgroups = {}
_subgroups = {}
for sg in subgroups:
assert isinstance(sg, SubGroupDefinition)
_subgroups[sg.name] = sg
self.subgroups = _subgroups |
def floats(self, n: int = 2) -> List[float]:
"""Generate a list of random float numbers.
:param n: Raise 10 to the 'n' power.
:return: The list of floating-point numbers.
"""
nums = [self.random.random()
for _ in range(10 ** int(n))]
return nums | Generate a list of random float numbers.
:param n: Raise 10 to the 'n' power.
:return: The list of floating-point numbers. | Below is the instruction that describes the task:
### Input:
Generate a list of random float numbers.
:param n: Raise 10 to the 'n' power.
:return: The list of floating-point numbers.
### Response:
def floats(self, n: int = 2) -> List[float]:
"""Generate a list of random float numbers.
:param n: Raise 10 to the 'n' power.
:return: The list of floating-point numbers.
"""
nums = [self.random.random()
for _ in range(10 ** int(n))]
return nums |
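A minimal stand-in showing the semantics of `n`: it is an exponent, so the default `n=2` produces 10**2 = 100 numbers. The seeded `random.Random` attribute mimics how the provider above is assumed to be wired up:

import random

class Numbers:
    # Minimal stand-in for the provider above; only the pieces used by floats().
    def __init__(self, seed=None):
        self.random = random.Random(seed)

    def floats(self, n=2):
        return [self.random.random() for _ in range(10 ** int(n))]

nums = Numbers(seed=42).floats(n=2)
print(len(nums))                              # 100: `n` is an exponent, not a count
print(all(0.0 <= x < 1.0 for x in nums))      # True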
def add_jac(self, m, val, row, col):
"""Add tuples (val, row, col) to the Jacobian matrix ``m``
Implemented in numpy.arrays for temporary storage.
"""
assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \
'Wrong Jacobian matrix name <{0}>'.format(m)
if isinstance(val, (int, float)):
val = val * ones(len(row), 1)
self._temp[m]['I'] = matrix([self._temp[m]['I'], matrix(row)])
self._temp[m]['J'] = matrix([self._temp[m]['J'], matrix(col)])
self._temp[m]['V'] = matrix([self._temp[m]['V'], matrix(val)]) | Add tuples (val, row, col) to the Jacobian matrix ``m``
Implemented in numpy.arrays for temporary storage. | Below is the instruction that describes the task:
### Input:
Add tuples (val, row, col) to the Jacobian matrix ``m``
Implemented in numpy.arrays for temporary storage.
### Response:
def add_jac(self, m, val, row, col):
"""Add tuples (val, row, col) to the Jacobian matrix ``m``
Implemented in numpy.arrays for temporary storage.
"""
assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \
'Wrong Jacobian matrix name <{0}>'.format(m)
if isinstance(val, (int, float)):
val = val * ones(len(row), 1)
self._temp[m]['I'] = matrix([self._temp[m]['I'], matrix(row)])
self._temp[m]['J'] = matrix([self._temp[m]['J'], matrix(col)])
self._temp[m]['V'] = matrix([self._temp[m]['V'], matrix(val)]) |
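Each `_temp[m]` entry is a COO-style triplet buffer (row indices `I`, column indices `J`, values `V`), and scalar values are broadcast across all positions. A small sketch of the same accumulation pattern using plain NumPy arrays:

import numpy as np

temp = {'Fx': {'I': np.array([], dtype=int),
               'J': np.array([], dtype=int),
               'V': np.array([], dtype=float)}}

def add_jac(m, val, row, col):
    # Broadcast a scalar value over every (row, col) position, then append the triplets.
    row = np.asarray(row)
    col = np.asarray(col)
    val = np.full(len(row), val, dtype=float) if np.isscalar(val) else np.asarray(val, dtype=float)
    temp[m]['I'] = np.concatenate([temp[m]['I'], row])
    temp[m]['J'] = np.concatenate([temp[m]['J'], col])
    temp[m]['V'] = np.concatenate([temp[m]['V'], val])

add_jac('Fx', 1.0, row=[0, 1], col=[2, 3])      # same (val, row, col) convention as above
print(temp['Fx']['I'], temp['Fx']['J'], temp['Fx']['V'])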
def copy(self):
"""Return a clone of this retry manager"""
return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,
deadline=self.deadline, retry_exceptions=self.retry_exceptions) | Return a clone of this retry manager | Below is the instruction that describes the task:
### Input:
Return a clone of this retry manager
### Response:
def copy(self):
"""Return a clone of this retry manager"""
return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,
deadline=self.deadline, retry_exceptions=self.retry_exceptions) |
def accel_fl(q: np.ndarray):
"""Acceleration in the earth-sun system using Fluxion potential energy"""
# Infer number of dimensions from q
dims: int = len(q)
# Number of celestial bodies
B: int = dims // 3
# The force given the positions q of the bodies
f = force(q)
# The accelerations from this force
a = np.zeros(dims)
for i in range(B):
a[slices[i]] = f[slices[i]] / mass[i]
return a | Acceleration in the earth-sun system using Fluxion potential energy | Below is the instruction that describes the task:
### Input:
Acceleration in the earth-sun system using Fluxion potential energy
### Response:
def accel_fl(q: np.ndarray):
"""Acceleration in the earth-sun system using Fluxion potential energy"""
# Infer number of dimensions from q
dims: int = len(q)
# Number of celestial bodies
B: int = dims // 3
# The force given the positions q of the bodies
f = force(q)
# The accelerations from this force
a = np.zeros(dims)
for i in range(B):
a[slices[i]] = f[slices[i]] / mass[i]
return a |
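A minimal numeric sketch of the per-body a = F/m bookkeeping, with two bodies and hypothetical `force`, `mass`, and `slices` definitions standing in for the module-level ones assumed above:

import numpy as np

mass = np.array([1.0, 2.0])                      # two bodies
slices = [slice(0, 3), slice(3, 6)]              # each body owns 3 components of q

def force(q):
    # Placeholder force field for the sketch; the real one comes from the potential energy.
    return -q

def accel(q):
    f = force(q)
    a = np.zeros_like(q)
    for i, m in enumerate(mass):
        a[slices[i]] = f[slices[i]] / m          # Newton's second law per body
    return a

q = np.arange(6, dtype=float)
print(accel(q))   # first body's components divided by 1.0, second body's by 2.0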
def lint_cli(ctx, exclude, skip_untracked, commit_only):
# type: (click.Context, List[str], bool, bool) -> None
""" Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory
"""
if ctx.invoked_subcommand:
return
from peltak.logic import lint
lint.lint(exclude, skip_untracked, commit_only) | Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory | Below is the instruction that describes the task:
### Input:
Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory
### Response:
def lint_cli(ctx, exclude, skip_untracked, commit_only):
# type: (click.Context, List[str], bool, bool) -> None
""" Run pep8 and pylint on all project files.
You can configure the linting paths using the lint.paths config variable.
This should be a list of paths that will be linted. If a path to a directory
is given, all files in that directory and its subdirectories will be
used.
The pep8 and pylint config paths are by default stored in ops/tools/pep8.ini
and ops/tools/pylint.ini. You can customise those paths in your config with
lint.pep8_cfg and lint.pylint_cfg variables.
**Config Example**::
\b
lint:
pylint_cfg: 'ops/tools/pylint.ini'
pep8_cfg: 'ops/tools/pep8.ini'
paths:
- 'src/mypkg'
**Examples**::
\b
$ peltak lint # Run linter in default mode, skip untracked
$ peltak lint --commit # Lint only files staged for commit
$ peltak lint --all # Lint all files, including untracked.
$ peltak lint --pretend # Print the list of files to lint
$ peltak lint -e "*.tox*" # Don't lint files inside .tox directory
"""
if ctx.invoked_subcommand:
return
from peltak.logic import lint
lint.lint(exclude, skip_untracked, commit_only) |
def validate_content(*objs):
"""Runs the correct validator for given `obj`ects. Assumes all same type"""
from .main import Collection, Module
validator = {
Collection: cnxml.validate_collxml,
Module: cnxml.validate_cnxml,
}[type(objs[0])]
return validator(*[obj.file for obj in objs]) | Runs the correct validator for given `obj`ects. Assumes all same type | Below is the instruction that describes the task:
### Input:
Runs the correct validator for given `obj`ects. Assumes all same type
### Response:
def validate_content(*objs):
"""Runs the correct validator for given `obj`ects. Assumes all same type"""
from .main import Collection, Module
validator = {
Collection: cnxml.validate_collxml,
Module: cnxml.validate_cnxml,
}[type(objs[0])]
return validator(*[obj.file for obj in objs]) |
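The type-keyed dict acts as a dispatch table: the class of the first object selects the validator, which then receives every object's backing file. A small sketch of the same dispatch-by-type pattern:

def validate(*objs):
    # Pick the handler from the type of the first argument; all arguments share one type.
    handlers = {
        int: lambda *xs: all(x >= 0 for x in xs),
        str: lambda *xs: all(x.strip() for x in xs),
    }
    return handlers[type(objs[0])](*objs)

print(validate(1, 2, 3))       # True  -> int handler
print(validate("a", " "))      # False -> str handler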
def is_best_response(self, own_action, opponents_actions, tol=None):
"""
Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise.
"""
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
payoff_max = payoff_vector.max()
if isinstance(own_action, numbers.Integral):
return payoff_vector[own_action] >= payoff_max - tol
else:
return np.dot(own_action, payoff_vector) >= payoff_max - tol | Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise. | Below is the instruction that describes the task:
### Input:
Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise.
### Response:
def is_best_response(self, own_action, opponents_actions, tol=None):
"""
Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise.
"""
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
payoff_max = payoff_vector.max()
if isinstance(own_action, numbers.Integral):
return payoff_vector[own_action] >= payoff_max - tol
else:
return np.dot(own_action, payoff_vector) >= payoff_max - tol |
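Numerically, the check compares the payoff of `own_action` against the maximum payoff over all pure actions, within `tol`. A small sketch with an explicit payoff vector standing in for what `payoff_vector(opponents_actions)` would return:

import numpy as np

payoff_vector = np.array([3.0, 5.0, 5.0 - 1e-12])    # payoffs of the three pure actions
tol = 1e-8
payoff_max = payoff_vector.max()

print(payoff_vector[1] >= payoff_max - tol)          # True: pure action 1 attains the maximum
print(payoff_vector[2] >= payoff_max - tol)          # True: within tolerance of the maximum
print(payoff_vector[0] >= payoff_max - tol)          # False: action 0 pays strictly less
mixed = np.array([0.5, 0.5, 0.0])                    # a mixed action over the pure actions
print(mixed @ payoff_vector >= payoff_max - tol)     # False: expected payoff 4.0 < 5.0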
def set_name(self, name):
"""
RETURN NEW FILE WITH GIVEN EXTENSION
"""
path = self._filename.split("/")
parts = path[-1].split(".")
if len(parts) == 1:
path[-1] = name
else:
path[-1] = name + "." + parts[-1]
return File("/".join(path)) | RETURN NEW FILE WITH GIVEN EXTENSION | Below is the instruction that describes the task:
### Input:
RETURN NEW FILE WITH GIVEN EXTENSION
### Response:
def set_name(self, name):
"""
RETURN NEW FILE WITH GIVEN EXTENSION
"""
path = self._filename.split("/")
parts = path[-1].split(".")
if len(parts) == 1:
path[-1] = name
else:
path[-1] = name + "." + parts[-1]
return File("/".join(path)) |
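The behaviour is easiest to see on concrete paths: the last path segment is replaced by `name`, and any existing extension is carried over. A standalone sketch of the same string manipulation:

def set_name(filename, name):
    path = filename.split("/")
    parts = path[-1].split(".")
    path[-1] = name if len(parts) == 1 else name + "." + parts[-1]
    return "/".join(path)

print(set_name("data/report.csv", "summary"))   # data/summary.csv  (extension kept)
print(set_name("data/README", "LICENSE"))       # data/LICENSE      (no extension to keep)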
def _vector_pattern_uniform_op_right(func):
"""decorator for operator overloading when VectorPatternUniform is on
the right"""
@wraps(func)
def verif(self, patt):
if isinstance(patt, numbers.Number):
return TransversePatternUniform(func(self, self._tdsphere, patt),
func(self, self._pdsphere, patt),
doublesphere=True)
else:
raise TypeError(err_msg['no_combi_VP'])
return verif | decorator for operator overloading when VectorPatternUniform is on
the right | Below is the instruction that describes the task:
### Input:
decorator for operator overloading when VectorPatternUniform is on
the right
### Response:
def _vector_pattern_uniform_op_right(func):
"""decorator for operator overloading when VectorPatternUniform is on
the right"""
@wraps(func)
def verif(self, patt):
if isinstance(patt, numbers.Number):
return TransversePatternUniform(func(self, self._tdsphere, patt),
func(self, self._pdsphere, patt),
doublesphere=True)
else:
raise TypeError(err_msg['no_combi_VP'])
return verif |
def statvfs(path):
'''
.. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
try:
stv = os.statvfs(path)
return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
'f_frsize', 'f_namemax'))
except (OSError, IOError):
raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
return False | .. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file
### Response:
def statvfs(path):
'''
.. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
try:
stv = os.statvfs(path)
return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
'f_frsize', 'f_namemax'))
except (OSError, IOError):
raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
return False |
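On POSIX systems the underlying call is `os.statvfs`, and the dict comprehension simply projects a fixed set of fields from the result object. A minimal sketch of that projection:

import os

FIELDS = ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
          'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')

stv = os.statvfs('/')                              # path must be absolute, as enforced above
info = {key: getattr(stv, key) for key in FIELDS}
print(info['f_frsize'] * info['f_bavail'])         # bytes available to unprivileged users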
def get_folders(self):
""" Returns a list of all folders for this account
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
"""
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/'
r = requests.get(endpoint, headers=self._headers)
if check_response(r):
return Folder._json_to_folders(self, r.json()) | Returns a list of all folders for this account
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`] | Below is the instruction that describes the task:
### Input:
Returns a list of all folders for this account
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
### Response:
def get_folders(self):
""" Returns a list of all folders for this account
Returns:
List[:class:`Folder <pyOutlook.core.folder.Folder>`]
"""
endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/'
r = requests.get(endpoint, headers=self._headers)
if check_response(r):
return Folder._json_to_folders(self, r.json()) |
def UploadArtifactYamlFile(file_content,
overwrite=True,
overwrite_system_artifacts=False):
"""Upload a yaml or json file as an artifact to the datastore."""
loaded_artifacts = []
registry_obj = artifact_registry.REGISTRY
# Make sure all artifacts are loaded so we don't accidentally overwrite one.
registry_obj.GetArtifacts(reload_datastore_artifacts=True)
new_artifacts = registry_obj.ArtifactsFromYaml(file_content)
new_artifact_names = set()
# A quick syntax check before we upload anything.
for artifact_value in new_artifacts:
artifact_registry.ValidateSyntax(artifact_value)
new_artifact_names.add(artifact_value.name)
# Iterate through each artifact adding it to the collection.
artifact_coll = artifact_registry.ArtifactCollection(ARTIFACT_STORE_ROOT_URN)
current_artifacts = list(artifact_coll)
# We need to remove artifacts we are overwriting.
filtered_artifacts = [
art for art in current_artifacts if art.name not in new_artifact_names
]
artifact_coll.Delete()
with data_store.DB.GetMutationPool() as pool:
for artifact_value in filtered_artifacts:
artifact_coll.Add(artifact_value, mutation_pool=pool)
for artifact_value in new_artifacts:
registry_obj.RegisterArtifact(
artifact_value,
source="datastore:%s" % ARTIFACT_STORE_ROOT_URN,
overwrite_if_exists=overwrite,
overwrite_system_artifacts=overwrite_system_artifacts)
artifact_coll.Add(artifact_value, mutation_pool=pool)
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteArtifact(artifact_value)
loaded_artifacts.append(artifact_value)
name = artifact_value.name
logging.info("Uploaded artifact %s to %s", name, ARTIFACT_STORE_ROOT_URN)
# Once all artifacts are loaded we can validate dependencies. Note that we do
# not have to perform a syntax validation because it is already done after
# YAML is parsed.
for artifact_value in loaded_artifacts:
artifact_registry.ValidateDependencies(artifact_value) | Upload a yaml or json file as an artifact to the datastore. | Below is the instruction that describes the task:
### Input:
Upload a yaml or json file as an artifact to the datastore.
### Response:
def UploadArtifactYamlFile(file_content,
overwrite=True,
overwrite_system_artifacts=False):
"""Upload a yaml or json file as an artifact to the datastore."""
loaded_artifacts = []
registry_obj = artifact_registry.REGISTRY
# Make sure all artifacts are loaded so we don't accidentally overwrite one.
registry_obj.GetArtifacts(reload_datastore_artifacts=True)
new_artifacts = registry_obj.ArtifactsFromYaml(file_content)
new_artifact_names = set()
# A quick syntax check before we upload anything.
for artifact_value in new_artifacts:
artifact_registry.ValidateSyntax(artifact_value)
new_artifact_names.add(artifact_value.name)
# Iterate through each artifact adding it to the collection.
artifact_coll = artifact_registry.ArtifactCollection(ARTIFACT_STORE_ROOT_URN)
current_artifacts = list(artifact_coll)
# We need to remove artifacts we are overwriting.
filtered_artifacts = [
art for art in current_artifacts if art.name not in new_artifact_names
]
artifact_coll.Delete()
with data_store.DB.GetMutationPool() as pool:
for artifact_value in filtered_artifacts:
artifact_coll.Add(artifact_value, mutation_pool=pool)
for artifact_value in new_artifacts:
registry_obj.RegisterArtifact(
artifact_value,
source="datastore:%s" % ARTIFACT_STORE_ROOT_URN,
overwrite_if_exists=overwrite,
overwrite_system_artifacts=overwrite_system_artifacts)
artifact_coll.Add(artifact_value, mutation_pool=pool)
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteArtifact(artifact_value)
loaded_artifacts.append(artifact_value)
name = artifact_value.name
logging.info("Uploaded artifact %s to %s", name, ARTIFACT_STORE_ROOT_URN)
# Once all artifacts are loaded we can validate dependencies. Note that we do
# not have to perform a syntax validation because it is already done after
# YAML is parsed.
for artifact_value in loaded_artifacts:
artifact_registry.ValidateDependencies(artifact_value) |
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment | Perform a sanity check on the environment. | Below is the instruction that describes the task:
### Input:
Perform a sanity check on the environment.
### Response:
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment |
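In practice a stock Jinja2 environment passes all three checks. A quick sketch, assuming jinja2 is installed with its usual defaults:

from jinja2 import Environment, Undefined

env = Environment()                                  # default '{%', '{{', '{#' delimiters
assert issubclass(env.undefined, Undefined)
# Note: chained != only compares adjacent pairs (block vs variable, variable vs comment).
assert env.block_start_string != env.variable_start_string != env.comment_start_string
assert env.newline_sequence in ('\r', '\r\n', '\n')  # default is '\n'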
def filter(self, query, inplace=True):
"""Use a query statement to filter data. Note that you specify the data
to be removed!
Parameters
----------
query : string
The query string to be evaluated. Is directly provided to
pandas.DataFrame.query
inplace : bool
if True, change the container dataframe in place (defaults to True)
Returns
-------
result : :py:class:`pandas.DataFrame`
DataFrame that contains the result of the filter application
"""
with LogDataChanges(self, filter_action='filter', filter_query=query):
result = self.data.query(
'not ({0})'.format(query),
inplace=inplace,
)
return result | Use a query statement to filter data. Note that you specify the data
to be removed!
Parameters
----------
query : string
The query string to be evaluated. Is directly provided to
pandas.DataFrame.query
inplace : bool
if True, change the container dataframe in place (defaults to True)
Returns
-------
result : :py:class:`pandas.DataFrame`
DataFrame that contains the result of the filter application | Below is the instruction that describes the task:
### Input:
Use a query statement to filter data. Note that you specify the data
to be removed!
Parameters
----------
query : string
The query string to be evaluated. Is directly provided to
pandas.DataFrame.query
inplace : bool
if True, change the container dataframe in place (defaults to True)
Returns
-------
result : :py:class:`pandas.DataFrame`
DataFrame that contains the result of the filter application
### Response:
def filter(self, query, inplace=True):
"""Use a query statement to filter data. Note that you specify the data
to be removed!
Parameters
----------
query : string
The query string to be evaluated. Is directly provided to
pandas.DataFrame.query
inplace : bool
if True, change the container dataframe in place (defaults to True)
Returns
-------
result : :py:class:`pandas.DataFrame`
DataFrame that contains the result of the filter application
"""
with LogDataChanges(self, filter_action='filter', filter_query=query):
result = self.data.query(
'not ({0})'.format(query),
inplace=inplace,
)
return result |
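The double negation is the key detail: the query describes rows to remove, so the kept rows are those for which the query evaluates false. A minimal pandas sketch of the same inversion, without the logging wrapper:

import pandas as pd

df = pd.DataFrame({'depth': [1.0, 2.5, 4.0], 'signal': [10, 3, 7]})
kept = df.query('not (signal < 5)')   # rows matching `signal < 5` are dropped, the rest kept
print(kept)                           # keeps the rows with signal 10 and 7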
def main(): # pragma: no cover
"""Simple tests."""
opts = [
Option('--foo'),
Option('--bar'),
Option('--baz'),
Option('--key', group='secret', mutually_exclusive=True),
Option('--key-file', group='secret', mutually_exclusive=True),
Option('--key-thing', group='secret'),
Option('--this', group='things'),
Option('--who', group='group of its own'),
# Option('--more', mutually_exclusive=True), # should fail
Option('--more', mutually_exclusive=True, dest='more'), # should be ok
Option('--less', mutually_exclusive=True, dest='more'), # should be ok
]
myconf = Config(options=opts)
if len(sys.argv) == 1:
sys.argv.append('--help')
myconf.parse() | Simple tests. | Below is the instruction that describes the task:
### Input:
Simple tests.
### Response:
def main(): # pragma: no cover
"""Simple tests."""
opts = [
Option('--foo'),
Option('--bar'),
Option('--baz'),
Option('--key', group='secret', mutually_exclusive=True),
Option('--key-file', group='secret', mutually_exclusive=True),
Option('--key-thing', group='secret'),
Option('--this', group='things'),
Option('--who', group='group of its own'),
# Option('--more', mutually_exclusive=True), # should fail
Option('--more', mutually_exclusive=True, dest='more'), # should be ok
Option('--less', mutually_exclusive=True, dest='more'), # should be ok
]
myconf = Config(options=opts)
if len(sys.argv) == 1:
sys.argv.append('--help')
myconf.parse() |
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
'''
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
'''
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs) | Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1 | Below is the instruction that describes the task:
### Input:
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
### Response:
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
'''
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
'''
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs) |
def jsonrpc_method(name,
authenticated=False,
authentication_arguments=['username', 'password'],
safe=False,
validate=False,
site=default_site):
"""
Wraps a function and turns it into a json-rpc method. Adds several attributes
to the function specific to the JSON-RPC machinery and adds it to the default
jsonrpc_site if one isn't provided. You must import the module containing
these functions in your urls.py.
name
The name of your method. IE: `namespace.methodName` The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
be used to authenticate the user against `django.contrib.authenticate`
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method.
"""
def decorator(func):
arg_names = getargspec(func)[0][1:]
X = {'name': name, 'arg_names': arg_names}
if authenticated:
if authenticated is True or six.callable(authenticated):
# TODO: this is an assumption
X['arg_names'] = authentication_arguments + X['arg_names']
X['name'] = _inject_args(X['name'], ('String', 'String'))
from django.contrib.auth import authenticate as _authenticate
from django.contrib.auth.models import User
else:
authenticate = authenticated
@six.wraps(func)
def _func(request, *args, **kwargs):
user = getattr(request, 'user', None)
is_authenticated = getattr(user, 'is_authenticated',
lambda: False)
if ((user is not None and six.callable(is_authenticated) and
not is_authenticated()) or user is None):
user = None
try:
creds = args[:len(authentication_arguments)]
if len(creds) == 0:
raise IndexError
# Django's authenticate() method takes arguments as dict
user = _authenticate(username=creds[0],
password=creds[1], *creds[2:])
if user is not None:
args = args[len(authentication_arguments):]
except IndexError:
auth_kwargs = {}
try:
for auth_kwarg in authentication_arguments:
auth_kwargs[auth_kwarg] = kwargs[auth_kwarg]
except KeyError:
raise InvalidParamsError(
'Authenticated methods require at least '
'[%(arguments)s] or {%(arguments)s} arguments' %
{'arguments': ', '.join(authentication_arguments)})
user = _authenticate(**auth_kwargs)
if user is not None:
for auth_kwarg in authentication_arguments:
kwargs.pop(auth_kwarg)
if user is None:
raise InvalidCredentialsError
request.user = user
return func(request, *args, **kwargs)
else:
_func = func
@six.wraps(_func)
def exc_printer(*a, **kw):
try:
return _func(*a, **kw)
except Exception as e:
try:
print('JSONRPC SERVICE EXCEPTION')
import traceback
traceback.print_exc()
except:
pass
six.reraise(*sys.exc_info())
ret_func = exc_printer
method, arg_types, return_type = \
_parse_sig(X['name'], X['arg_names'], validate)
ret_func.json_args = X['arg_names']
ret_func.json_arg_types = arg_types
ret_func.json_return_type = return_type
ret_func.json_method = method
ret_func.json_safe = safe
ret_func.json_sig = X['name']
ret_func.json_validate = validate
site.register(method, ret_func)
return ret_func
return decorator | Wraps a function and turns it into a json-rpc method. Adds several attributes
to the function specific to the JSON-RPC machinery and adds it to the default
jsonrpc_site if one isn't provided. You must import the module containing
these functions in your urls.py.
name
The name of your method. IE: `namespace.methodName` The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
be used to authenticate the user against `django.contrib.authenticate`
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method. | Below is the instruction that describes the task:
### Input:
Wraps a function and turns it into a json-rpc method. Adds several attributes
to the function specific to the JSON-RPC machinery and adds it to the default
jsonrpc_site if one isn't provided. You must import the module containing
these functions in your urls.py.
name
The name of your method. IE: `namespace.methodName` The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
be used to authenticate the user against `django.contrib.authenticate`
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method.
### Response:
def jsonrpc_method(name,
authenticated=False,
authentication_arguments=['username', 'password'],
safe=False,
validate=False,
site=default_site):
"""
Wraps a function and turns it into a json-rpc method. Adds several attributes
to the function specific to the JSON-RPC machinery and adds it to the default
jsonrpc_site if one isn't provided. You must import the module containing
these functions in your urls.py.
name
The name of your method. IE: `namespace.methodName` The method name
can include type information, like `ns.method(String, Array) -> Nil`.
authenticated=False
Adds `username` and `password` arguments to the beginning of your
method if the user hasn't already been authenticated. These will
be used to authenticate the user against `django.contrib.authenticate`
If you use HTTP auth or other authentication middleware, `username`
and `password` will not be added, and this method will only check
against `request.user.is_authenticated`.
You may pass a callable to replace `django.contrib.auth.authenticate`
as the authentication method. It must return either a User or `None`
and take the keyword arguments `username` and `password`.
safe=False
Designates whether or not your method may be accessed by HTTP GET.
By default this is turned off.
validate=False
Validates the arguments passed to your method based on type
information provided in the signature. Supply type information by
including types in your method declaration. Like so:
@jsonrpc_method('myapp.specialSauce(Array, String)', validate=True)
def special_sauce(self, ingredients, instructions):
return SpecialSauce(ingredients, instructions)
Calls to `myapp.specialSauce` will now check each arguments type
before calling `special_sauce`, throwing an `InvalidParamsError`
when it encounters a discrepancy. This can significantly reduce the
amount of code required to write JSON-RPC services.
site=default_site
Defines which site the jsonrpc method will be added to. Can be any
object that provides a `register(name, func)` method.
"""
def decorator(func):
arg_names = getargspec(func)[0][1:]
X = {'name': name, 'arg_names': arg_names}
if authenticated:
if authenticated is True or six.callable(authenticated):
# TODO: this is an assumption
X['arg_names'] = authentication_arguments + X['arg_names']
X['name'] = _inject_args(X['name'], ('String', 'String'))
from django.contrib.auth import authenticate as _authenticate
from django.contrib.auth.models import User
else:
authenticate = authenticated
@six.wraps(func)
def _func(request, *args, **kwargs):
user = getattr(request, 'user', None)
is_authenticated = getattr(user, 'is_authenticated',
lambda: False)
if ((user is not None and six.callable(is_authenticated) and
not is_authenticated()) or user is None):
user = None
try:
creds = args[:len(authentication_arguments)]
if len(creds) == 0:
raise IndexError
# Django's authenticate() method takes arguments as dict
user = _authenticate(username=creds[0],
password=creds[1], *creds[2:])
if user is not None:
args = args[len(authentication_arguments):]
except IndexError:
auth_kwargs = {}
try:
for auth_kwarg in authentication_arguments:
auth_kwargs[auth_kwarg] = kwargs[auth_kwarg]
except KeyError:
raise InvalidParamsError(
'Authenticated methods require at least '
'[%(arguments)s] or {%(arguments)s} arguments' %
{'arguments': ', '.join(authentication_arguments)})
user = _authenticate(**auth_kwargs)
if user is not None:
for auth_kwarg in authentication_arguments:
kwargs.pop(auth_kwarg)
if user is None:
raise InvalidCredentialsError
request.user = user
return func(request, *args, **kwargs)
else:
_func = func
@six.wraps(_func)
def exc_printer(*a, **kw):
try:
return _func(*a, **kw)
except Exception as e:
try:
print('JSONRPC SERVICE EXCEPTION')
import traceback
traceback.print_exc()
except:
pass
six.reraise(*sys.exc_info())
ret_func = exc_printer
method, arg_types, return_type = \
_parse_sig(X['name'], X['arg_names'], validate)
ret_func.json_args = X['arg_names']
ret_func.json_arg_types = arg_types
ret_func.json_return_type = return_type
ret_func.json_method = method
ret_func.json_safe = safe
ret_func.json_sig = X['name']
ret_func.json_validate = validate
site.register(method, ret_func)
return ret_func
return decorator |
def get_rarity_info(self, rarity: str):
"""Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants
"""
for c in self.constants.rarities:
if c.name == rarity:
return c | Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants | Below is the instruction that describes the task:
### Input:
Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants
### Response:
def get_rarity_info(self, rarity: str):
"""Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants
"""
for c in self.constants.rarities:
if c.name == rarity:
return c |
def clamp(color, min_v, max_v):
"""
Clamps a color such that the value is between min_v and max_v.
"""
h, s, v = rgb_to_hsv(*map(down_scale, color))
min_v, max_v = map(down_scale, (min_v, max_v))
v = min(max(min_v, v), max_v)
return tuple(map(up_scale, hsv_to_rgb(h, s, v))) | Clamps a color such that the value is between min_v and max_v. | Below is the the instruction that describes the task:
### Input:
Clamps a color such that the value is between min_v and max_v.
### Response:
def clamp(color, min_v, max_v):
"""
Clamps a color such that the value is between min_v and max_v.
"""
h, s, v = rgb_to_hsv(*map(down_scale, color))
min_v, max_v = map(down_scale, (min_v, max_v))
v = min(max(min_v, v), max_v)
return tuple(map(up_scale, hsv_to_rgb(h, s, v))) |
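A self-contained sketch, assuming `down_scale`/`up_scale` map 8-bit channel values to [0, 1] and back, and using `colorsys` for the HSV round trip:

from colorsys import rgb_to_hsv, hsv_to_rgb

def down_scale(x):  # 0..255 -> 0..1 (assumed behaviour of the helpers above)
    return x / 255.0

def up_scale(x):    # 0..1 -> 0..255
    return int(round(x * 255))

def clamp(color, min_v, max_v):
    h, s, v = rgb_to_hsv(*map(down_scale, color))
    min_v, max_v = map(down_scale, (min_v, max_v))
    v = min(max(min_v, v), max_v)
    return tuple(map(up_scale, hsv_to_rgb(h, s, v)))

print(clamp((250, 120, 20), 64, 200))   # the value channel is capped at 200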
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['rubricId'] = self._rubric_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['levelId'] = self._level_default
if self._supports_simple_sequencing():
self._my_map['childIds'] = [] | Initialize form map | Below is the instruction that describes the task:
### Input:
Initialize form map
### Response:
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['rubricId'] = self._rubric_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['levelId'] = self._level_default
if self._supports_simple_sequencing():
self._my_map['childIds'] = [] |
def getresponse(self):
"""
Pass-thru method to make this class behave a little like HTTPConnection
"""
resp = self.http.getresponse()
self.log.info("resp is %s", str(resp))
if resp.status < 400:
return resp
else:
errtext = resp.read()
content_type = resp.getheader('Content-Type', 'text/plain')
raise HttpError(code=resp.status, content_type=content_type, content=errtext) | Pass-thru method to make this class behave a little like HTTPConnection | Below is the instruction that describes the task:
### Input:
Pass-thru method to make this class behave a little like HTTPConnection
### Response:
def getresponse(self):
"""
Pass-thru method to make this class behave a little like HTTPConnection
"""
resp = self.http.getresponse()
self.log.info("resp is %s", str(resp))
if resp.status < 400:
return resp
else:
errtext = resp.read()
content_type = resp.getheader('Content-Type', 'text/plain')
raise HttpError(code=resp.status, content_type=content_type, content=errtext) |
def cli(yaml_paths, pptx_template_path, font_size, master_slide_idx, slide_layout_idx, dst_dir, font_name,
slide_txt_alignment, validate):
"""
A powerpoint builder
https://github.com/sukujgrg/pptx-builder-from-yaml
"""
dst_dir = Path(dst_dir)
pptx_template_path = Path(pptx_template_path)
pptx_template = pick_master_slide(pptx_template_path)
yamlfiles = []
for yaml_path in yaml_paths:
yaml_path = Path(yaml_path)
if yaml_path.is_dir():
yamlfiles.extend([yml for yml in yaml_path.iterdir()])
else:
yamlfiles.append(yaml_path)
if validate:
exit_fail = False
for yamlfile in yamlfiles:
try:
validate_yaml_file(SCHEMA_FOR_YAML, Path(yamlfile))
msg = f"VALIDATE: Validation of {yamlfile} passed"
click.echo(click.style(msg, fg="blue"))
except jsonschema.exceptions.ValidationError as err:
msg = f"ERR: {yamlfile} {str(err.message)} {err.path}"
click.echo(click.style(msg, fg="red"), nl=True)
exit_fail = True
except Exception:
raise
if exit_fail:
sys.exit(1)
for yamlfile in yamlfiles:
try:
r = build_slide(
Path(yamlfile),
pptx_template,
master_slide_idx,
slide_layout_idx,
font_size,
dst_dir,
font_name,
slide_txt_alignment
)
msg = f"PPTX: {r}"
click.echo(click.style(msg, fg="green"))
except Exception:
raise | A powerpoint builder
https://github.com/sukujgrg/pptx-builder-from-yaml | Below is the instruction that describes the task:
### Input:
A powerpoint builder
https://github.com/sukujgrg/pptx-builder-from-yaml
### Response:
def cli(yaml_paths, pptx_template_path, font_size, master_slide_idx, slide_layout_idx, dst_dir, font_name,
slide_txt_alignment, validate):
"""
A powerpoint builder
https://github.com/sukujgrg/pptx-builder-from-yaml
"""
dst_dir = Path(dst_dir)
pptx_template_path = Path(pptx_template_path)
pptx_template = pick_master_slide(pptx_template_path)
yamlfiles = []
for yaml_path in yaml_paths:
yaml_path = Path(yaml_path)
if yaml_path.is_dir():
yamlfiles.extend([yml for yml in yaml_path.iterdir()])
else:
yamlfiles.append(yaml_path)
if validate:
exit_fail = False
for yamlfile in yamlfiles:
try:
validate_yaml_file(SCHEMA_FOR_YAML, Path(yamlfile))
msg = f"VALIDATE: Validation of {yamlfile} passed"
click.echo(click.style(msg, fg="blue"))
except jsonschema.exceptions.ValidationError as err:
msg = f"ERR: {yamlfile} {str(err.message)} {err.path}"
click.echo(click.style(msg, fg="red"), nl=True)
exit_fail = True
except Exception:
raise
if exit_fail:
sys.exit(1)
for yamlfile in yamlfiles:
try:
r = build_slide(
Path(yamlfile),
pptx_template,
master_slide_idx,
slide_layout_idx,
font_size,
dst_dir,
font_name,
slide_txt_alignment
)
msg = f"PPTX: {r}"
click.echo(click.style(msg, fg="green"))
except Exception:
raise |
def Memory_setPressureNotificationsSuppressed(self, suppressed):
"""
Function path: Memory.setPressureNotificationsSuppressed
Domain: Memory
Method name: setPressureNotificationsSuppressed
Parameters:
Required arguments:
'suppressed' (type: boolean) -> If true, memory pressure notifications will be suppressed.
No return value.
Description: Enable/disable suppressing memory pressure notifications in all processes.
"""
assert isinstance(suppressed, (bool,)
), "Argument 'suppressed' must be of type '['bool']'. Received type: '%s'" % type(
suppressed)
subdom_funcs = self.synchronous_command(
'Memory.setPressureNotificationsSuppressed', suppressed=suppressed)
return subdom_funcs | Function path: Memory.setPressureNotificationsSuppressed
Domain: Memory
Method name: setPressureNotificationsSuppressed
Parameters:
Required arguments:
'suppressed' (type: boolean) -> If true, memory pressure notifications will be suppressed.
No return value.
Description: Enable/disable suppressing memory pressure notifications in all processes. | Below is the instruction that describes the task:
### Input:
Function path: Memory.setPressureNotificationsSuppressed
Domain: Memory
Method name: setPressureNotificationsSuppressed
Parameters:
Required arguments:
'suppressed' (type: boolean) -> If true, memory pressure notifications will be suppressed.
No return value.
Description: Enable/disable suppressing memory pressure notifications in all processes.
### Response:
def Memory_setPressureNotificationsSuppressed(self, suppressed):
"""
Function path: Memory.setPressureNotificationsSuppressed
Domain: Memory
Method name: setPressureNotificationsSuppressed
Parameters:
Required arguments:
'suppressed' (type: boolean) -> If true, memory pressure notifications will be suppressed.
No return value.
Description: Enable/disable suppressing memory pressure notifications in all processes.
"""
assert isinstance(suppressed, (bool,)
), "Argument 'suppressed' must be of type '['bool']'. Received type: '%s'" % type(
suppressed)
subdom_funcs = self.synchronous_command(
'Memory.setPressureNotificationsSuppressed', suppressed=suppressed)
return subdom_funcs |
def follow_link_by_selector(self, selector):
"""
Navigate to the href of the element matching the CSS selector.
N.B. this does not click the link, but changes the browser's URL.
"""
elem = find_element_by_jquery(world.browser, selector)
href = elem.get_attribute('href')
world.browser.get(href) | Navigate to the href of the element matching the CSS selector.
N.B. this does not click the link, but changes the browser's URL. | Below is the instruction that describes the task:
### Input:
Navigate to the href of the element matching the CSS selector.
N.B. this does not click the link, but changes the browser's URL.
### Response:
def follow_link_by_selector(self, selector):
"""
Navigate to the href of the element matching the CSS selector.
N.B. this does not click the link, but changes the browser's URL.
"""
elem = find_element_by_jquery(world.browser, selector)
href = elem.get_attribute('href')
world.browser.get(href) |
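The same navigate-to-the-href idea, sketched with plain Selenium instead of the world.browser wrapper; the page URL and CSS selector are illustrative, and a local WebDriver (e.g. geckodriver) is assumed:
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()                                # assumes geckodriver on PATH
driver.get("https://example.com")                           # illustrative page
link = driver.find_element(By.CSS_SELECTOR, "a.next-page")  # illustrative selector
driver.get(link.get_attribute("href"))                      # follow the href without clicking
driver.quit()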
def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
'''
Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
filters
(dict) - Additional constraints on which volumes to return. Note that valid filters vary
extensively depending on the resource type. When in doubt, search first without a filter
and then use the returned data to help fine-tune your search. You can generally garner the
resource type from its ID (e.g. `vol-XXXXX` is a volume, `i-XXXXX` is an instance, etc.)
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_all_tags(filters)
tags = {}
for t in ret:
if t.res_id not in tags:
tags[t.res_id] = {}
tags[t.res_id][t.name] = t.value
return tags
except boto.exception.BotoServerError as e:
log.error(e)
return {} | Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
filters
(dict) - Additional constraints on which volumes to return. Note that valid filters vary
extensively depending on the resource type. When in doubt, search first without a filter
and then use the returned data to help fine-tune your search. You can generally garner the
resource type from its ID (e.g. `vol-XXXXX` is a volume, `i-XXXXX` is an instance, etc.)
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}' | Below is the instruction that describes the task:
### Input:
Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
filters
(dict) - Additional constraints on which volumes to return. Note that valid filters vary
extensively depending on the resource type. When in doubt, search first without a filter
and then use the returned data to help fine-tune your search. You can generally garner the
resource type from its ID (e.g. `vol-XXXXX` is a volume, `i-XXXXX` is an instance, etc.)
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}'
### Response:
def get_all_tags(filters=None, region=None, key=None, keyid=None, profile=None):
'''
Describe all tags matching the filter criteria, or all tags in the account otherwise.
.. versionadded:: 2018.3.0
filters
(dict) - Additional constraints on which volumes to return. Note that valid filters vary
extensively depending on the resource type. When in doubt, search first without a filter
and then use the returned data to help fine-tune your search. You can generally garner the
resource type from its ID (e.g. `vol-XXXXX` is a volume, `i-XXXXX` is an instance, etc.)
CLI Example:
.. code-block:: bash
salt-call boto_ec2.get_all_tags '{"tag:Name": myInstanceNameTag, resource-type: instance}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.get_all_tags(filters)
tags = {}
for t in ret:
if t.res_id not in tags:
tags[t.res_id] = {}
tags[t.res_id][t.name] = t.value
return tags
except boto.exception.BotoServerError as e:
log.error(e)
return {} |
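The reshaping inside the try block is plain Python and can be checked without AWS; the Tag namedtuple below is only a stand-in for the objects boto's get_all_tags would return:
from collections import namedtuple

Tag = namedtuple("Tag", "res_id name value")      # stand-in for boto tag objects
ret = [
    Tag("i-0abc", "Name", "web-1"),
    Tag("i-0abc", "Env", "prod"),
    Tag("vol-9def", "Name", "web-1-root"),
]

tags = {}
for t in ret:
    if t.res_id not in tags:
        tags[t.res_id] = {}
    tags[t.res_id][t.name] = t.value

print(tags)  # {'i-0abc': {'Name': 'web-1', 'Env': 'prod'}, 'vol-9def': {'Name': 'web-1-root'}}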
def expire_data(self):
"""Expire data within the samples collection."""
# Do we need to start deleting stuff?
while self.sample_storage_size() > self.samples_cap:
# This should return the 'oldest' record in samples
record = self.database[self.sample_collection].find().sort('import_time',pymongo.ASCENDING).limit(1)[0]
self.remove_sample(record['md5']) | Expire data within the samples collection. | Below is the instruction that describes the task:
### Input:
Expire data within the samples collection.
### Response:
def expire_data(self):
"""Expire data within the samples collection."""
# Do we need to start deleting stuff?
while self.sample_storage_size() > self.samples_cap:
# This should return the 'oldest' record in samples
record = self.database[self.sample_collection].find().sort('import_time',pymongo.ASCENDING).limit(1)[0]
self.remove_sample(record['md5']) |
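The eviction policy ("drop the oldest sample until the collection fits the cap") can be replayed without MongoDB; the records, sizes, and cap below are made up:
samples = [
    {"md5": "aaa", "import_time": 1, "size": 40},
    {"md5": "bbb", "import_time": 2, "size": 35},
    {"md5": "ccc", "import_time": 3, "size": 30},
]
samples_cap = 70                                  # pretend storage cap

def storage_size(records):
    return sum(r["size"] for r in records)

while storage_size(samples) > samples_cap:
    oldest = min(samples, key=lambda r: r["import_time"])  # same role as sort + limit(1)
    samples.remove(oldest)

print([r["md5"] for r in samples])                # ['bbb', 'ccc'] -- the oldest record was evicted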
def fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class.
'''
try:
with self.gen_lock(lock_type='update'):
log.debug('Fetching %s remote \'%s\'', self.role, self.id)
# Run provider-specific fetch code
return self._fetch()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
log.warning(
'Update lock file is present for %s remote \'%s\', '
'skipping. If this warning persists, it is possible that '
'the update process was interrupted, but the lock could '
'also have been manually set. Removing %s or running '
'\'salt-run cache.clear_git_lock %s type=update\' will '
'allow updates to continue for this remote.',
self.role,
self.id,
self._get_lock_file(lock_type='update'),
self.role,
)
return False | Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class. | Below is the instruction that describes the task:
### Input:
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class.
### Response:
def fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class.
'''
try:
with self.gen_lock(lock_type='update'):
log.debug('Fetching %s remote \'%s\'', self.role, self.id)
# Run provider-specific fetch code
return self._fetch()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
log.warning(
'Update lock file is present for %s remote \'%s\', '
'skipping. If this warning persists, it is possible that '
'the update process was interrupted, but the lock could '
'also have been manually set. Removing %s or running '
'\'salt-run cache.clear_git_lock %s type=update\' will '
'allow updates to continue for this remote.',
self.role,
self.id,
self._get_lock_file(lock_type='update'),
self.role,
)
return False |
def get_ipv4fs_table(self):
"""Returns global IPv4 Flow Specification table.
Creates the table if it does not exist.
"""
ipv4fs_table = self._global_tables.get(RF_IPv4_FLOWSPEC)
# Lazy initialization of the table.
if not ipv4fs_table:
ipv4fs_table = IPv4FlowSpecTable(self._core_service,
self._signal_bus)
self._global_tables[RF_IPv4_FLOWSPEC] = ipv4fs_table
self._tables[(None, RF_IPv4_FLOWSPEC)] = ipv4fs_table
return ipv4fs_table | Returns global IPv4 Flow Specification table.
Creates the table if it does not exist. | Below is the instruction that describes the task:
### Input:
Returns global IPv4 Flow Specification table.
Creates the table if it does not exist.
### Response:
def get_ipv4fs_table(self):
"""Returns global IPv4 Flow Specification table.
Creates the table if it does not exist.
"""
ipv4fs_table = self._global_tables.get(RF_IPv4_FLOWSPEC)
# Lazy initialization of the table.
if not ipv4fs_table:
ipv4fs_table = IPv4FlowSpecTable(self._core_service,
self._signal_bus)
self._global_tables[RF_IPv4_FLOWSPEC] = ipv4fs_table
self._tables[(None, RF_IPv4_FLOWSPEC)] = ipv4fs_table
return ipv4fs_table |
def get_files():
"""
Return the list of all source/header files in `c/` directory.
The files will have pathnames relative to the current folder, for example
"c/csv/reader_utils.cc".
"""
sources = []
headers = ["datatable/include/datatable.h"]
assert os.path.isfile(headers[0])
for dirpath, _, filenames in os.walk("c"):
for f in filenames:
fullname = os.path.join(dirpath, f)
if f.endswith(".h") or f.endswith(".inc"):
headers.append(fullname)
elif f.endswith(".c") or f.endswith(".cc"):
sources.append(fullname)
return (sources, headers) | Return the list of all source/header files in `c/` directory.
The files will have pathnames relative to the current folder, for example
"c/csv/reader_utils.cc". | Below is the instruction that describes the task:
### Input:
Return the list of all source/header files in `c/` directory.
The files will have pathnames relative to the current folder, for example
"c/csv/reader_utils.cc".
### Response:
def get_files():
"""
Return the list of all source/header files in `c/` directory.
The files will have pathnames relative to the current folder, for example
"c/csv/reader_utils.cc".
"""
sources = []
headers = ["datatable/include/datatable.h"]
assert os.path.isfile(headers[0])
for dirpath, _, filenames in os.walk("c"):
for f in filenames:
fullname = os.path.join(dirpath, f)
if f.endswith(".h") or f.endswith(".inc"):
headers.append(fullname)
elif f.endswith(".c") or f.endswith(".cc"):
sources.append(fullname)
return (sources, headers) |
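Condensed to its core, the walk is an extension-based partition; str.endswith also accepts a tuple, which shortens the checks (the "c" directory here is hypothetical, and a missing directory simply yields nothing):
import os

sources, headers = [], []
for dirpath, _, filenames in os.walk("c"):        # hypothetical source tree
    for f in filenames:
        fullname = os.path.join(dirpath, f)
        if f.endswith((".h", ".inc")):
            headers.append(fullname)
        elif f.endswith((".c", ".cc")):
            sources.append(fullname)
print(len(sources), "sources,", len(headers), "headers")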
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() | Packs the specified message into current Any message. | Below is the instruction that describes the task:
### Input:
Packs the specified message into current Any message.
### Response:
def Pack(self, msg, type_url_prefix='type.googleapis.com/'):
"""Packs the specified message into current Any message."""
if len(type_url_prefix) < 1 or type_url_prefix[-1] != '/':
self.type_url = '%s/%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
else:
self.type_url = '%s%s' % (type_url_prefix, msg.DESCRIPTOR.full_name)
self.value = msg.SerializeToString() |
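This matches the Pack helper that protobuf's well-known types mix into google.protobuf.any_pb2.Any; a round-trip sketch (requires the protobuf package, timestamp value is arbitrary):
from google.protobuf.any_pb2 import Any
from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp(seconds=1700000000)

any_msg = Any()
any_msg.Pack(ts)                        # fills type_url and the serialized value
print(any_msg.type_url)                 # type.googleapis.com/google.protobuf.Timestamp

out = Timestamp()
if any_msg.Is(Timestamp.DESCRIPTOR):    # check the payload type before unpacking
    any_msg.Unpack(out)
print(out.seconds)                      # 1700000000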
def update_dois(self):
"""Remove duplicate BibMatch DOIs."""
dois = record_get_field_instances(self.record, '024', ind1="7")
all_dois = {}
for field in dois:
subs = field_get_subfield_instances(field)
subs_dict = dict(subs)
if subs_dict.get('a'):
if subs_dict['a'] in all_dois:
record_delete_field(self.record, tag='024', ind1='7', field_position_global=field[4])
continue
all_dois[subs_dict['a']] = field | Remove duplicate BibMatch DOIs. | Below is the instruction that describes the task:
### Input:
Remove duplicate BibMatch DOIs.
### Response:
def update_dois(self):
"""Remove duplicate BibMatch DOIs."""
dois = record_get_field_instances(self.record, '024', ind1="7")
all_dois = {}
for field in dois:
subs = field_get_subfield_instances(field)
subs_dict = dict(subs)
if subs_dict.get('a'):
if subs_dict['a'] in all_dois:
record_delete_field(self.record, tag='024', ind1='7', field_position_global=field[4])
continue
all_dois[subs_dict['a']] = field |
def send_registration_mail(email, *, request, **kwargs):
"""send_registration_mail(email, *, request, **kwargs)
Sends the registration mail
* ``email``: The email address where the registration link should be
sent to.
* ``request``: A HTTP request instance, used to construct the complete
URL (including protocol and domain) for the registration link.
* Additional keyword arguments for ``get_confirmation_url`` respectively
``get_confirmation_code``.
The mail is rendered using the following two templates:
* ``registration/email_registration_email.txt``: The first line of this
template will be the subject, the third to the last line the body of the
email.
* ``registration/email_registration_email.html``: The body of the HTML
version of the mail. This template is **NOT** available by default and
is not required either.
"""
render_to_mail(
"registration/email_registration_email",
{"url": get_confirmation_url(email, request, **kwargs)},
to=[email],
).send() | send_registration_mail(email, *, request, **kwargs)
Sends the registration mail
* ``email``: The email address where the registration link should be
sent to.
* ``request``: A HTTP request instance, used to construct the complete
URL (including protocol and domain) for the registration link.
* Additional keyword arguments for ``get_confirmation_url`` respectively
``get_confirmation_code``.
The mail is rendered using the following two templates:
* ``registration/email_registration_email.txt``: The first line of this
template will be the subject, the third to the last line the body of the
email.
* ``registration/email_registration_email.html``: The body of the HTML
version of the mail. This template is **NOT** available by default and
is not required either. | Below is the instruction that describes the task:
### Input:
send_registration_mail(email, *, request, **kwargs)
Sends the registration mail
* ``email``: The email address where the registration link should be
sent to.
* ``request``: A HTTP request instance, used to construct the complete
URL (including protocol and domain) for the registration link.
* Additional keyword arguments for ``get_confirmation_url`` respectively
``get_confirmation_code``.
The mail is rendered using the following two templates:
* ``registration/email_registration_email.txt``: The first line of this
template will be the subject, the third to the last line the body of the
email.
* ``registration/email_registration_email.html``: The body of the HTML
version of the mail. This template is **NOT** available by default and
is not required either.
### Response:
def send_registration_mail(email, *, request, **kwargs):
"""send_registration_mail(email, *, request, **kwargs)
Sends the registration mail
* ``email``: The email address where the registration link should be
sent to.
* ``request``: A HTTP request instance, used to construct the complete
URL (including protocol and domain) for the registration link.
* Additional keyword arguments for ``get_confirmation_url`` respectively
``get_confirmation_code``.
The mail is rendered using the following two templates:
* ``registration/email_registration_email.txt``: The first line of this
template will be the subject, the third to the last line the body of the
email.
* ``registration/email_registration_email.html``: The body of the HTML
version of the mail. This template is **NOT** available by default and
is not required either.
"""
render_to_mail(
"registration/email_registration_email",
{"url": get_confirmation_url(email, request, **kwargs)},
to=[email],
).send() |
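A hedged sketch of calling this from a Django view (assumes a configured Django project where this module and its registration/email_registration_email.* templates are available; the form handling is illustrative):
from django.http import HttpResponseRedirect

def register(request):
    # illustrative: take the address from a submitted form and send the link
    email = request.POST["email"]
    send_registration_mail(email, request=request)
    return HttpResponseRedirect("/registration/sent/")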
def store(self, value, context=None):
"""
Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant>
"""
if isinstance(value, (str, unicode)):
value = self.valueFromString(value)
# store the internationalized property
if self.testFlag(self.Flags.I18n):
if not isinstance(value, dict):
context = context or orb.Context()
return {context.locale: value}
else:
return value
else:
return value | Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant> | Below is the instruction that describes the task:
### Input:
Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant>
### Response:
def store(self, value, context=None):
"""
Converts the value to one that is safe to store on a record within
the record values dictionary
:param value | <variant>
:return <variant>
"""
if isinstance(value, (str, unicode)):
value = self.valueFromString(value)
# store the internationalized property
if self.testFlag(self.Flags.I18n):
if not isinstance(value, dict):
context = context or orb.Context()
return {context.locale: value}
else:
return value
else:
return value |
def code(self):
"""the http status code to return to the client, by default, 200 if a body is present otherwise 204"""
code = getattr(self, '_code', None)
if not code:
if self.has_body():
code = 200
else:
code = 204
return code | the http status code to return to the client, by default, 200 if a body is present otherwise 204 | Below is the instruction that describes the task:
### Input:
the http status code to return to the client, by default, 200 if a body is present otherwise 204
### Response:
def code(self):
"""the http status code to return to the client, by default, 200 if a body is present otherwise 204"""
code = getattr(self, '_code', None)
if not code:
if self.has_body():
code = 200
else:
code = 204
return code |
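The 200-vs-204 default is easy to verify with a minimal stand-in for the response object (the _body/_code attributes here are assumptions, not the real class):
class Response:
    def __init__(self, body=None, code=None):
        self._body = body
        if code is not None:
            self._code = code            # an explicit code always wins

    def has_body(self):
        return self._body is not None

    @property
    def code(self):
        code = getattr(self, '_code', None)
        if not code:
            code = 200 if self.has_body() else 204
        return code

print(Response(body="hi").code)   # 200
print(Response().code)            # 204
print(Response(code=301).code)    # 301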
def get_rosetta_sequence_to_atom_json_map(self):
'''Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.'''
import json
if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences:
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
d = {}
for c, sm in self.rosetta_to_atom_sequence_maps.iteritems():
for k, v in sm.map.iteritems():
d[k] = v
#d[c] = sm.map
return json.dumps(d, indent = 4, sort_keys = True) | Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format. | Below is the instruction that describes the task:
### Input:
Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.
### Response:
def get_rosetta_sequence_to_atom_json_map(self):
'''Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.'''
import json
if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences:
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
d = {}
for c, sm in self.rosetta_to_atom_sequence_maps.iteritems():
for k, v in sm.map.iteritems():
d[k] = v
#d[c] = sm.map
return json.dumps(d, indent = 4, sort_keys = True) |
def cylinder(cls, **kwargs):
""" Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16.
"""
s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
if isinstance(s, list):
s = Vector(*s)
if isinstance(e, list):
e = Vector(*e)
r = kwargs.get('radius', 1.0)
slices = kwargs.get('slices', 16)
ray = e.minus(s)
axisZ = ray.unit()
isY = (math.fabs(axisZ.y) > 0.5)
axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
axisY = axisX.cross(axisZ).unit()
start = Vertex(s, axisZ.negated())
end = Vertex(e, axisZ.unit())
polygons = []
def point(stack, angle, normalBlend):
out = axisX.times(math.cos(angle)).plus(
axisY.times(math.sin(angle)))
pos = s.plus(ray.times(stack)).plus(out.times(r))
normal = out.times(1.0 - math.fabs(normalBlend)).plus(
axisZ.times(normalBlend))
return Vertex(pos, normal)
dt = math.pi * 2.0 / float(slices)
for i in range(0, slices):
t0 = i * dt
i1 = (i + 1) % slices
t1 = i1 * dt
polygons.append(Polygon([start.clone(),
point(0., t0, -1.),
point(0., t1, -1.)]))
polygons.append(Polygon([point(0., t1, 0.),
point(0., t0, 0.),
point(1., t0, 0.),
point(1., t1, 0.)]))
polygons.append(Polygon([end.clone(),
point(1., t1, 1.),
point(1., t0, 1.)]))
return CSG.fromPolygons(polygons) | Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16. | Below is the instruction that describes the task:
### Input:
Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16.
### Response:
def cylinder(cls, **kwargs):
""" Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16.
"""
s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
if isinstance(s, list):
s = Vector(*s)
if isinstance(e, list):
e = Vector(*e)
r = kwargs.get('radius', 1.0)
slices = kwargs.get('slices', 16)
ray = e.minus(s)
axisZ = ray.unit()
isY = (math.fabs(axisZ.y) > 0.5)
axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
axisY = axisX.cross(axisZ).unit()
start = Vertex(s, axisZ.negated())
end = Vertex(e, axisZ.unit())
polygons = []
def point(stack, angle, normalBlend):
out = axisX.times(math.cos(angle)).plus(
axisY.times(math.sin(angle)))
pos = s.plus(ray.times(stack)).plus(out.times(r))
normal = out.times(1.0 - math.fabs(normalBlend)).plus(
axisZ.times(normalBlend))
return Vertex(pos, normal)
dt = math.pi * 2.0 / float(slices)
for i in range(0, slices):
t0 = i * dt
i1 = (i + 1) % slices
t1 = i1 * dt
polygons.append(Polygon([start.clone(),
point(0., t0, -1.),
point(0., t1, -1.)]))
polygons.append(Polygon([point(0., t1, 0.),
point(0., t0, 0.),
point(1., t0, 0.),
point(1., t1, 0.)]))
polygons.append(Polygon([end.clone(),
point(1., t1, 1.),
point(1., t0, 1.)]))
return CSG.fromPolygons(polygons) |
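Usage, assuming the pycsg-style CSG/Vector/Vertex/Polygon classes this method belongs to are importable (the dimensions are arbitrary):
# a cylinder along the y axis, 32 segments around
cyl = CSG.cylinder(start=[0.0, -2.0, 0.0],
                   end=[0.0, 2.0, 0.0],
                   radius=0.5,
                   slices=32)
# the solid is built from three polygons per slice (bottom cap triangle,
# side quad, top cap triangle), i.e. 96 polygons here, and can then be
# combined with other CSG solids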
def __apply(self, migration=None, run_all=False):
"""
If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided.
"""
out = StringIO()
trace = None
migrate_kwargs = {
'interactive': False,
'stdout': out,
'database': self._database_name,
}
if migration is not None:
migrate_kwargs.update({
'app_label': migration[0],
'migration_name': migration[1],
})
elif not run_all:
raise ValueError('Either a migration must be provided or "run_all" must be True')
start = self._timer()
try:
call_command("migrate", **migrate_kwargs)
except Exception:
trace = ''.join(traceback.format_exception(*sys.exc_info()))
finally:
end = self._timer()
successes, failure = self._parse_migrate_output(out.getvalue())
self._migration_state.append({
'database': self._database_name,
'migration': 'all' if run_all else (migration[0], migration[1]),
'duration': end - start,
'output': _remove_escape_characters(out.getvalue()),
'succeeded_migrations': successes, # [(app, migration), ...]
'failed_migration': failure, # (app, migration)
'traceback': trace,
'succeeded': failure is None and trace is None,
})
if failure is not None:
raise CommandError("Migration failed for app '{}' - migration '{}'.\n".format(*failure))
elif trace is not None:
raise CommandError("Migrations failed unexpectedly. See self.state['traceback'] for details.") | If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided. | Below is the instruction that describes the task:
### Input:
If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided.
### Response:
def __apply(self, migration=None, run_all=False):
"""
If a migration is supplied, runs that migration and appends to state.
If run_all==True, runs all migrations.
Raises a ValueError if neither "migration" nor "run_all" are provided.
"""
out = StringIO()
trace = None
migrate_kwargs = {
'interactive': False,
'stdout': out,
'database': self._database_name,
}
if migration is not None:
migrate_kwargs.update({
'app_label': migration[0],
'migration_name': migration[1],
})
elif not run_all:
raise ValueError('Either a migration must be provided or "run_all" must be True')
start = self._timer()
try:
call_command("migrate", **migrate_kwargs)
except Exception:
trace = ''.join(traceback.format_exception(*sys.exc_info()))
finally:
end = self._timer()
successes, failure = self._parse_migrate_output(out.getvalue())
self._migration_state.append({
'database': self._database_name,
'migration': 'all' if run_all else (migration[0], migration[1]),
'duration': end - start,
'output': _remove_escape_characters(out.getvalue()),
'succeeded_migrations': successes, # [(app, migration), ...]
'failed_migration': failure, # (app, migration)
'traceback': trace,
'succeeded': failure is None and trace is None,
})
if failure is not None:
raise CommandError("Migration failed for app '{}' - migration '{}'.\n".format(*failure))
elif trace is not None:
raise CommandError("Migrations failed unexpectedly. See self.state['traceback'] for details.") |
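At its core the helper is Django's call_command('migrate', ...) with captured output; a hedged stand-alone sketch (assumes a configured Django project; 'myapp' and '0002_add_field' are placeholder names):
from io import StringIO
from django.core.management import call_command

out = StringIO()
call_command(
    "migrate",
    app_label="myapp",                  # placeholder app
    migration_name="0002_add_field",    # placeholder migration
    interactive=False,
    stdout=out,
    database="default",
)
print(out.getvalue())                   # the text _parse_migrate_output() would inspect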
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel() | This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection | Below is the instruction that describes the task:
### Input:
This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
### Response:
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel() |
async def get_departures(self):
"""Get departure info from stopid."""
from .common import CommonFunctions
common = CommonFunctions(self.loop, self.session)
departures = []
endpoint = '{}/StopVisit/GetDepartures/{}'.format(BASE_URL,
str(self.stopid))
data = await common.api_call(endpoint)
for entries in data or []:
try:
data = entries['MonitoredVehicleJourney']
if self.destination is not None:
if data['DestinationName'] == self.destination:
data = entries['MonitoredVehicleJourney']
line = data['LineRef']
destinationname = data['DestinationName']
monitored = data['MonitoredCall']
time = monitored['ExpectedDepartureTime']
departures.append({"time": time,
"line": line,
"destination": destinationname})
else:
data = entries['MonitoredVehicleJourney']
line = data['LineRef']
destinationname = data['DestinationName']
monitored = data['MonitoredCall']
time = monitored['ExpectedDepartureTime']
departures.append({"time": time,
"line": line,
"destination": destinationname})
except (TypeError, KeyError, IndexError) as error:
LOGGER.error('Error connecting to Ruter, %s', error)
self._departures = await common.sort_data(departures, 'time') | Get departure info from stopid. | Below is the instruction that describes the task:
### Input:
Get departure info from stopid.
### Response:
async def get_departures(self):
"""Get departure info from stopid."""
from .common import CommonFunctions
common = CommonFunctions(self.loop, self.session)
departures = []
endpoint = '{}/StopVisit/GetDepartures/{}'.format(BASE_URL,
str(self.stopid))
data = await common.api_call(endpoint)
for entries in data or []:
try:
data = entries['MonitoredVehicleJourney']
if self.destination is not None:
if data['DestinationName'] == self.destination:
data = entries['MonitoredVehicleJourney']
line = data['LineRef']
destinationname = data['DestinationName']
monitored = data['MonitoredCall']
time = monitored['ExpectedDepartureTime']
departures.append({"time": time,
"line": line,
"destination": destinationname})
else:
data = entries['MonitoredVehicleJourney']
line = data['LineRef']
destinationname = data['DestinationName']
monitored = data['MonitoredCall']
time = monitored['ExpectedDepartureTime']
departures.append({"time": time,
"line": line,
"destination": destinationname})
except (TypeError, KeyError, IndexError) as error:
LOGGER.error('Error connecting to Ruter, %s', error)
self._departures = await common.sort_data(departures, 'time') |
def build_docs(location="doc-source", target=None, library="icetea_lib"):
"""
Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: 3 if something fails. 0 if successful.
"""
cmd_ar = ["sphinx-apidoc", "-o", location, library]
try:
print("Generating api docs.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
target = "doc{}html".format(os.sep) if target is None else target
cmd_ar = ["sphinx-build", "-b", "html", location, target]
try:
print("Building html documentation.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
print("Documentation built.")
return 0 | Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: 3 if something fails. 0 if successful. | Below is the instruction that describes the task:
### Input:
Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: 3 if something fails. 0 if successful.
### Response:
def build_docs(location="doc-source", target=None, library="icetea_lib"):
"""
Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: 3 if something fails. 0 if successful.
"""
cmd_ar = ["sphinx-apidoc", "-o", location, library]
try:
print("Generating api docs.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
target = "doc{}html".format(os.sep) if target is None else target
cmd_ar = ["sphinx-build", "-b", "html", location, target]
try:
print("Building html documentation.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
"'pip install sphinx'.")
return 3
print("Documentation built.")
return 0 |
def _receive(self, msg):
"""
Receive a message from the input source and perhaps raise an Exception.
"""
msg = self._convert(msg)
if msg is None:
return
str_msg = self.verbose and self._msg_to_str(msg)
if self.verbose and log.is_debug():
log.debug('Message %s', str_msg)
if self.pre_routing:
self.pre_routing.receive(msg)
receiver, msg = self.routing.receive(msg)
if receiver:
receiver.receive(msg)
if self.verbose:
log.info('Routed message %s (%s) to %s', str_msg[:128], msg,
repr(receiver)) | Receive a message from the input source and perhaps raise an Exception. | Below is the instruction that describes the task:
### Input:
Receive a message from the input source and perhaps raise an Exception.
### Response:
def _receive(self, msg):
"""
Receive a message from the input source and perhaps raise an Exception.
"""
msg = self._convert(msg)
if msg is None:
return
str_msg = self.verbose and self._msg_to_str(msg)
if self.verbose and log.is_debug():
log.debug('Message %s', str_msg)
if self.pre_routing:
self.pre_routing.receive(msg)
receiver, msg = self.routing.receive(msg)
if receiver:
receiver.receive(msg)
if self.verbose:
log.info('Routed message %s (%s) to %s', str_msg[:128], msg,
repr(receiver)) |
def get_ribo_counts(ribo_fileobj, transcript_name, read_lengths, read_offsets):
"""For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_length (optional) -- If provided, get counts only for reads of this length.
"""
read_counts = {}
total_reads = 0
for record in ribo_fileobj.fetch(transcript_name):
query_length = record.query_length
position_ref = record.pos + 1
for index, read_length in enumerate(read_lengths):
position = position_ref # reset position
if read_length == 0 or read_length == query_length:
# if an offset is specified, increment position by that offset.
position += read_offsets[index]
else:
# ignore other reads/lengths
continue
total_reads += 1
try:
read_counts[position]
except KeyError:
read_counts[position] = {1: 0, 2: 0, 3: 0}
# calculate the frame of the read from position
rem = position % 3
if rem == 0:
read_counts[position][3] += 1
else:
read_counts[position][rem] += 1
log.debug('Total read counts: {}'.format(total_reads))
log.debug('RiboSeq read counts for transcript: {0}\n{1}'.format(transcript_name, read_counts))
return read_counts, total_reads | For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_length (optional) -- If provided, get counts only for reads of this length. | Below is the instruction that describes the task:
### Input:
For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_length (optional) -- If provided, get counts only for reads of this length.
### Response:
def get_ribo_counts(ribo_fileobj, transcript_name, read_lengths, read_offsets):
"""For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_length (optional) -- If provided, get counts only for reads of this length.
"""
read_counts = {}
total_reads = 0
for record in ribo_fileobj.fetch(transcript_name):
query_length = record.query_length
position_ref = record.pos + 1
for index, read_length in enumerate(read_lengths):
position = position_ref # reset position
if read_length == 0 or read_length == query_length:
# if an offset is specified, increment position by that offset.
position += read_offsets[index]
else:
# ignore other reads/lengths
continue
total_reads += 1
try:
read_counts[position]
except KeyError:
read_counts[position] = {1: 0, 2: 0, 3: 0}
# calculate the frame of the read from position
rem = position % 3
if rem == 0:
read_counts[position][3] += 1
else:
read_counts[position][rem] += 1
log.debug('Total read counts: {}'.format(total_reads))
log.debug('RiboSeq read counts for transcript: {0}\n{1}'.format(transcript_name, read_counts))
return read_counts, total_reads |
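The frame bookkeeping is just position % 3 with remainder 0 mapped to frame 3; a tiny self-contained check:
def frame_of(position):
    # 1-based transcript position -> reading frame 1, 2 or 3
    rem = position % 3
    return 3 if rem == 0 else rem

print([(p, frame_of(p)) for p in range(1, 7)])
# [(1, 1), (2, 2), (3, 3), (4, 1), (5, 2), (6, 3)]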
def _get_select_commands(self, source, tables):
"""
Create select queries for all of the tables from a source database.
:param source: Source database name
:param tables: Iterable of table names
:return: Dictionary of table keys, command values
"""
# Create dictionary of select queries
row_queries = {tbl: self.select_all(tbl, execute=False) for tbl in
tqdm(tables, total=len(tables), desc='Getting {0} select queries'.format(source))}
# Convert command strings into lists of commands
for tbl, command in row_queries.items():
if isinstance(command, str):
row_queries[tbl] = [command]
# Pack commands into list of tuples
return [(tbl, cmd) for tbl, cmds in row_queries.items() for cmd in cmds] | Create select queries for all of the tables from a source database.
:param source: Source database name
:param tables: Iterable of table names
:return: Dictionary of table keys, command values | Below is the instruction that describes the task:
### Input:
Create select queries for all of the tables from a source database.
:param source: Source database name
:param tables: Iterable of table names
:return: Dictionary of table keys, command values
### Response:
def _get_select_commands(self, source, tables):
"""
Create select queries for all of the tables from a source database.
:param source: Source database name
:param tables: Iterable of table names
:return: Dictionary of table keys, command values
"""
# Create dictionary of select queries
row_queries = {tbl: self.select_all(tbl, execute=False) for tbl in
tqdm(tables, total=len(tables), desc='Getting {0} select queries'.format(source))}
# Convert command strings into lists of commands
for tbl, command in row_queries.items():
if isinstance(command, str):
row_queries[tbl] = [command]
# Pack commands into list of tuples
return [(tbl, cmd) for tbl, cmds in row_queries.items() for cmd in cmds] |
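The last line's double comprehension flattens a dict of per-table command lists into (table, command) pairs; in isolation, with made-up queries:
row_queries = {
    "users": ["SELECT * FROM users"],
    "orders": ["SELECT * FROM orders LIMIT 0, 50000",
               "SELECT * FROM orders LIMIT 50000, 50000"],
}
pairs = [(tbl, cmd) for tbl, cmds in row_queries.items() for cmd in cmds]
print(pairs)
# [('users', 'SELECT * FROM users'),
#  ('orders', 'SELECT * FROM orders LIMIT 0, 50000'),
#  ('orders', 'SELECT * FROM orders LIMIT 50000, 50000')]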
def identify_image(image):
"""Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC.
"""
try:
# Image instance case
attrs = tuple(getattr(image, attr) for attr in UNIQUE_IMAGE_ATTRIBUTES)
except AttributeError:
# Plain dict case
attrs = tuple(image.get(attr, None) for attr in UNIQUE_IMAGE_ATTRIBUTES)
ui = UniqueImage(*attrs)
# If unified is None (which could happen in the dict case), we want default
# value of False instead. Also convert additional_variants to a list.
return ui._replace(
unified=ui.unified or False, additional_variants=ui.additional_variants or []
) | Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC. | Below is the instruction that describes the task:
### Input:
Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC.
### Response:
def identify_image(image):
"""Provides a tuple of image's UNIQUE_IMAGE_ATTRIBUTES. Note:
this is not guaranteed to be unique (and will often not be)
for pre-1.1 metadata, as subvariant did not exist. Provided as
a function so consumers can use it on plain image dicts read from
the metadata or PDC.
"""
try:
# Image instance case
attrs = tuple(getattr(image, attr) for attr in UNIQUE_IMAGE_ATTRIBUTES)
except AttributeError:
# Plain dict case
attrs = tuple(image.get(attr, None) for attr in UNIQUE_IMAGE_ATTRIBUTES)
ui = UniqueImage(*attrs)
# If unified is None (which could happen in the dict case), we want default
# value of False instead. Also convert additional_variants to a list.
return ui._replace(
unified=ui.unified or False, additional_variants=ui.additional_variants or []
) |
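The dict fallback plus _replace defaulting can be mimicked with a reduced namedtuple; the attribute list below is only a stand-in for UNIQUE_IMAGE_ATTRIBUTES:
from collections import namedtuple

ATTRS = ("arch", "format", "unified", "additional_variants")   # reduced stand-in
UniqueImage = namedtuple("UniqueImage", ATTRS)

image = {"arch": "x86_64", "format": "qcow2"}                  # plain dict, two keys missing
ui = UniqueImage(*(image.get(attr, None) for attr in ATTRS))
ui = ui._replace(unified=ui.unified or False,
                 additional_variants=ui.additional_variants or [])
print(ui)
# UniqueImage(arch='x86_64', format='qcow2', unified=False, additional_variants=[])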
def reproject_geometry(
geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
antimeridian_cutting=False
):
"""
Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
"""
src_crs = _validated_crs(src_crs)
dst_crs = _validated_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting
)
)
return _repair(out_geom) if validity_check else out_geom
# return repaired geometry if no reprojection needed
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
# geometry needs to be clipped to its CRS bounds
elif (
dst_crs.is_epsg_code and # just in case for an CRS with EPSG code
dst_crs.get("init") in CRS_BOUNDS and # if CRS has defined bounds
dst_crs.get("init") != "epsg:4326" # and is not WGS84 (does not need clipping)
):
wgs84_crs = CRS().from_epsg(4326)
# get dst_crs boundaries
crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
# reproject geometry to WGS84
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
# raise error if geometry has to be clipped
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
# clip geometry dst_crs boundaries and return
return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
# return without clipping if destination CRS does not have defined bounds
else:
return _reproject_geom(geometry, src_crs, dst_crs) | Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry`` | Below is the instruction that describes the task:
### Input:
Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
### Response:
def reproject_geometry(
geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
antimeridian_cutting=False
):
"""
Reproject a geometry to target CRS.
Also, clips geometry if it lies outside the destination CRS boundary.
Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
Mercator) and 3035 (ETRS89 / ETRS-LAEA).
Parameters
----------
geometry : ``shapely.geometry``
src_crs : ``rasterio.crs.CRS`` or EPSG code
CRS of source data
dst_crs : ``rasterio.crs.CRS`` or EPSG code
target CRS
error_on_clip : bool
raises a ``RuntimeError`` if a geometry is outside of CRS bounds
(default: False)
validity_check : bool
checks if reprojected geometry is valid and throws ``TopologicalError``
if invalid (default: True)
antimeridian_cutting : bool
cut geometry at Antimeridian; can result in a multipart output geometry
Returns
-------
geometry : ``shapely.geometry``
"""
src_crs = _validated_crs(src_crs)
dst_crs = _validated_crs(dst_crs)
def _reproject_geom(geometry, src_crs, dst_crs):
if geometry.is_empty:
return geometry
else:
out_geom = to_shape(
transform_geom(
src_crs.to_dict(),
dst_crs.to_dict(),
mapping(geometry),
antimeridian_cutting=antimeridian_cutting
)
)
return _repair(out_geom) if validity_check else out_geom
# return repaired geometry if no reprojection needed
if src_crs == dst_crs or geometry.is_empty:
return _repair(geometry)
# geometry needs to be clipped to its CRS bounds
elif (
dst_crs.is_epsg_code and # just in case for an CRS with EPSG code
dst_crs.get("init") in CRS_BOUNDS and # if CRS has defined bounds
dst_crs.get("init") != "epsg:4326" # and is not WGS84 (does not need clipping)
):
wgs84_crs = CRS().from_epsg(4326)
# get dst_crs boundaries
crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
# reproject geometry to WGS84
geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
# raise error if geometry has to be clipped
if error_on_clip and not geometry_4326.within(crs_bbox):
raise RuntimeError("geometry outside target CRS bounds")
# clip geometry dst_crs boundaries and return
return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
# return without clipping if destination CRS does not have defined bounds
else:
return _reproject_geom(geometry, src_crs, dst_crs) |
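Underneath, the reprojection is rasterio's transform_geom applied to a GeoJSON mapping; a hedged stand-alone sketch (needs rasterio and shapely installed; the coordinates are arbitrary):
from shapely.geometry import Point, mapping, shape
from rasterio.warp import transform_geom

pt_wgs84 = Point(8.54, 47.37)                                  # lon/lat, arbitrary
geojson = transform_geom("EPSG:4326", "EPSG:3857", mapping(pt_wgs84))
pt_merc = shape(geojson)                                       # back to a shapely geometry
print(pt_merc)                                                 # the same point in Web Mercator metres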
def checkCursor(self):
'Keep cursor in bounds of data and screen.'
# keep cursor within actual available rowset
if self.nRows == 0 or self.cursorRowIndex <= 0:
self.cursorRowIndex = 0
elif self.cursorRowIndex >= self.nRows:
self.cursorRowIndex = self.nRows-1
if self.cursorVisibleColIndex <= 0:
self.cursorVisibleColIndex = 0
elif self.cursorVisibleColIndex >= self.nVisibleCols:
self.cursorVisibleColIndex = self.nVisibleCols-1
if self.topRowIndex <= 0:
self.topRowIndex = 0
elif self.topRowIndex > self.nRows-1:
self.topRowIndex = self.nRows-1
# (x,y) is relative cell within screen viewport
x = self.cursorVisibleColIndex - self.leftVisibleColIndex
y = self.cursorRowIndex - self.topRowIndex + 1 # header
# check bounds, scroll if necessary
if y < 1:
self.topRowIndex = self.cursorRowIndex
elif y > self.nVisibleRows:
self.topRowIndex = self.cursorRowIndex-self.nVisibleRows+1
if x <= 0:
self.leftVisibleColIndex = self.cursorVisibleColIndex
else:
while True:
if self.leftVisibleColIndex == self.cursorVisibleColIndex: # not much more we can do
break
self.calcColLayout()
mincolidx, maxcolidx = min(self.visibleColLayout.keys()), max(self.visibleColLayout.keys())
if self.cursorVisibleColIndex < mincolidx:
self.leftVisibleColIndex -= max((self.cursorVisibleColIndex - mincolidx)//2, 1)
continue
elif self.cursorVisibleColIndex > maxcolidx:
self.leftVisibleColIndex += max((maxcolidx - self.cursorVisibleColIndex)//2, 1)
continue
cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
if cur_x+cur_w < self.vd.windowWidth: # current columns fit entirely on screen
break
self.leftVisibleColIndex += 1 | Keep cursor in bounds of data and screen. | Below is the instruction that describes the task:
### Input:
Keep cursor in bounds of data and screen.
### Response:
def checkCursor(self):
'Keep cursor in bounds of data and screen.'
# keep cursor within actual available rowset
if self.nRows == 0 or self.cursorRowIndex <= 0:
self.cursorRowIndex = 0
elif self.cursorRowIndex >= self.nRows:
self.cursorRowIndex = self.nRows-1
if self.cursorVisibleColIndex <= 0:
self.cursorVisibleColIndex = 0
elif self.cursorVisibleColIndex >= self.nVisibleCols:
self.cursorVisibleColIndex = self.nVisibleCols-1
if self.topRowIndex <= 0:
self.topRowIndex = 0
elif self.topRowIndex > self.nRows-1:
self.topRowIndex = self.nRows-1
# (x,y) is relative cell within screen viewport
x = self.cursorVisibleColIndex - self.leftVisibleColIndex
y = self.cursorRowIndex - self.topRowIndex + 1 # header
# check bounds, scroll if necessary
if y < 1:
self.topRowIndex = self.cursorRowIndex
elif y > self.nVisibleRows:
self.topRowIndex = self.cursorRowIndex-self.nVisibleRows+1
if x <= 0:
self.leftVisibleColIndex = self.cursorVisibleColIndex
else:
while True:
if self.leftVisibleColIndex == self.cursorVisibleColIndex: # not much more we can do
break
self.calcColLayout()
mincolidx, maxcolidx = min(self.visibleColLayout.keys()), max(self.visibleColLayout.keys())
if self.cursorVisibleColIndex < mincolidx:
self.leftVisibleColIndex -= max((self.cursorVisibleColIndex - mincolidx)//2, 1)
continue
elif self.cursorVisibleColIndex > maxcolidx:
self.leftVisibleColIndex += max((maxcolidx - self.cursorVisibleColIndex)//2, 1)
continue
cur_x, cur_w = self.visibleColLayout[self.cursorVisibleColIndex]
if cur_x+cur_w < self.vd.windowWidth: # current columns fit entirely on screen
break
self.leftVisibleColIndex += 1 |
def update_payment_request(self, tid, currency=None, amount=None,
action=None, ledger=None, callback_uri=None,
display_message_uri=None, capture_id=None,
additional_amount=None, text=None, refund_id=None,
required_scope=None, required_scope_text=None, line_items=None):
"""Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission.
"""
arguments = {'ledger': ledger,
'display_message_uri': display_message_uri,
'callback_uri': callback_uri,
'currency': currency,
'amount': amount,
'additional_amount': additional_amount,
'capture_id': capture_id,
'action': action,
'text': text,
'refund_id': refund_id}
if required_scope:
arguments['required_scope'] = required_scope
arguments['required_scope_text'] = required_scope_text
if line_items:
arguments['line_items'] = line_items
arguments = {k: v for k, v in arguments.items() if v is not None}
return self.do_req('PUT',
self.merchant_api_base_url + '/payment_request/' +
tid + '/', arguments) | Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission. | Below is the instruction that describes the task:
### Input:
Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items is
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission.
### Response:
def update_payment_request(self, tid, currency=None, amount=None,
action=None, ledger=None, callback_uri=None,
display_message_uri=None, capture_id=None,
additional_amount=None, text=None, refund_id=None,
required_scope=None, required_scope_text=None, line_items=None):
"""Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items is
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission.
"""
arguments = {'ledger': ledger,
'display_message_uri': display_message_uri,
'callback_uri': callback_uri,
'currency': currency,
'amount': amount,
'additional_amount': additional_amount,
'capture_id': capture_id,
'action': action,
'text': text,
'refund_id': refund_id}
if required_scope:
arguments['required_scope'] = required_scope
arguments['required_scope_text'] = required_scope_text
if line_items:
arguments['line_items'] = line_items
arguments = {k: v for k, v in arguments.items() if v is not None}
return self.do_req('PUT',
self.merchant_api_base_url + '/payment_request/' +
tid + '/', arguments) |
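A minimal sketch of the argument-assembly pattern used above: optional keyword arguments are collected into a dict and the None entries are dropped before the PUT, so only explicitly supplied fields reach the API. The values are illustrative, not real transaction data.
arguments = {'action': 'capture', 'currency': 'NOK', 'amount': '120.00',
             'capture_id': 'cap-1', 'ledger': None, 'text': None}
arguments = {k: v for k, v in arguments.items() if v is not None}
print(arguments)  # {'action': 'capture', 'currency': 'NOK', 'amount': '120.00', 'capture_id': 'cap-1'}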
def create_snapshot(self, systemId, snapshotSpecificationObject):
"""
Create snapshot for list of volumes
:param systemId: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId
"""
self.conn.connection._check_login()
#try:
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/System::", systemId, 'action/snapshotVolumes'), json=snapshotSpecificationObject.__to_dict__())
#except:
# raise RuntimeError("create_snapshot_by_system_id() - Error communicating with ScaleIO gateway")
return response | Create snapshot for list of volumes
:param systemId: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId | Below is the the instruction that describes the task:
### Input:
Create snapshot for list of volumes
:param systemId: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId
### Response:
def create_snapshot(self, systemId, snapshotSpecificationObject):
"""
Create snapshot for list of volumes
:param systemId: Cluster ID
:param snapshotSpecificationObject: Of class SnapshotSpecification
:rtype: SnapshotGroupId
"""
self.conn.connection._check_login()
#try:
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/System::", systemId, 'action/snapshotVolumes'), json=snapshotSpecificationObject.__to_dict__())
#except:
# raise RuntimeError("create_snapshot_by_system_id() - Error communicating with ScaleIO gateway")
return response |
def login(self, login, password, set_auth=False):
"""
Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client
"""
rv = self.session.post(
self.host,
dumps({
"method": "common.db.login",
"params": [login, password]
}),
)
rv = loads(rv.content)['result']
if set_auth:
self.set_auth(
SessionAuth(login, *rv)
)
return rv | Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client | Below is the the instruction that describes the task:
### Input:
Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client
### Response:
def login(self, login, password, set_auth=False):
"""
Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client
"""
rv = self.session.post(
self.host,
dumps({
"method": "common.db.login",
"params": [login, password]
}),
)
rv = loads(rv.content)['result']
if set_auth:
self.set_auth(
SessionAuth(login, *rv)
)
return rv |
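A small illustration of the JSON-RPC-style payload the method above posts; the credentials are placeholders and nothing is actually sent here.
from json import dumps, loads

payload = dumps({"method": "common.db.login", "params": ["admin", "secret"]})
print(loads(payload)["method"])  # common.db.login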
def putParamset(self, paramset, data={}):
"""
Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed.
"""
try:
if paramset in self._PARAMSETS and data:
self._proxy.putParamset(self._ADDRESS, paramset, data)
# We update all paramsets to at least have a temporarily accurate state for the device.
# This might not be true for tasks that take long to complete (lifting a rollershutter completely etc.).
# For this the server-process has to call the updateParamsets-method when it receives events for the device.
self.updateParamsets()
return True
else:
return False
except Exception as err:
LOG.error("HMGeneric.putParamset: Exception: " + str(err))
return False | Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed. | Below is the the instruction that describes the task:
### Input:
Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed.
### Response:
def putParamset(self, paramset, data={}):
"""
Some devices act upon changes to paramsets.
A "putted" paramset must not contain all keys available in the specified paramset,
just the ones which are writable and should be changed.
"""
try:
if paramset in self._PARAMSETS and data:
self._proxy.putParamset(self._ADDRESS, paramset, data)
# We update all paramsets to at least have a temporarily accurate state for the device.
# This might not be true for tasks that take long to complete (lifting a rollershutter completely etc.).
# For this the server-process has to call the updateParamsets-method when it receives events for the device.
self.updateParamsets()
return True
else:
return False
except Exception as err:
LOG.error("HMGeneric.putParamset: Exception: " + str(err))
return False |
def get_outputs_from_cm(index, cm):
"""Return indices of the outputs of node with the given index."""
return tuple(i for i in range(cm.shape[0]) if cm[index][i]) | Return indices of the outputs of node with the given index. | Below is the the instruction that describes the task:
### Input:
Return indices of the outputs of node with the given index.
### Response:
def get_outputs_from_cm(index, cm):
"""Return indices of the outputs of node with the given index."""
return tuple(i for i in range(cm.shape[0]) if cm[index][i]) |
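A self-contained check of the helper on a tiny connectivity matrix (the one-line definition is repeated so the snippet runs on its own):
import numpy as np

def get_outputs_from_cm(index, cm):
    return tuple(i for i in range(cm.shape[0]) if cm[index][i])

cm = np.array([[0, 1, 1],
               [0, 0, 1],
               [1, 0, 0]])
print(get_outputs_from_cm(0, cm))  # (1, 2): node 0 feeds nodes 1 and 2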
def load_scenario(self, scenario_name, **kwargs):
"""Load a scenario into the emulated object.
Scenarios are specific states of an object that can be customized
with keyword parameters. Typical examples are:
- data logger with full storage
- device with low battery indication on
Args:
scenario_name (str): The name of the scenario that we wish to
load.
**kwargs: Any arguments that should be passed to configure
the scenario. These arguments will be passed directly
to the scenario handler.
"""
scenario = self._known_scenarios.get(scenario_name)
if scenario is None:
raise ArgumentError("Unknown scenario %s" % scenario_name, known_scenarios=list(self._known_scenarios))
scenario(**kwargs) | Load a scenario into the emulated object.
Scenarios are specific states of an object that can be customized
with keyword parameters. Typical examples are:
- data logger with full storage
- device with low battery indication on
Args:
scenario_name (str): The name of the scenario that we wish to
load.
**kwargs: Any arguments that should be passed to configure
the scenario. These arguments will be passed directly
to the scenario handler. | Below is the the instruction that describes the task:
### Input:
Load a scenario into the emulated object.
Scenarios are specific states of an object that can be customized
with keyword parameters. Typical examples are:
- data logger with full storage
- device with low battery indication on
Args:
scenario_name (str): The name of the scenario that we wish to
load.
**kwargs: Any arguments that should be passed to configure
the scenario. These arguments will be passed directly
to the scenario handler.
### Response:
def load_scenario(self, scenario_name, **kwargs):
"""Load a scenario into the emulated object.
Scenarios are specific states of an object that can be customized
with keyword parameters. Typical examples are:
- data logger with full storage
- device with low battery indication on
Args:
scenario_name (str): The name of the scenario that we wish to
load.
**kwargs: Any arguments that should be passed to configure
the scenario. These arguments will be passed directly
to the scenario handler.
"""
scenario = self._known_scenarios.get(scenario_name)
if scenario is None:
raise ArgumentError("Unknown scenario %s" % scenario_name, known_scenarios=list(self._known_scenarios))
scenario(**kwargs) |
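A rough standalone sketch of the scenario-registry pattern the method relies on: names map to handler callables, unknown names raise, and keyword arguments are forwarded to the handler. The class and scenario names below are made up for illustration.
class FakeLogger:
    def __init__(self):
        self._known_scenarios = {'full_storage': self._full_storage}
        self.free_slots = 100

    def _full_storage(self, used=100):
        self.free_slots = 100 - used

    def load_scenario(self, scenario_name, **kwargs):
        scenario = self._known_scenarios.get(scenario_name)
        if scenario is None:
            raise ValueError("Unknown scenario %s" % scenario_name)
        scenario(**kwargs)

dev = FakeLogger()
dev.load_scenario('full_storage', used=100)
print(dev.free_slots)  # 0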
def to_api_data(self, restrict_keys=None):
""" Returns a dict to communicate with the server
:param restrict_keys: a set of keys to restrict the returned data to
:rtype: dict
"""
cc = self._cc # alias
data = {
cc('column_hidden'): self._column_hidden,
cc('row_hidden'): self._row_hidden,
cc('formulas'): self._formulas,
cc('formulas_local'): self._formulas_local,
cc('formulas_r1_c1'): self._formulas_r1_c1,
cc('number_format'): self._number_format,
cc('values'): self._values,
}
if restrict_keys:
for key in list(data.keys()):
if key not in restrict_keys:
del data[key]
return data | Returns a dict to communicate with the server
:param restrict_keys: a set of keys to restrict the returned data to
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Returns a dict to communicate with the server
:param restrict_keys: a set of keys to restrict the returned data to
:rtype: dict
### Response:
def to_api_data(self, restrict_keys=None):
""" Returns a dict to communicate with the server
:param restrict_keys: a set of keys to restrict the returned data to
:rtype: dict
"""
cc = self._cc # alias
data = {
cc('column_hidden'): self._column_hidden,
cc('row_hidden'): self._row_hidden,
cc('formulas'): self._formulas,
cc('formulas_local'): self._formulas_local,
cc('formulas_r1_c1'): self._formulas_r1_c1,
cc('number_format'): self._number_format,
cc('values'): self._values,
}
if restrict_keys:
for key in list(data.keys()):
if key not in restrict_keys:
del data[key]
return data |
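The restrict_keys filtering can be exercised on its own with plain dicts; the keys below are placeholders rather than the real API field names.
data = {'values': [[1, 2]], 'numberFormat': None, 'rowHidden': False}
restrict_keys = {'values'}
for key in list(data.keys()):
    if key not in restrict_keys:
        del data[key]
print(data)  # {'values': [[1, 2]]}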
def get_sample_frame(self):
"""Return first available image in observation result"""
for frame in self.frames:
return frame.open()
for res in self.results.values():
return res.open()
return None | Return first available image in observation result | Below is the the instruction that describes the task:
### Input:
Return first available image in observation result
### Response:
def get_sample_frame(self):
"""Return first available image in observation result"""
for frame in self.frames:
return frame.open()
for res in self.results.values():
return res.open()
return None |
def get_single_allele_from_reads(allele_reads):
"""
Given a sequence of AlleleRead objects, which are expected to all have
the same allele, return that allele.
"""
allele_reads = list(allele_reads)
if len(allele_reads) == 0:
raise ValueError("Expected non-empty list of AlleleRead objects")
seq = allele_reads[0].allele
if any(read.allele != seq for read in allele_reads):
raise ValueError("Expected all AlleleRead objects to have same allele '%s', got %s" % (
seq, allele_reads))
return seq | Given a sequence of AlleleRead objects, which are expected to all have
the same allele, return that allele. | Below is the the instruction that describes the task:
### Input:
Given a sequence of AlleleRead objects, which are expected to all have
the same allele, return that allele.
### Response:
def get_single_allele_from_reads(allele_reads):
"""
Given a sequence of AlleleRead objects, which are expected to all have
the same allele, return that allele.
"""
allele_reads = list(allele_reads)
if len(allele_reads) == 0:
raise ValueError("Expected non-empty list of AlleleRead objects")
seq = allele_reads[0].allele
if any(read.allele != seq for read in allele_reads):
raise ValueError("Expected all AlleleRead objects to have same allele '%s', got %s" % (
seq, allele_reads))
return seq |
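With the function above in scope, a namedtuple is enough to stand in for AlleleRead (the real class carries more fields than just allele):
from collections import namedtuple

AlleleRead = namedtuple("AlleleRead", "allele")
reads = [AlleleRead("ACGT"), AlleleRead("ACGT")]
print(get_single_allele_from_reads(reads))  # ACGT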
def delete(self):
"""
Delete a loopback cluster virtual interface from this engine.
Changes to the engine configuration are done immediately.
You can find cluster virtual loopbacks by iterating at the
engine level::
for loopbacks in engine.loopback_interface:
...
:raises UpdateElementFailed: failure to delete loopback interface
:return: None
"""
self._engine.data[self.typeof] = \
[loopback for loopback in self._engine.data.get(self.typeof, [])
if loopback.get('address') != self.address]
self._engine.update() | Delete a loopback cluster virtual interface from this engine.
Changes to the engine configuration are done immediately.
You can find cluster virtual loopbacks by iterating at the
engine level::
for loopbacks in engine.loopback_interface:
...
:raises UpdateElementFailed: failure to delete loopback interface
:return: None | Below is the the instruction that describes the task:
### Input:
Delete a loopback cluster virtual interface from this engine.
Changes to the engine configuration are done immediately.
You can find cluster virtual loopbacks by iterating at the
engine level::
for loopbacks in engine.loopback_interface:
...
:raises UpdateElementFailed: failure to delete loopback interface
:return: None
### Response:
def delete(self):
"""
Delete a loopback cluster virtual interface from this engine.
Changes to the engine configuration are done immediately.
You can find cluster virtual loopbacks by iterating at the
engine level::
for loopbacks in engine.loopback_interface:
...
:raises UpdateElementFailed: failure to delete loopback interface
:return: None
"""
self._engine.data[self.typeof] = \
[loopback for loopback in self._engine.data.get(self.typeof, [])
if loopback.get('address') != self.address]
self._engine.update() |
def write_display(self):
"""Write display buffer to display hardware."""
for i, value in enumerate(self.buffer):
self._device.write8(i, value) | Write display buffer to display hardware. | Below is the the instruction that describes the task:
### Input:
Write display buffer to display hardware.
### Response:
def write_display(self):
"""Write display buffer to display hardware."""
for i, value in enumerate(self.buffer):
self._device.write8(i, value) |
def char_sets():
"""Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
"""
if not hasattr(char_sets, 'setlist'):
clist = []
try:
data = requests.get('http://www.iana.org/assignments/character-'
'sets/character-sets-1.csv')
except requests.exceptions.RequestException:
return []
for line in data.iter_lines():
if line:
line = line.decode("utf-8")
if line.count(',') > 0:
vals = line.split(',')
if vals[0]:
clist.append(vals[0])
else:
clist.append(vals[1])
char_sets.setlist = clist
return char_sets.setlist | Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once. | Below is the the instruction that describes the task:
### Input:
Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
### Response:
def char_sets():
"""Return a list of the IANA Character Sets, or an empty list if the
IANA website is unreachable.
Store it as a function attribute so that we only build the list once.
"""
if not hasattr(char_sets, 'setlist'):
clist = []
try:
data = requests.get('http://www.iana.org/assignments/character-'
'sets/character-sets-1.csv')
except requests.exceptions.RequestException:
return []
for line in data.iter_lines():
if line:
line = line.decode("utf-8")
if line.count(',') > 0:
vals = line.split(',')
if vals[0]:
clist.append(vals[0])
else:
clist.append(vals[1])
char_sets.setlist = clist
return char_sets.setlist |
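The caching trick above stores the result as an attribute on the function object itself, so the HTTP request happens at most once per process. A minimal standalone version of that pattern:
def expensive_list():
    if not hasattr(expensive_list, 'setlist'):
        print('building...')          # only printed on the first call
        expensive_list.setlist = ['UTF-8', 'US-ASCII', 'ISO-8859-1']
    return expensive_list.setlist

expensive_list()   # builds and caches
expensive_list()   # served from the function attribute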
def get(self, reset=True):
"""
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
"""
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t) | Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time | Below is the the instruction that describes the task:
### Input:
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
### Response:
def get(self, reset=True):
"""
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
"""
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t) |
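Note that time.clock(), used as builtin_time.clock() above, was removed in Python 3.8; a comparable stopwatch on current Python can be sketched with time.perf_counter(). This is an illustration, not NeuroKit's actual implementation.
import time

class Clock:
    def __init__(self):
        self.reset()

    def reset(self):
        self._start = time.perf_counter()

    def get(self, reset=True):
        elapsed_ms = (time.perf_counter() - self._start) * 1000
        if reset is True:
            self.reset()
        return elapsed_ms

clock = Clock()
time.sleep(0.05)
print(round(clock.get()))  # roughly 50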
def check_fast(self, r, k=None):
'''Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
returns True/False
'''
n = self.n
if not k: k = self.k
#h = self.h[k]
g = self.g[k]
# If we were given a string, convert to a list (important to support fields above 2^8)
if isinstance(r, _str):
r = [ord(x) for x in r]
# Turn r into a polynomial
r = Polynomial([GF2int(x) for x in r])
# Compute the syndromes:
sz = self._syndromes(r, k=k)
# Checking that the syndrome is all 0 is sufficient to verify that there are no errors left in the decoded message
#return all(int(x) == 0 for x in sz)
return sz.coefficients.count(GF2int(0)) == len(sz) | Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
returns True/False | Below is the the instruction that describes the task:
### Input:
Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
returns True/False
### Response:
def check_fast(self, r, k=None):
'''Fast check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
returns True/False
'''
n = self.n
if not k: k = self.k
#h = self.h[k]
g = self.g[k]
# If we were given a string, convert to a list (important to support fields above 2^8)
if isinstance(r, _str):
r = [ord(x) for x in r]
# Turn r into a polynomial
r = Polynomial([GF2int(x) for x in r])
# Compute the syndromes:
sz = self._syndromes(r, k=k)
# Checking that the syndrome is all 0 is sufficient to verify that there are no errors left in the decoded message
#return all(int(x) == 0 for x in sz)
return sz.coefficients.count(GF2int(0)) == len(sz) |
def create_point(self, x, y):
"""Create an ECDSA point on the SECP256k1 curve with the given coords.
:param x: The x coordinate on the curve
:type x: long
:param y: The y coordinate on the curve
:type y: long
"""
if (not isinstance(x, six.integer_types) or
not isinstance(y, six.integer_types)):
raise ValueError("The coordinates must be longs.")
return _ECDSA_Point(SECP256k1.curve, x, y) | Create an ECDSA point on the SECP256k1 curve with the given coords.
:param x: The x coordinate on the curve
:type x: long
:param y: The y coordinate on the curve
:type y: long | Below is the the instruction that describes the task:
### Input:
Create an ECDSA point on the SECP256k1 curve with the given coords.
:param x: The x coordinate on the curve
:type x: long
:param y: The y coordinate on the curve
:type y: long
### Response:
def create_point(self, x, y):
"""Create an ECDSA point on the SECP256k1 curve with the given coords.
:param x: The x coordinate on the curve
:type x: long
:param y: The y coordinate on the curve
:type y: long
"""
if (not isinstance(x, six.integer_types) or
not isinstance(y, six.integer_types)):
raise ValueError("The coordinates must be longs.")
return _ECDSA_Point(SECP256k1.curve, x, y) |
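A quick way to exercise this with the third-party ecdsa package is to reuse the curve's own generator coordinates, which are guaranteed to lie on SECP256k1 (hypothetical usage; requires ecdsa to be installed):
from ecdsa import SECP256k1
from ecdsa.ellipticcurve import Point

g = SECP256k1.generator
point = Point(SECP256k1.curve, g.x(), g.y())
print(point.x() == g.x())  # True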
def timeseries(self, start, end, **kwargs):
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None.
"""
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['token'] = self.token
return self._get_response('stations/timeseries', kwargs) | r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None. | Below is the the instruction that describes the task:
### Input:
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None.
### Response:
def timeseries(self, start, end, **kwargs):
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None.
"""
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['token'] = self.token
return self._get_response('stations/timeseries', kwargs) |
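Hypothetical usage, assuming the containing class is constructed with an API token as in MesoPy's Meso client; it will not run without a valid token and network access, and the station/variable names are only examples.
m = Meso(token='YOUR_TOKEN')
obs = m.timeseries(start='201306011800', end='201306011900',
                   stid='kslc', vars='air_temp', units='temp|F')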
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None.
"""
row_index = getattr(path_spec, 'row_index', None)
row_condition = getattr(path_spec, 'row_condition', None)
# If no row_index or row_condition is provided, return a directory.
if row_index is None and row_condition is None:
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None. | Below is the the instruction that describes the task:
### Input:
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None.
### Response:
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileEntry: a file entry or None.
"""
row_index = getattr(path_spec, 'row_index', None)
row_condition = getattr(path_spec, 'row_condition', None)
# If no row_index or row_condition is provided, return a directory.
if row_index is None and row_condition is None:
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
return sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self, path_spec) |
def _canonicalize_query(self, query):
"""
Transform the query dictionary to replace e.g. documents with __ref__ fields.
"""
def transform_query(q):
for encoder in self.query_encoders:
q = encoder.encode(q,[])
if isinstance(q, dict):
nq = {}
for key,value in q.items():
new_key = key
if isinstance(value,dict) and len(value) == 1 and list(value.keys())[0].startswith('$'):
if list(value.keys())[0] in ('$all','$in'):
if list(value.values())[0] and isinstance(list(value.values())[0][0],Document):
if self._use_pk_based_refs:
new_key+='.pk'
else:
new_key+='.__ref__'
elif isinstance(value,Document):
if self._use_pk_based_refs:
new_key+='.pk'
else:
new_key+='.__ref__'
nq[new_key] = transform_query(value)
return nq
elif isinstance(q, (list,QuerySet,tuple)):
return [transform_query(x) for x in q]
elif isinstance(q,Document):
collection = self.get_collection_for_obj(q)
if self._use_pk_based_refs:
return q.pk
else:
return "%s:%s" % (collection,q.pk)
else:
return q
return transform_query(query) | Transform the query dictionary to replace e.g. documents with __ref__ fields. | Below is the the instruction that describes the task:
### Input:
Transform the query dictionary to replace e.g. documents with __ref__ fields.
### Response:
def _canonicalize_query(self, query):
"""
Transform the query dictionary to replace e.g. documents with __ref__ fields.
"""
def transform_query(q):
for encoder in self.query_encoders:
q = encoder.encode(q,[])
if isinstance(q, dict):
nq = {}
for key,value in q.items():
new_key = key
if isinstance(value,dict) and len(value) == 1 and list(value.keys())[0].startswith('$'):
if list(value.keys())[0] in ('$all','$in'):
if list(value.values())[0] and isinstance(list(value.values())[0][0],Document):
if self._use_pk_based_refs:
new_key+='.pk'
else:
new_key+='.__ref__'
elif isinstance(value,Document):
if self._use_pk_based_refs:
new_key+='.pk'
else:
new_key+='.__ref__'
nq[new_key] = transform_query(value)
return nq
elif isinstance(q, (list,QuerySet,tuple)):
return [transform_query(x) for x in q]
elif isinstance(q,Document):
collection = self.get_collection_for_obj(q)
if self._use_pk_based_refs:
return q.pk
else:
return "%s:%s" % (collection,q.pk)
else:
return q
return transform_query(query) |
def dragMoveEvent( self, event ):
"""
Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent>
"""
filt = self.dragDropFilter()
if ( not filt ):
super(XCalendarWidget, self).dragMoveEvent(event)
return
filt(self, event) | Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent> | Below is the the instruction that describes the task:
### Input:
Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent>
### Response:
def dragMoveEvent( self, event ):
"""
Processes the drag drop event using the filter set by the \
setDragDropFilter
:param event | <QDragEvent>
"""
filt = self.dragDropFilter()
if ( not filt ):
super(XCalendarWidget, self).dragMoveEvent(event)
return
filt(self, event) |
def cmd_map(self, args):
'''map commands'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if len(args) < 1:
print("usage: map <icon|set>")
elif args[0] == "icon":
if len(args) < 3:
print("Usage: map icon <lat> <lon> <icon>")
else:
lat = args[1]
lon = args[2]
flag = 'flag.png'
if len(args) > 3:
flag = args[3] + '.png'
icon = self.map.icon(flag)
self.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag),self.icon_counter),
(float(lat),float(lon)),
icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1
elif args[0] == "set":
self.map_settings.command(args[1:])
self.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness))
elif args[0] == "sethome":
self.cmd_set_home(args)
elif args[0] == "sethomepos":
self.cmd_set_homepos(args)
elif args[0] == "setorigin":
self.cmd_set_origin(args)
elif args[0] == "setoriginpos":
self.cmd_set_originpos(args)
elif args[0] == "zoom":
self.cmd_zoom(args)
elif args[0] == "center":
self.cmd_center(args)
elif args[0] == "follow":
self.cmd_follow(args)
else:
print("usage: map <icon|set>") | map commands | Below is the the instruction that describes the task:
### Input:
map commands
### Response:
def cmd_map(self, args):
'''map commands'''
from MAVProxy.modules.mavproxy_map import mp_slipmap
if len(args) < 1:
print("usage: map <icon|set>")
elif args[0] == "icon":
if len(args) < 3:
print("Usage: map icon <lat> <lon> <icon>")
else:
lat = args[1]
lon = args[2]
flag = 'flag.png'
if len(args) > 3:
flag = args[3] + '.png'
icon = self.map.icon(flag)
self.map.add_object(mp_slipmap.SlipIcon('icon - %s [%u]' % (str(flag),self.icon_counter),
(float(lat),float(lon)),
icon, layer=3, rotation=0, follow=False))
self.icon_counter += 1
elif args[0] == "set":
self.map_settings.command(args[1:])
self.map.add_object(mp_slipmap.SlipBrightness(self.map_settings.brightness))
elif args[0] == "sethome":
self.cmd_set_home(args)
elif args[0] == "sethomepos":
self.cmd_set_homepos(args)
elif args[0] == "setorigin":
self.cmd_set_origin(args)
elif args[0] == "setoriginpos":
self.cmd_set_originpos(args)
elif args[0] == "zoom":
self.cmd_zoom(args)
elif args[0] == "center":
self.cmd_center(args)
elif args[0] == "follow":
self.cmd_follow(args)
else:
print("usage: map <icon|set>") |
async def i2c_read_request(self, command):
"""
This method sends an I2C read request to Firmata. It is qualified by a single shot, continuous
read, or stop reading command.
Special Note: for the read type supply one of the following string values:
"0" = I2C_READ
"1" = I2C_READ | I2C_END_TX_MASK"
"2" = I2C_READ_CONTINUOUSLY
"3" = I2C_READ_CONTINUOUSLY | I2C_END_TX_MASK
"4" = I2C_STOP_READING
:param command: {"method": "i2c_read_request", "params": [I2C_ADDRESS, I2C_REGISTER,
NUMBER_OF_BYTES, I2C_READ_TYPE ]}
:returns: {"method": "i2c_read_request_reply", "params": [DATA]}
"""
device_address = int(command[0])
register = int(command[1])
number_of_bytes = int(command[2])
if command[3] == "0":
read_type = Constants.I2C_READ_CONTINUOUSLY
elif command[3] == "1":
read_type = Constants.I2C_READ
elif command[3] == "2":
read_type = Constants.I2C_READ | Constants.I2C_END_TX_MASK
elif command[3] == "3":
read_type = Constants.I2C_READ_CONTINUOUSLY | Constants.I2C_END_TX_MASK
else: # the default case stop reading valid request or invalid request
read_type = Constants.I2C_STOP_READING
await self.core.i2c_read_request(device_address, register, number_of_bytes, read_type,
self.i2c_read_request_callback)
await asyncio.sleep(.1) | This method sends an I2C read request to Firmata. It is qualified by a single shot, continuous
read, or stop reading command.
Special Note: for the read type supply one of the following string values:
"0" = I2C_READ
"1" = I2C_READ | I2C_END_TX_MASK"
"2" = I2C_READ_CONTINUOUSLY
"3" = I2C_READ_CONTINUOUSLY | I2C_END_TX_MASK
"4" = I2C_STOP_READING
:param command: {"method": "i2c_read_request", "params": [I2C_ADDRESS, I2C_REGISTER,
NUMBER_OF_BYTES, I2C_READ_TYPE ]}
:returns: {"method": "i2c_read_request_reply", "params": [DATA]} | Below is the the instruction that describes the task:
### Input:
This method sends an I2C read request to Firmata. It is qualified by a single shot, continuous
read, or stop reading command.
Special Note: for the read type supply one of the following string values:
"0" = I2C_READ
"1" = I2C_READ | I2C_END_TX_MASK"
"2" = I2C_READ_CONTINUOUSLY
"3" = I2C_READ_CONTINUOUSLY | I2C_END_TX_MASK
"4" = I2C_STOP_READING
:param command: {"method": "i2c_read_request", "params": [I2C_ADDRESS, I2C_REGISTER,
NUMBER_OF_BYTES, I2C_READ_TYPE ]}
:returns: {"method": "i2c_read_request_reply", "params": [DATA]}
### Response:
async def i2c_read_request(self, command):
"""
This method sends an I2C read request to Firmata. It is qualified by a single shot, continuous
read, or stop reading command.
Special Note: for the read type supply one of the following string values:
"0" = I2C_READ
"1" = I2C_READ | I2C_END_TX_MASK"
"2" = I2C_READ_CONTINUOUSLY
"3" = I2C_READ_CONTINUOUSLY | I2C_END_TX_MASK
"4" = I2C_STOP_READING
:param command: {"method": "i2c_read_request", "params": [I2C_ADDRESS, I2C_REGISTER,
NUMBER_OF_BYTES, I2C_READ_TYPE ]}
:returns: {"method": "i2c_read_request_reply", "params": [DATA]}
"""
device_address = int(command[0])
register = int(command[1])
number_of_bytes = int(command[2])
if command[3] == "0":
read_type = Constants.I2C_READ_CONTINUOUSLY
elif command[3] == "1":
read_type = Constants.I2C_READ
elif command[3] == "2":
read_type = Constants.I2C_READ | Constants.I2C_END_TX_MASK
elif command[3] == "3":
read_type = Constants.I2C_READ_CONTINUOUSLY | Constants.I2C_END_TX_MASK
else: # the default case stop reading valid request or invalid request
read_type = Constants.I2C_STOP_READING
await self.core.i2c_read_request(device_address, register, number_of_bytes, read_type,
self.i2c_read_request_callback)
await asyncio.sleep(.1) |
def read_image(image, path=''):
"""Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image.
"""
return imageio.imread(os.path.join(path, image)) | Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image. | Below is the the instruction that describes the task:
### Input:
Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image.
### Response:
def read_image(image, path=''):
"""Read one image.
Parameters
-----------
image : str
The image file name.
path : str
The image folder path.
Returns
-------
numpy.array
The image.
"""
return imageio.imread(os.path.join(path, image)) |
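A round-trip check for the helper above; it writes a tiny image with imageio first so the example does not depend on any existing file (needs imageio and numpy installed, and read_image from above in scope):
import numpy as np
import imageio

imageio.imwrite('tiny.png', np.zeros((2, 2, 3), dtype=np.uint8))
img = read_image('tiny.png')
print(img.shape)  # (2, 2, 3)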
def save(package, data, params={}, is_public=False):
"""Build and push data to Quilt registry at user/package/data_node,
associating params as metadata for the data node.
:param package: short package specifier string, i.e. 'team:user/pkg/subpath'
:param data: data to save (np.ndarray or pd.DataFrame)
:param params: metadata dictionary
:param is_public: boolean kwarg to push the packages publicly
"""
for key, value in params.items():
if isinstance(value, np.ndarray):
value = value.astype(float)
params[key] = value.tolist()
build_from_node(package, nodes.DataNode(None, None, data, params))
push('{}'.format(package), is_public=is_public) | Build and push data to Quilt registry at user/package/data_node,
associating params as metadata for the data node.
:param package: short package specifier string, i.e. 'team:user/pkg/subpath'
:param data: data to save (np.ndarray or pd.DataFrame)
:param params: metadata dictionary
:param is_public: boolean kwarg to push the packages publicly | Below is the the instruction that describes the task:
### Input:
Build and push data to Quilt registry at user/package/data_node,
associating params as metadata for the data node.
:param package: short package specifier string, i.e. 'team:user/pkg/subpath'
:param data: data to save (np.ndarray or pd.DataFrame)
:param params: metadata dictionary
:param is_public: boolean kwarg to push the packages publicly
### Response:
def save(package, data, params={}, is_public=False):
"""Build and push data to Quilt registry at user/package/data_node,
associating params as metadata for the data node.
:param package: short package specifier string, i.e. 'team:user/pkg/subpath'
:param data: data to save (np.ndarray or pd.DataFrame)
:param params: metadata dictionary
:param is_public: boolean kwarg to push the packages publicly
"""
for key, value in params.items():
if isinstance(value, np.ndarray):
value = value.astype(float)
params[key] = value.tolist()
build_from_node(package, nodes.DataNode(None, None, data, params))
push('{}'.format(package), is_public=is_public) |
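The metadata normalisation step above can be tried in isolation: numpy arrays are converted to plain lists so they serialise as JSON-friendly values. Sample data only.
import numpy as np

params = {'threshold': np.array([1, 2, 3]), 'label': 'run-1'}
for key, value in params.items():
    if isinstance(value, np.ndarray):
        value = value.astype(float)
        params[key] = value.tolist()
print(params)  # {'threshold': [1.0, 2.0, 3.0], 'label': 'run-1'}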
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0] | Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed. | Below is the the instruction that describes the task:
### Input:
Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
### Response:
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0] |
def psd(self):
"""
A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation.
"""
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd | A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation. | Below is the the instruction that describes the task:
### Input:
A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation.
### Response:
def psd(self):
"""
A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation.
"""
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd |
def fail_remaining(self):
"""
Mark all unfinished tasks (including currently running ones) as
failed.
"""
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() | Mark all unfinished tasks (including currently running ones) as
failed. | Below is the the instruction that describes the task:
### Input:
Mark all unfinished tasks (including currently running ones) as
failed.
### Response:
def fail_remaining(self):
"""
Mark all unfinished tasks (including currently running ones) as
failed.
"""
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() |
def _render_our_module_flags(self, module, output_lines, prefix=''):
"""Returns a help string for a given module."""
flags = self._get_flags_defined_by_module(module)
if flags:
self._render_module_flags(module, flags, output_lines, prefix) | Returns a help string for a given module. | Below is the the instruction that describes the task:
### Input:
Returns a help string for a given module.
### Response:
def _render_our_module_flags(self, module, output_lines, prefix=''):
"""Returns a help string for a given module."""
flags = self._get_flags_defined_by_module(module)
if flags:
self._render_module_flags(module, flags, output_lines, prefix) |
def _kamb_count(cos_dist, sigma=3):
"""Original Kamb kernel function (raw count within radius)."""
n = float(cos_dist.size)
dist = _kamb_radius(n, sigma)
count = (cos_dist >= dist).astype(float)
return count, _kamb_units(n, dist) | Original Kamb kernel function (raw count within radius). | Below is the the instruction that describes the task:
### Input:
Original Kamb kernel function (raw count within radius).
### Response:
def _kamb_count(cos_dist, sigma=3):
"""Original Kamb kernel function (raw count within radius)."""
n = float(cos_dist.size)
dist = _kamb_radius(n, sigma)
count = (cos_dist >= dist).astype(float)
return count, _kamb_units(n, dist) |
def cancel_order(self, order_id, stock):
"""Cancel An Order
https://starfighter.readme.io/docs/cancel-an-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.delete(url).json() | Cancel An Order
https://starfighter.readme.io/docs/cancel-an-order | Below is the the instruction that describes the task:
### Input:
Cancel An Order
https://starfighter.readme.io/docs/cancel-an-order
### Response:
def cancel_order(self, order_id, stock):
"""Cancel An Order
https://starfighter.readme.io/docs/cancel-an-order
"""
url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
venue=self.venue,
stock=stock,
order_id=order_id,
)
url = urljoin(self.base_url, url_fragment)
return self.session.delete(url).json() |
def _flush_graph_val(self):
"""Send all new and changed graph values to the database."""
if not self._graphvals2set:
return
delafter = {}
for graph, key, branch, turn, tick, value in self._graphvals2set:
if (graph, key, branch) in delafter:
delafter[graph, key, branch] = min((
(turn, tick),
delafter[graph, key, branch]
))
else:
delafter[graph, key, branch] = (turn, tick)
self.sqlmany(
'del_graph_val_after',
*((graph, key, branch, turn, turn, tick)
for ((graph, key, branch), (turn, tick)) in delafter.items())
)
self.sqlmany('graph_val_insert', *self._graphvals2set)
self._graphvals2set = [] | Send all new and changed graph values to the database. | Below is the the instruction that describes the task:
### Input:
Send all new and changed graph values to the database.
### Response:
def _flush_graph_val(self):
"""Send all new and changed graph values to the database."""
if not self._graphvals2set:
return
delafter = {}
for graph, key, branch, turn, tick, value in self._graphvals2set:
if (graph, key, branch) in delafter:
delafter[graph, key, branch] = min((
(turn, tick),
delafter[graph, key, branch]
))
else:
delafter[graph, key, branch] = (turn, tick)
self.sqlmany(
'del_graph_val_after',
*((graph, key, branch, turn, turn, tick)
for ((graph, key, branch), (turn, tick)) in delafter.items())
)
self.sqlmany('graph_val_insert', *self._graphvals2set)
self._graphvals2set = [] |
def run(self, target, payload, instance_id=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object
"""
requests.post(
url=target,
data=json.dumps(payload),
headers={
'Content-Type': 'application/json',
'Authorization': 'Token %s' % settings.HOOK_AUTH_TOKEN
}
) | target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object | Below is the the instruction that describes the task:
### Input:
target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object
### Response:
def run(self, target, payload, instance_id=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object
"""
requests.post(
url=target,
data=json.dumps(payload),
headers={
'Content-Type': 'application/json',
'Authorization': 'Token %s' % settings.HOOK_AUTH_TOKEN
}
) |
def make_release(cts):
'''Make and upload the release.
Changelog:
- v0.2.1 -- 2016-11-18 -- specify downloading of non-cached version of the
package for multiple formats can be properly and
individually tested.
- 0.2.2 -- 2016-11-28 -- move configuration to top of file
'''
make_release_version = __version__
colorama.init()
text.title("Minchin 'Make Release' for Python v{}".format(make_release_version))
print()
text.subtitle("Configuration")
print("base dir -> {}".format(here_directory()))
print("source -> .\{}\\".format(source_directory().relative_to(here_directory())))
print("test dir -> .\{}\\".format(test_directory().relative_to(here_directory())))
#print("doc dir -> .\{}\\".format(doc_directory().relative_to(here_directory())))
print("version file -> .\{}".format(version_file().relative_to(here_directory())))
print()
text.subtitle("Git -- Clean directory?")
print()
text.subtitle("Sort Import Statements")
print()
text.subtitle("Run Tests")
print()
text.subtitle("Update Version Number")
new_version = update_version_number(None)
print()
text.subtitle("Add Release to Changelog")
print()
text.subtitle("Build Documentation")
print()
text.query_yes_quit('All good and ready to go?')
text.subtitle("Build Distributions")
build_distribution()
for server in [
#"local",
#"testpypi",
"pypi",
]:
for file_format in ["tar.gz", "whl"]:
print()
text.subtitle("Test {} Build {}".format(file_format, server))
check_local_install(new_version, file_format, server) | Make and upload the release.
Changelog:
- v0.2.1 -- 2016-11-18 -- specify downloading of non-cached version of the
package for multiple formats can be properly and
individually tested.
- 0.2.2 -- 2016-11-28 -- move configuration to top of file | Below is the the instruction that describes the task:
### Input:
Make and upload the release.
Changelog:
- v0.2.1 -- 2016-11-18 -- specify downloading of non-cached version of the
package for multiple formats can be properly and
individually tested.
- 0.2.2 -- 2016-11-28 -- move configuration to top of file
### Response:
def make_release(cts):
'''Make and upload the release.
Changelog:
- v0.2.1 -- 2016-11-18 -- specify downloading of non-cached version of the
package for multiple formats can be properly and
individually tested.
- 0.2.2 -- 2016-11-28 -- move configuration to top of file
'''
make_release_version = __version__
colorama.init()
text.title("Minchin 'Make Release' for Python v{}".format(make_release_version))
print()
text.subtitle("Configuration")
print("base dir -> {}".format(here_directory()))
print("source -> .\{}\\".format(source_directory().relative_to(here_directory())))
print("test dir -> .\{}\\".format(test_directory().relative_to(here_directory())))
#print("doc dir -> .\{}\\".format(doc_directory().relative_to(here_directory())))
print("version file -> .\{}".format(version_file().relative_to(here_directory())))
print()
text.subtitle("Git -- Clean directory?")
print()
text.subtitle("Sort Import Statements")
print()
text.subtitle("Run Tests")
print()
text.subtitle("Update Version Number")
new_version = update_version_number(None)
print()
text.subtitle("Add Release to Changelog")
print()
text.subtitle("Build Documentation")
print()
text.query_yes_quit('All good and ready to go?')
text.subtitle("Build Distributions")
build_distribution()
for server in [
#"local",
#"testpypi",
"pypi",
]:
for file_format in ["tar.gz", "whl"]:
print()
text.subtitle("Test {} Build {}".format(file_format, server))
check_local_install(new_version, file_format, server) |
async def _process_latching(self, key, latching_entry):
"""
This is a private utility method.
This method processes latching events and either returns them via
callback or stores them in the latch map
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map
"""
if latching_entry[Constants.LATCH_CALLBACK]:
# auto clear entry and execute the callback
if latching_entry[Constants.LATCH_CALLBACK_TYPE]:
await latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
# noinspection PyPep8
else:
latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
self.latch_map[key] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[Constants.LATCH_STATE] = \
Constants.LATCH_LATCHED
updated_latch_entry[Constants.LATCHED_DATA] = \
latching_entry[Constants.LATCHED_DATA]
# time stamp it
updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time()
self.latch_map[key] = updated_latch_entry | This is a private utility method.
This method processes latching events and either returns them via
callback or stores them in the latch map
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map | Below is the the instruction that describes the task:
### Input:
This is a private utility method.
This method processes latching events and either returns them via
callback or stores them in the latch map
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map
### Response:
async def _process_latching(self, key, latching_entry):
"""
This is a private utility method.
This method process latching events and either returns them via
callback or stores them in the latch map
:param key: Encoded pin
:param latching_entry: a latch table entry
:returns: Callback or store data in latch map
"""
if latching_entry[Constants.LATCH_CALLBACK]:
# auto clear entry and execute the callback
if latching_entry[Constants.LATCH_CALLBACK_TYPE]:
await latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
# noinspection PyPep8
else:
latching_entry[Constants.LATCH_CALLBACK] \
([key, latching_entry[Constants.LATCHED_DATA], time.time()])
self.latch_map[key] = [0, 0, 0, 0, 0, None]
else:
updated_latch_entry = latching_entry
updated_latch_entry[Constants.LATCH_STATE] = \
Constants.LATCH_LATCHED
updated_latch_entry[Constants.LATCHED_DATA] = \
latching_entry[Constants.LATCHED_DATA]
# time stamp it
updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time()
self.latch_map[key] = updated_latch_entry |
def _sanitize(self, value):
"""
Remove the control characters that are not allowed in XML:
https://www.w3.org/TR/xml/#charsets
Leave all other characters.
"""
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
if isinstance(value, six.text_type):
new_value = ''.join(ch for ch in value if self._valid_char(ch))
else:
return value
# The new string will be equivalent to the original string if no control characters are present.
# If equivalent, return the original string - some tests check for object equality instead of string equality.
return value if value == new_value else new_value | Remove the control characters that are not allowed in XML:
https://www.w3.org/TR/xml/#charsets
Leave all other characters. | Below is the the instruction that describes the task:
### Input:
Remove the control characters that are not allowed in XML:
https://www.w3.org/TR/xml/#charsets
Leave all other characters.
### Response:
def _sanitize(self, value):
"""
Remove the control characters that are not allowed in XML:
https://www.w3.org/TR/xml/#charsets
Leave all other characters.
"""
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
if isinstance(value, six.text_type):
new_value = ''.join(ch for ch in value if self._valid_char(ch))
else:
return value
# The new string will be equivalent to the original string if no control characters are present.
# If equivalent, return the original string - some tests check for object equality instead of string equality.
return value if value == new_value else new_value |
def download_file_from_google_drive(ID, destination):
"""Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file.
"""
def save_response_content(response, destination, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True,
desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': ID}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': ID, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination) | Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file. | Below is the the instruction that describes the task:
### Input:
Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file.
### Response:
def download_file_from_google_drive(ID, destination):
"""Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The driver ID.
destination : str
The destination for save file.
"""
def save_response_content(response, destination, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True,
desc=destination):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': ID}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': ID, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination) |
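A minimal usage sketch for the helper above; the file ID and destination path are placeholders rather than values from the source:
# Hypothetical Google Drive ID and output path, for illustration only.
download_file_from_google_drive('0B_PLACEHOLDER_FILE_ID', 'data/celebA.zip')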
def offset_range(self, start, end):
"""
Database start/end entries are always ordered such that
start < end. This makes computing a relative position (e.g. of a stop
codon relative to its transcript) complicated since the "end"
position of a backwards locus is actually earlier on the strand.
This function correctly selects a start vs. end value depending
on this locus's strand and determines that position's offset from
the earliest position in this locus.
"""
assert start <= end, \
"Locations should always have start < end, got start=%d, end=%d" % (
start, end)
if start < self.start or end > self.end:
raise ValueError("Range (%d, %d) falls outside %s" % (
start, end, self))
if self.on_forward_strand:
return (start - self.start, end - self.start)
else:
return (self.end - end, self.end - start) | Database start/end entries are always ordered such that
start < end. This makes computing a relative position (e.g. of a stop
codon relative to its transcript) complicated since the "end"
position of a backwards locus is actually earlir on the strand.
This function correctly selects a start vs. end value depending
on this locuses's strand and determines that position's offset from
the earliest position in this locus. | Below is the the instruction that describes the task:
### Input:
Database start/end entries are always ordered such that
start < end. This makes computing a relative position (e.g. of a stop
codon relative to its transcript) complicated since the "end"
position of a backwards locus is actually earlier on the strand.
This function correctly selects a start vs. end value depending
on this locus's strand and determines that position's offset from
the earliest position in this locus.
### Response:
def offset_range(self, start, end):
"""
Database start/end entries are always ordered such that
start < end. This makes computing a relative position (e.g. of a stop
codon relative to its transcript) complicated since the "end"
position of a backwards locus is actually earlier on the strand.
This function correctly selects a start vs. end value depending
on this locus's strand and determines that position's offset from
the earliest position in this locus.
"""
assert start <= end, \
"Locations should always have start < end, got start=%d, end=%d" % (
start, end)
if start < self.start or end > self.end:
raise ValueError("Range (%d, %d) falls outside %s" % (
start, end, self))
if self.on_forward_strand:
return (start - self.start, end - self.start)
else:
return (self.end - end, self.end - start) |
def GET_user_profile( self, path_info, user_id ):
"""
Get a user profile.
Reply the profile on success
Return 404 on failure to load
"""
if not check_name(user_id) and not check_subdomain(user_id):
return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)
blockstackd_url = get_blockstackd_url()
resp = blockstackd_client.resolve_profile(user_id, hostport=blockstackd_url)
if json_is_error(resp):
self._reply_json({'error': resp['error']}, status_code=404)
return
self._reply_json(resp['profile'])
return | Get a user profile.
Reply the profile on success
Return 404 on failure to load | Below is the the instruction that describes the task:
### Input:
Get a user profile.
Reply the profile on success
Return 404 on failure to load
### Response:
def GET_user_profile( self, path_info, user_id ):
"""
Get a user profile.
Reply the profile on success
Return 404 on failure to load
"""
if not check_name(user_id) and not check_subdomain(user_id):
return self._reply_json({'error': 'Invalid name or subdomain'}, status_code=400)
blockstackd_url = get_blockstackd_url()
resp = blockstackd_client.resolve_profile(user_id, hostport=blockstackd_url)
if json_is_error(resp):
self._reply_json({'error': resp['error']}, status_code=404)
return
self._reply_json(resp['profile'])
return |
def list_xz (archive, compression, cmd, verbosity, interactive):
"""List a XZ archive."""
cmdlist = [cmd]
cmdlist.append('-l')
if verbosity > 1:
cmdlist.append('-v')
cmdlist.append(archive)
return cmdlist | List a XZ archive. | Below is the the instruction that describes the task:
### Input:
List a XZ archive.
### Response:
def list_xz (archive, compression, cmd, verbosity, interactive):
"""List a XZ archive."""
cmdlist = [cmd]
cmdlist.append('-l')
if verbosity > 1:
cmdlist.append('-v')
cmdlist.append(archive)
return cmdlist |
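A quick trace of the command list built above, under assumed arguments ('xz' as the resolved executable name and an illustrative archive path):
list_xz('backup.tar.xz', None, 'xz', 1, False)   # -> ['xz', '-l', 'backup.tar.xz']
list_xz('backup.tar.xz', None, 'xz', 2, False)   # -> ['xz', '-l', '-v', 'backup.tar.xz']  (verbosity > 1 adds -v)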
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
"""big 1d model for unconditional generation on imagenet."""
hparams = image_transformer2d_base()
hparams.unconditional = True
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.img_len = 64
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 1
hparams.max_length = 3075
hparams.max_length = 14000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
hparams.query_shape = (16, 16)
hparams.memory_flange = (8, 8)
return hparams | big 1d model for unconditional generation on imagenet. | Below is the the instruction that describes the task:
### Input:
big 1d model for unconditional generation on imagenet.
### Response:
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
"""big 1d model for unconditional generation on imagenet."""
hparams = image_transformer2d_base()
hparams.unconditional = True
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.img_len = 64
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 1
hparams.max_length = 3075
hparams.max_length = 14000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
hparams.query_shape = (16, 16)
hparams.memory_flange = (8, 8)
return hparams |
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets | Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'. | Below is the the instruction that describes the task:
### Input:
Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
### Response:
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets |
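A usage sketch, assuming newer_group() behaves like the distutils helper it mirrors; the file names are hypothetical:
sources_groups = [['a.c', 'a.h'], ['b.c', 'b.h']]   # hypothetical source groups
targets = ['a.o', 'b.o']                            # one target per group
stale_sources, stale_targets = newer_pairwise_group(sources_groups, targets)
# Only the (group, target) pairs where some source is newer than its target are returned.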
def get_unread(self, request, notifications, mark_as_read):
"""
return unread notifications and mark as read
(unless read=false param is passed)
"""
notifications = notifications.filter(is_read=False)
serializer = UnreadNotificationSerializer(list(notifications), # evaluate queryset
many=True,
context=self.get_serializer_context())
# mark unread notifications as read (default behaviour)
if mark_as_read:
notifications.update(is_read=True)
return Response(serializer.data) | return unread notifications and mark as read
(unless read=false param is passed) | Below is the the instruction that describes the task:
### Input:
return unread notifications and mark as read
(unless read=false param is passed)
### Response:
def get_unread(self, request, notifications, mark_as_read):
"""
return unread notifications and mark as read
(unless read=false param is passed)
"""
notifications = notifications.filter(is_read=False)
serializer = UnreadNotificationSerializer(list(notifications), # evaluate queryset
many=True,
context=self.get_serializer_context())
# mark unread notifications as read (default behaviour)
if mark_as_read:
notifications.update(is_read=True)
return Response(serializer.data) |
def delete_lifecycle(self, policy=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy
"""
return self.transport.perform_request(
"DELETE", _make_path("_ilm", "policy", policy), params=params
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy | Below is the the instruction that describes the task:
### Input:
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy
### Response:
def delete_lifecycle(self, policy=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html>`_
:arg policy: The name of the index lifecycle policy
"""
return self.transport.perform_request(
"DELETE", _make_path("_ilm", "policy", policy), params=params
) |
def geocentric_to_ecef(latitude, longitude, altitude):
"""Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
r = earth_geo_radius + altitude
x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = r * np.sin(np.deg2rad(latitude))
return x, y, z | Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km | Below is the the instruction that describes the task:
### Input:
Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
### Response:
def geocentric_to_ecef(latitude, longitude, altitude):
"""Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
r = earth_geo_radius + altitude
x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = r * np.sin(np.deg2rad(latitude))
return x, y, z |
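A worked point check, assuming earth_geo_radius holds the 6371 km spherical radius the docstring implies:
x, y, z = geocentric_to_ecef(0.0, 0.0, 0.0)   # equator, prime meridian, zero altitude
# x ~= 6371.0, y ~= 0.0, z ~= 0.0 (km), since cos(0) = 1 and sin(0) = 0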
def delete(overlay):
'''
Remove the given overlay from your locally installed overlays.
Specify 'ALL' to remove all overlays.
Return a list of the overlay(s) that were removed:
CLI Example:
.. code-block:: bash
salt '*' layman.delete <overlay name>
'''
ret = list()
old_overlays = list_local()
cmd = 'layman --quietness=0 --delete {0}'.format(overlay)
delete_attempt = __salt__['cmd.run_all'](cmd, python_shell=False)
if delete_attempt['retcode'] != 0:
raise salt.exceptions.CommandExecutionError(delete_attempt['stdout'])
new_overlays = list_local()
# If we now have no overlays added, We need to ensure that the make.conf
# does not source layman's make.conf, as it will break emerge
if not new_overlays:
srcline = 'source /var/lib/layman/make.conf'
makeconf = _get_makeconf()
if __salt__['file.contains'](makeconf, 'layman'):
__salt__['file.sed'](makeconf, srcline, '')
ret = [overlay for overlay in old_overlays if overlay not in new_overlays]
return ret | Remove the given overlay from your locally installed overlays.
Specify 'ALL' to remove all overlays.
Return a list of the overlay(s) that were removed:
CLI Example:
.. code-block:: bash
salt '*' layman.delete <overlay name> | Below is the the instruction that describes the task:
### Input:
Remove the given overlay from your locally installed overlays.
Specify 'ALL' to remove all overlays.
Return a list of the overlay(s) that were removed:
CLI Example:
.. code-block:: bash
salt '*' layman.delete <overlay name>
### Response:
def delete(overlay):
'''
Remove the given overlay from your locally installed overlays.
Specify 'ALL' to remove all overlays.
Return a list of the overlay(s) that were removed:
CLI Example:
.. code-block:: bash
salt '*' layman.delete <overlay name>
'''
ret = list()
old_overlays = list_local()
cmd = 'layman --quietness=0 --delete {0}'.format(overlay)
delete_attempt = __salt__['cmd.run_all'](cmd, python_shell=False)
if delete_attempt['retcode'] != 0:
raise salt.exceptions.CommandExecutionError(delete_attempt['stdout'])
new_overlays = list_local()
# If we now have no overlays added, We need to ensure that the make.conf
# does not source layman's make.conf, as it will break emerge
if not new_overlays:
srcline = 'source /var/lib/layman/make.conf'
makeconf = _get_makeconf()
if __salt__['file.contains'](makeconf, 'layman'):
__salt__['file.sed'](makeconf, srcline, '')
ret = [overlay for overlay in old_overlays if overlay not in new_overlays]
return ret |
def shape_vecs(*args):
'''Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others.'''
ret_args = []
flat_vecs = True
for arg in args:
if type(arg) is numpy.ndarray:
if len(arg.shape) == 1:
arg = shape_vec(arg)
else:
flat_vecs = False
ret_args.append(arg)
return flat_vecs, ret_args | Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others. | Below is the the instruction that describes the task:
### Input:
Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others.
### Response:
def shape_vecs(*args):
'''Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others.'''
ret_args = []
flat_vecs = True
for arg in args:
if type(arg) is numpy.ndarray:
if len(arg.shape) == 1:
arg = shape_vec(arg)
else:
flat_vecs = False
ret_args.append(arg)
return flat_vecs, ret_args |
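A small illustration, assuming shape_vec() reshapes an (n,) array to (n, 1) as the docstring describes:
a = numpy.zeros(3)         # 1-d, will be reshaped to (3, 1)
B = numpy.zeros((3, 2))    # 2-d, passed through unchanged
flat, (a2, B2) = shape_vecs(a, B)
# flat is False because B is not a flat vector; a2.shape == (3, 1)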
def _first_of_month(self, day_of_week):
"""
Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int
:rtype: DateTime
"""
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=1)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = (day_of_week - 1) % 7
if month[0][calendar_day] > 0:
day_of_month = month[0][calendar_day]
else:
day_of_month = month[1][calendar_day]
return dt.set(day=day_of_month) | Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int
:rtype: DateTime | Below is the the instruction that describes the task:
### Input:
Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int
:rtype: DateTime
### Response:
def _first_of_month(self, day_of_week):
"""
Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
:type day_of_week: int
:rtype: DateTime
"""
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=1)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = (day_of_week - 1) % 7
if month[0][calendar_day] > 0:
day_of_month = month[0][calendar_day]
else:
day_of_month = month[1][calendar_day]
return dt.set(day=day_of_month) |
def closed(self, reason):
"""Callback performed when the transport is closed."""
self.server.remove_connection(self)
self.protocol.connection_lost(reason)
if not isinstance(reason, ConnectionClosed):
logger.warn("connection closed, reason: %s" % str(reason))
else:
logger.info("connection closed") | Callback performed when the transport is closed. | Below is the the instruction that describes the task:
### Input:
Callback performed when the transport is closed.
### Response:
def closed(self, reason):
"""Callback performed when the transport is closed."""
self.server.remove_connection(self)
self.protocol.connection_lost(reason)
if not isinstance(reason, ConnectionClosed):
logger.warn("connection closed, reason: %s" % str(reason))
else:
logger.info("connection closed") |