code | docstring | text
---|---|---
def open(self, method, url):
'''
Opens the request.
method:
the request VERB 'GET', 'POST', etc.
url:
the url to connect
'''
flag = VARIANT.create_bool_false()
_method = BSTR(method)
_url = BSTR(url)
_WinHttpRequest._Open(self, _method, _url, flag) | Opens the request.
method:
the request VERB 'GET', 'POST', etc.
url:
the url to connect | Below is the instruction that describes the task:
### Input:
Opens the request.
method:
the request VERB 'GET', 'POST', etc.
url:
the url to connect
### Response:
def open(self, method, url):
'''
Opens the request.
method:
the request VERB 'GET', 'POST', etc.
url:
the url to connect
'''
flag = VARIANT.create_bool_false()
_method = BSTR(method)
_url = BSTR(url)
_WinHttpRequest._Open(self, _method, _url, flag) |
def itertrain(self, train, valid=None, **kwargs):
'''Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
for monitors in downhill.build(
algo=self.algo,
loss=self.network.loss(**kwargs),
updates=self.network.updates(**kwargs),
monitors=self.network.monitors(**kwargs),
inputs=self.network.variables,
params=self.network.params,
monitor_gradients=kwargs.get('monitor_gradients', False),
).iterate(train, valid=valid, **kwargs):
yield monitors | Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset. | Below is the instruction that describes the task:
### Input:
Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
### Response:
def itertrain(self, train, valid=None, **kwargs):
'''Train a model using a training and validation set.
This method yields a series of monitor values to the caller. After every
iteration, a pair of monitor dictionaries is generated: one evaluated on
the training dataset, and another evaluated on the validation dataset.
The validation monitors might not be updated during every training
iteration; in this case, the most recent validation monitors will be
yielded along with the training monitors.
Parameters
----------
train : :class:`Dataset <theanets.dataset.Dataset>`
A set of training data for computing updates to model parameters.
valid : :class:`Dataset <theanets.dataset.Dataset>`
A set of validation data for computing monitor values and
determining when the loss has stopped improving.
Yields
------
training : dict
A dictionary mapping monitor names to values, evaluated on the
training dataset.
validation : dict
A dictionary containing monitor values evaluated on the validation
dataset.
'''
for monitors in downhill.build(
algo=self.algo,
loss=self.network.loss(**kwargs),
updates=self.network.updates(**kwargs),
monitors=self.network.monitors(**kwargs),
inputs=self.network.variables,
params=self.network.params,
monitor_gradients=kwargs.get('monitor_gradients', False),
).iterate(train, valid=valid, **kwargs):
yield monitors |
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.array(((factor, 0.0, 0.0, 0.0),
(0.0, factor, 0.0, 0.0),
(0.0, 0.0, factor, 0.0),
(0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M | Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct) | Below is the instruction that describes the task:
### Input:
Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
### Response:
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.array(((factor, 0.0, 0.0, 0.0),
(0.0, factor, 0.0, 0.0),
(0.0, 0.0, factor, 0.0),
(0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M |
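A quick way to see why the uniform branch above works: the matrix scales about the world origin and then translates by origin * (1 - factor), so origin itself is a fixed point of the transform. The following minimal sketch (illustrative values, not taken from the source) rebuilds that matrix the same way and checks the property.

import numpy

# Minimal sketch (not from the source): rebuild the uniform-scaling matrix the
# same way scale_matrix does and check that `origin` is a fixed point.
factor = 0.5
origin = numpy.array([2.0, -1.0, 3.0])
M = numpy.diag([factor, factor, factor, 1.0])
M[:3, 3] = origin * (1.0 - factor)      # same translation term as above
fixed = numpy.append(origin, 1.0)       # homogeneous coordinates
assert numpy.allclose(numpy.dot(M, fixed)[:3], origin)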
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument") | Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument | Below is the instruction that describes the task:
### Input:
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
### Response:
def remove_whitespace(text_string):
'''
Removes all whitespace found within text_string and returns new string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
return " ".join(text_string.split())
else:
raise InputError("none type or string not passed as an argument") |
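Note that the split()/join() idiom used above collapses every run of spaces, tabs and newlines into a single space rather than deleting whitespace outright; a tiny self-contained illustration (not from the source):

# Illustration only: runs of whitespace collapse to single spaces, and an
# all-whitespace or empty string becomes "".
assert " ".join("  hello \t world\n".split()) == "hello world"
assert " ".join("   ".split()) == ""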
def get_column_listing(self, table):
"""
Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list
"""
sql = self._grammar.compile_column_exists()
database = self._connection.get_database_name()
table = self._connection.get_table_prefix() + table
results = []
for result in self._connection.select(sql, [database, table]):
new_result = {}
for key, value in result.items():
new_result[key.lower()] = value
results.append(new_result)
return self._connection.get_post_processor().process_column_listing(results) | Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list | Below is the instruction that describes the task:
### Input:
Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list
### Response:
def get_column_listing(self, table):
"""
Get the column listing for a given table.
:param table: The table
:type table: str
:rtype: list
"""
sql = self._grammar.compile_column_exists()
database = self._connection.get_database_name()
table = self._connection.get_table_prefix() + table
results = []
for result in self._connection.select(sql, [database, table]):
new_result = {}
for key, value in result.items():
new_result[key.lower()] = value
results.append(new_result)
return self._connection.get_post_processor().process_column_listing(results) |
def _ErrorOfDifferences(self, cov, warning_cutoff=1.0e-10):
"""
inputs:
cov is the covariance matrix of A
returns the statistical error matrix of A_i - A_j
"""
diag = np.matrix(cov.diagonal())
d2 = diag + diag.transpose() - 2 * cov
# Cast warning_cutoff to compare a negative number
cutoff = -abs(warning_cutoff)
# check for any numbers below zero.
if np.any(d2 < 0.0):
if np.any(d2 < cutoff):
print("A squared uncertainty is negative. Largest Magnitude = {0:f}".format(
abs(np.min(d2[d2 < cutoff]))))
else:
d2[np.logical_and(0 > d2, d2 > cutoff)] = 0.0
return np.sqrt(np.array(d2)) | inputs:
cov is the covariance matrix of A
returns the statistical error matrix of A_i - A_j | Below is the instruction that describes the task:
### Input:
inputs:
cov is the covariance matrix of A
returns the statistical error matrix of A_i - A_j
### Response:
def _ErrorOfDifferences(self, cov, warning_cutoff=1.0e-10):
"""
inputs:
cov is the covariance matrix of A
returns the statistical error matrix of A_i - A_j
"""
diag = np.matrix(cov.diagonal())
d2 = diag + diag.transpose() - 2 * cov
# Cast warning_cutoff to compare a negative number
cutoff = -abs(warning_cutoff)
# check for any numbers below zero.
if np.any(d2 < 0.0):
if np.any(d2 < cutoff):
print("A squared uncertainty is negative. Largest Magnitude = {0:f}".format(
abs(np.min(d2[d2 < cutoff]))))
else:
d2[np.logical_and(0 > d2, d2 > cutoff)] = 0.0
return np.sqrt(np.array(d2)) |
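The expression diag + diag.transpose() - 2 * cov above is the standard variance of a difference of two correlated quantities; with \Sigma the covariance matrix of A, in LaTeX:

\operatorname{var}(A_i - A_j) = \Sigma_{ii} + \Sigma_{jj} - 2\,\Sigma_{ij},
\qquad
\sigma_{A_i - A_j} = \sqrt{\Sigma_{ii} + \Sigma_{jj} - 2\,\Sigma_{ij}}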
def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, 'roll')
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
if roll_coords is None:
warnings.warn("roll_coords will be set to False in the future."
" Explicitly set roll_coords to silence warning.",
FutureWarning, stacklevel=2)
roll_coords = True
unrolled_vars = () if roll_coords else self.coords
variables = OrderedDict()
for k, v in self.variables.items():
if k not in unrolled_vars:
variables[k] = v.roll(**{k: s for k, s in shifts.items()
if k in v.dims})
else:
variables[k] = v
return self._replace_vars_and_dims(variables) | Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c' | Below is the instruction that describes the task:
### Input:
Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
### Response:
def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, 'roll')
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
if roll_coords is None:
warnings.warn("roll_coords will be set to False in the future."
" Explicitly set roll_coords to silence warning.",
FutureWarning, stacklevel=2)
roll_coords = True
unrolled_vars = () if roll_coords else self.coords
variables = OrderedDict()
for k, v in self.variables.items():
if k not in unrolled_vars:
variables[k] = v.roll(**{k: s for k, s in shifts.items()
if k in v.dims})
else:
variables[k] = v
return self._replace_vars_and_dims(variables) |
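The docstring states that the rotation direction is consistent with numpy.roll; a one-line check of that convention (not from the source), reproducing the x coordinate shown in the example above:

import numpy as np

# Positive shifts roll values to the right, so the last two wrap to the front.
assert np.roll(np.array([0, 1, 2, 3, 4]), 2).tolist() == [3, 4, 0, 1, 2]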
def _frame_limit(self, start_time):
"""
Limit to framerate, should be called after
rendering has completed
:param start_time: When execution started
"""
if self._speed:
completion_time = time()
exc_time = completion_time - start_time
sleep_for = (1.0 / abs(self._speed)) - exc_time
if sleep_for > 0:
sleep(sleep_for) | Limit to framerate, should be called after
rendering has completed
:param start_time: When execution started | Below is the instruction that describes the task:
### Input:
Limit to framerate, should be called after
rendering has completed
:param start_time: When execution started
### Response:
def _frame_limit(self, start_time):
"""
Limit to framerate, should be called after
rendering has completed
:param start_time: When execution started
"""
if self._speed:
completion_time = time()
exc_time = completion_time - start_time
sleep_for = (1.0 / abs(self._speed)) - exc_time
if sleep_for > 0:
sleep(sleep_for) |
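The method budgets one frame every 1/speed seconds and sleeps for whatever time is left after rendering; a small stand-alone check of that arithmetic (the numbers are illustrative, not from the source):

# At a target of 60 iterations per second, a frame that took 5 ms of work
# should sleep for roughly 1/60 - 0.005, about 11.7 ms.
speed = 60
exc_time = 0.005
sleep_for = (1.0 / abs(speed)) - exc_time
assert abs(sleep_for - 0.0116667) < 1e-4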
def update(cwd, rev, force=False, user=None):
'''
Update to a given revision
cwd
The path to the Mercurial repository
rev
The revision to update to
force : False
Force an update
user : None
Run hg as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt devserver1 hg.update /path/to/repo somebranch
'''
cmd = ['hg', 'update', '{0}'.format(rev)]
if force:
cmd.append('-C')
ret = __salt__['cmd.run_all'](cmd, cwd=cwd, runas=user, python_shell=False)
if ret['retcode'] != 0:
raise CommandExecutionError(
'Hg command failed: {0}'.format(ret.get('stderr', ret['stdout']))
)
return ret['stdout'] | Update to a given revision
cwd
The path to the Mercurial repository
rev
The revision to update to
force : False
Force an update
user : None
Run hg as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt devserver1 hg.update /path/to/repo somebranch | Below is the instruction that describes the task:
### Input:
Update to a given revision
cwd
The path to the Mercurial repository
rev
The revision to update to
force : False
Force an update
user : None
Run hg as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt devserver1 hg.update /path/to/repo somebranch
### Response:
def update(cwd, rev, force=False, user=None):
'''
Update to a given revision
cwd
The path to the Mercurial repository
rev
The revision to update to
force : False
Force an update
user : None
Run hg as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt devserver1 hg.update /path/to/repo somebranch
'''
cmd = ['hg', 'update', '{0}'.format(rev)]
if force:
cmd.append('-C')
ret = __salt__['cmd.run_all'](cmd, cwd=cwd, runas=user, python_shell=False)
if ret['retcode'] != 0:
raise CommandExecutionError(
'Hg command failed: {0}'.format(ret.get('stderr', ret['stdout']))
)
return ret['stdout'] |
def multi_rpush(self, queue, values, bulk_size=0, transaction=False):
''' Pushes multiple elements to a list
If bulk_size is set it will execute the pipeline every bulk_size elements
This operation will be atomic if transaction=True is passed
'''
# Check that what we receive is iterable
if hasattr(values, '__iter__'):
pipe = self.pipeline(transaction=transaction)
pipe.multi()
self._multi_rpush_pipeline(pipe, queue, values, bulk_size)
pipe.execute()
else:
raise ValueError('Expected an iterable') | Pushes multiple elements to a list
If bulk_size is set it will execute the pipeline every bulk_size elements
This operation will be atomic if transaction=True is passed | Below is the instruction that describes the task:
### Input:
Pushes multiple elements to a list
If bulk_size is set it will execute the pipeline every bulk_size elements
This operation will be atomic if transaction=True is passed
### Response:
def multi_rpush(self, queue, values, bulk_size=0, transaction=False):
''' Pushes multiple elements to a list
If bulk_size is set it will execute the pipeline every bulk_size elements
This operation will be atomic if transaction=True is passed
'''
# Check that what we receive is iterable
if hasattr(values, '__iter__'):
pipe = self.pipeline(transaction=transaction)
pipe.multi()
self._multi_rpush_pipeline(pipe, queue, values, bulk_size)
pipe.execute()
else:
raise ValueError('Expected an iterable') |
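The _multi_rpush_pipeline helper is not shown in this entry. A hedged sketch of what such a helper plausibly does with a redis-py pipeline, queuing one RPUSH per value and flushing every bulk_size commands, might look like the following; the name and behaviour here are assumptions, not the library's actual implementation.

def _multi_rpush_pipeline_sketch(pipe, queue, values, bulk_size=0):
    # Hypothetical helper: queue one RPUSH per value and flush the pipeline
    # every bulk_size commands so long iterables do not buffer indefinitely.
    for count, value in enumerate(values, start=1):
        pipe.rpush(queue, value)
        if bulk_size and count % bulk_size == 0:
            pipe.execute()
    # Any remaining commands are flushed by the caller's final pipe.execute().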
def migrate(gandi, resource, force, background, finalize):
""" Migrate a virtual machine to another datacenter. """
if not gandi.iaas.check_can_migrate(resource):
return
if not force:
proceed = click.confirm('Are you sure you want to migrate VM %s ?'
% resource)
if not proceed:
return
if finalize:
gandi.iaas.need_finalize(resource)
output_keys = ['id', 'type', 'step']
oper = gandi.iaas.migrate(resource, background, finalize=finalize)
if background:
output_generic(gandi, oper, output_keys)
return oper | Migrate a virtual machine to another datacenter. | Below is the instruction that describes the task:
### Input:
Migrate a virtual machine to another datacenter.
### Response:
def migrate(gandi, resource, force, background, finalize):
""" Migrate a virtual machine to another datacenter. """
if not gandi.iaas.check_can_migrate(resource):
return
if not force:
proceed = click.confirm('Are you sure you want to migrate VM %s ?'
% resource)
if not proceed:
return
if finalize:
gandi.iaas.need_finalize(resource)
output_keys = ['id', 'type', 'step']
oper = gandi.iaas.migrate(resource, background, finalize=finalize)
if background:
output_generic(gandi, oper, output_keys)
return oper |
def line(self, idx):
"""Return the i'th program line.
:param i: The i'th program line.
"""
# TODO: We should parse the response properly.
return self._query(('PGM?', [Integer, Integer], String), self.idx, idx) | Return the i'th program line.
:param i: The i'th program line. | Below is the instruction that describes the task:
### Input:
Return the i'th program line.
:param i: The i'th program line.
### Response:
def line(self, idx):
"""Return the i'th program line.
:param i: The i'th program line.
"""
# TODO: We should parse the response properly.
return self._query(('PGM?', [Integer, Integer], String), self.idx, idx) |
def read_dir(self, path):
"""
Reads the given path into the tree
"""
self.tree = {}
self.file_count = 0
self.path = path
for root, _, filelist in os.walk(path):
rel = root[len(path):].lstrip('/\\')
# empty rel, means file is in root dir
if not rel:
rel = ' '
for filename in filelist:
filename = filename.split('.')
if len(filename) <= 1:
raise RuntimeError("Files without an extension are not supported: {0}".format(
repr(os.path.join(root, '.'.join(filename))),
))
ext = filename[-1]
filename = '.'.join(filename[:-1])
if ext not in self.tree:
self.tree[ext] = {}
if rel not in self.tree[ext]:
self.tree[ext][rel] = []
self.tree[ext][rel].append(filename)
self.file_count += 1
self.tree_length = self.calculate_tree_length() | Reads the given path into the tree | Below is the instruction that describes the task:
### Input:
Reads the given path into the tree
### Response:
def read_dir(self, path):
"""
Reads the given path into the tree
"""
self.tree = {}
self.file_count = 0
self.path = path
for root, _, filelist in os.walk(path):
rel = root[len(path):].lstrip('/\\')
# empty rel, means file is in root dir
if not rel:
rel = ' '
for filename in filelist:
filename = filename.split('.')
if len(filename) <= 1:
raise RuntimeError("Files without an extension are not supported: {0}".format(
repr(os.path.join(root, '.'.join(filename))),
))
ext = filename[-1]
filename = '.'.join(filename[:-1])
if ext not in self.tree:
self.tree[ext] = {}
if rel not in self.tree[ext]:
self.tree[ext][rel] = []
self.tree[ext][rel].append(filename)
self.file_count += 1
self.tree_length = self.calculate_tree_length() |
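The loop above buckets files as tree[extension][relative_dir] -> [basenames], splitting each filename on its last '.'; a self-contained illustration of that split (not from the source):

# Everything after the last '.' becomes the extension key; the rest is stored.
parts = "archive.tar.gz".split('.')
ext, name = parts[-1], '.'.join(parts[:-1])
assert (ext, name) == ("gz", "archive.tar")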
def get_associated_profiles(profile_path, result_role, server):
"""
Get the associated CIM_ReferencedProfile (i.e. the Reference) for the
profile defined by profile_path. This allows the ResultRole for the
association to be set as part of the call to either "Dependent" or
"Antecedent".
"""
associated_profiles = server.conn.Associators(
ObjectName=profile_path,
AssocClass="CIM_ReferencedProfile",
ResultRole=result_role)
if VERBOSE:
print('GET_ASSOCIATED_PROFILES path=%r, result_role=%s\nReturns %s' %
(profile_path, result_role, associated_profiles))
if server.conn.debug:
print('LAST_REQUEST\n%s' % server.conn.last_request)
print('LAST_REPLY\n%s' % server.conn.last_reply)
return associated_profiles | Get the associated CIM_ReferencedProfile (i.e. the Reference) for the
profile defined by profile_path. This allows the ResultRole for the
association to be set as part of the call to either "Dependent" or
"Antecedent". | Below is the the instruction that describes the task:
### Input:
Get the associated CIM_ReferencedProfile (i.e. the Reference) for the
profile defined by profile_path. This allows the ResultRole for the
association to be set as part of the call to either "Dependent" or
"Antecedent".
### Response:
def get_associated_profiles(profile_path, result_role, server):
"""
Get the associated CIM_ReferencedProfile (i.e. the Reference) for the
profile defined by profile_path. This allows the ResultRole for the
association to be set as part of the call to either "Dependent" or
"Antecedent".
"""
associated_profiles = server.conn.Associators(
ObjectName=profile_path,
AssocClass="CIM_ReferencedProfile",
ResultRole=result_role)
if VERBOSE:
print('GET_ASSOCIATED_PROFILES path=%r, result_role=%s\nReturns %s' %
(profile_path, result_role, associated_profiles))
if server.conn.debug:
print('LAST_REQUEST\n%s' % server.conn.last_request)
print('LAST_REPLY\n%s' % server.conn.last_reply)
return associated_profiles |
def get_group_velocity(q, # q-point
dynamical_matrix,
q_length=None, # finite distance in q
symmetry=None,
frequency_factor_to_THz=VaspToTHz):
"""
If frequencies and eigenvectors are supplied they are used
instead of calculating them at q-point (but not at q+dq and q-dq).
reciprocal lattice has to be given as
[[a_x, b_x, c_x],
[a_y, b_y, c_y],
[a_z, b_z, c_z]]
"""
gv = GroupVelocity(dynamical_matrix,
q_length=q_length,
symmetry=symmetry,
frequency_factor_to_THz=frequency_factor_to_THz)
gv.set_q_points([q])
return gv.get_group_velocity()[0] | If frequencies and eigenvectors are supplied they are used
instead of calculating them at q-point (but not at q+dq and q-dq).
reciprocal lattice has to be given as
[[a_x, b_x, c_x],
[a_y, b_y, c_y],
[a_z, b_z, c_z]] | Below is the instruction that describes the task:
### Input:
If frequencies and eigenvectors are supplied they are used
instead of calculating them at q-point (but not at q+dq and q-dq).
reciprocal lattice has to be given as
[[a_x, b_x, c_x],
[a_y, b_y, c_y],
[a_z, b_z, c_z]]
### Response:
def get_group_velocity(q, # q-point
dynamical_matrix,
q_length=None, # finite distance in q
symmetry=None,
frequency_factor_to_THz=VaspToTHz):
"""
If frequencies and eigenvectors are supplied they are used
instead of calculating them at q-point (but not at q+dq and q-dq).
reciprocal lattice has to be given as
[[a_x, b_x, c_x],
[a_y, b_y, c_y],
[a_z, b_z, c_z]]
"""
gv = GroupVelocity(dynamical_matrix,
q_length=q_length,
symmetry=symmetry,
frequency_factor_to_THz=frequency_factor_to_THz)
gv.set_q_points([q])
return gv.get_group_velocity()[0] |
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) | Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0 | Below is the instruction that describes the task:
### Input:
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
### Response:
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) |
def factory(
method,
description="",
request_example=None,
request_ctor=None,
responses=None,
method_choices=HTTP_METHODS,
):
"""
desc: Describes a single HTTP method of a URI
args:
- name: method
type: str
desc: The HTTP request method to use
- name: description
type: str
desc: The description of what this call does
required: false
default: ""
- name: request_example
type: dict
desc: An example JSON request body
required: false
default: null
- name: request_ctor
type: method
desc: Docstring will be parsed into help for @request_example
required: false
default: null
- name: responses
type: list
subtypes: [RouteMethodResponse]
desc: >
Each object describes a possible response and describes
the condition(s) that may cause it
ctor: pymarshal.api_docs.routes.RouteMethodResponse.__init__
- name: method_choices
type: list
subtypes: ["str"]
desc: The HTTP methods to allow for @method
hide: true
required: false
default: [DELETE, GET, PATCH, POST, PUT]
"""
return RouteMethod(
method,
description,
request_example,
DocString.from_ctor(request_ctor) if request_ctor else None,
responses,
method_choices,
) | desc: Describes a single HTTP method of a URI
args:
- name: method
type: str
desc: The HTTP request method to use
- name: description
type: str
desc: The description of what this call does
required: false
default: ""
- name: request_example
type: dict
desc: An example JSON request body
required: false
default: null
- name: request_ctor
type: method
desc: Docstring will be parsed into help for @request_example
required: false
default: null
- name: responses
type: list
subtypes: [RouteMethodResponse]
desc: >
Each object describes a possible response and describes
the condition(s) that may cause it
ctor: pymarshal.api_docs.routes.RouteMethodResponse.__init__
- name: method_choices
type: list
subtypes: ["str"]
desc: The HTTP methods to allow for @method
hide: true
required: false
default: [DELETE, GET, PATCH, POST, PUT] | Below is the instruction that describes the task:
### Input:
desc: Describes a single HTTP method of a URI
args:
- name: method
type: str
desc: The HTTP request method to use
- name: description
type: str
desc: The description of what this call does
required: false
default: ""
- name: request_example
type: dict
desc: An example JSON request body
required: false
default: null
- name: request_ctor
type: method
desc: Docstring will be parsed into help for @request_example
required: false
default: null
- name: responses
type: list
subtypes: [RouteMethodResponse]
desc: >
Each object describes a possible response and describes
the condition(s) that may cause it
ctor: pymarshal.api_docs.routes.RouteMethodResponse.__init__
- name: method_choices
type: list
subtypes: ["str"]
desc: The HTTP methods to allow for @method
hide: true
required: false
default: [DELETE, GET, PATCH, POST, PUT]
### Response:
def factory(
method,
description="",
request_example=None,
request_ctor=None,
responses=None,
method_choices=HTTP_METHODS,
):
"""
desc: Describes a single HTTP method of a URI
args:
- name: method
type: str
desc: The HTTP request method to use
- name: description
type: str
desc: The description of what this call does
required: false
default: ""
- name: request_example
type: dict
desc: An example JSON request body
required: false
default: null
- name: request_ctor
type: method
desc: Docstring will be parsed into help for @request_example
required: false
default: null
- name: responses
type: list
subtypes: [RouteMethodResponse]
desc: >
Each object describes a possible response and describes
the condition(s) that may cause it
ctor: pymarshal.api_docs.routes.RouteMethodResponse.__init__
- name: method_choices
type: list
subtypes: ["str"]
desc: The HTTP methods to allow for @method
hide: true
required: false
default: [DELETE, GET, PATCH, POST, PUT]
"""
return RouteMethod(
method,
description,
request_example,
DocString.from_ctor(request_ctor) if request_ctor else None,
responses,
method_choices,
) |
def first(self):
"""Returns the first item from the query, or None if there are no results"""
if self._results_cache:
return self._results_cache[0]
query = PaginatedResponse(func=self._func, lwrap_type=self._lwrap_type, **self._kwargs)
try:
return next(query)
except StopIteration:
return None | Returns the first item from the query, or None if there are no results | Below is the instruction that describes the task:
### Input:
Returns the first item from the query, or None if there are no results
### Response:
def first(self):
"""Returns the first item from the query, or None if there are no results"""
if self._results_cache:
return self._results_cache[0]
query = PaginatedResponse(func=self._func, lwrap_type=self._lwrap_type, **self._kwargs)
try:
return next(query)
except StopIteration:
return None |
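This is the usual "first element or None" pattern, applied to a freshly built PaginatedResponse; the same idiom over any iterable, as a minimal stand-alone sketch (not from the source):

def first_or_none(iterable):
    # Return the first item, or None when the iterable is empty.
    try:
        return next(iter(iterable))
    except StopIteration:
        return None

assert first_or_none([]) is None
assert first_or_none([7, 8]) == 7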
async def main():
"""Scan command example."""
redis = await aioredis.create_redis(
'redis://localhost')
await redis.mset('key:1', 'value1', 'key:2', 'value2')
cur = b'0' # set initial cursor to 0
while cur:
cur, keys = await redis.scan(cur, match='key:*')
print("Iteration results:", keys)
redis.close()
await redis.wait_closed() | Scan command example. | Below is the instruction that describes the task:
### Input:
Scan command example.
### Response:
async def main():
"""Scan command example."""
redis = await aioredis.create_redis(
'redis://localhost')
await redis.mset('key:1', 'value1', 'key:2', 'value2')
cur = b'0' # set initial cursor to 0
while cur:
cur, keys = await redis.scan(cur, match='key:*')
print("Iteration results:", keys)
redis.close()
await redis.wait_closed() |
def trace(_name):
"""Function decorator that logs function entry and exit details.
\var{_name} a string, an instance of logging.Logger or a function.
Construct a function or method proxy to generate call traces.
"""
def decorator(_func):
"""This is the actual decorator function that wraps the
\var{_func} function for detailed logging.
"""
def positional(name, value):
"""Format one named positional argument.
"""
if name is __self:
return af_self(name, value)
elif name is __klass:
return af_class(name, value)
else:
return af_named(name, value)
def wrapper(*__argv, **__kwds):
if not logger.isEnabledFor(logging.DEBUG) or _.value:
return _func(*__argv, **__kwds)
try:
_.value = True
params = dict(co_defaults)
params.update(__kwds)
params.update(zip(co_varnames, __argv))
if 'raw_password' in params:
params['raw_password'] = '<censored>'
position = [
positional(n, params.pop(n))
for n in co_varnames[:len(__argv)]
]
defaults = [
af_default(n, params.pop(n))
for n in co_varnames[len(__argv):]
]
nameless = (
af_unnamed(v) for v in __argv[co_argcount:]
)
keywords = (
af_keyword(n, params[n]) for n in sorted(params.keys())
)
params = ', '.join(
filter(None, chain(
position,
defaults,
nameless,
keywords)))
# params = params.replace(', [', '[, ').replace('][, ', ', ')
enter = [pre_enter]
if params:
enter.append(' ')
enter.append(params)
enter.append(' ')
enter.append(')')
leave = [pre_leave]
try:
logger.debug(''.join(enter))
try:
try:
_.value = False
result = _func(*__argv, **__kwds)
finally:
_.value = True
except Exception:
ex_type, value, traceback = sys.exc_info()
leave.append(' => exception thrown\n\traise ')
__mname = ex_type.__module__
if __mname != '__main__':
leave.append(__mname)
leave.append('.')
leave.append(ex_type.__name__)
if value.args:
leave.append('(')
leave.append(
', '.join(chop(v) for v in value.args))
leave.append(')')
else:
leave.append('()')
raise
else:
if result is not None:
leave.append(' => ')
leave.append(chop(result))
finally:
logger.debug(''.join(leave))
finally:
_.value = False
return result
####
# decorator
####
__self = False
__klass = False
def nop(x):
return x
__rewrap = nop
if type(_func) in FunctionTypes:
# functions do not belong to a class.
__cname = None
elif type(_func) in MethodTypes:
# im_self is None for unbound instance methods.
# Assumption: trace is only called on unbound methods.
if _func.im_self is not None:
__rewrap = classmethod
__cname = _func.im_self.__name__
__klass = True
else:
__cname = _func.im_class.__name__
__self = True
_func = _func.im_func
else:
# other callables are not supported yet.
return _func
__module = _func.__module__
__fname = _func.__name__
# Do not wrap initialization and conversion methods.
if __fname in ('__init__', '__new__', '__repr__', '__str__'):
return __rewrap(_func)
# Generate the Fully Qualified Function Name.
__fqfn = list()
if __module != '__main__':
__fqfn.append(__module)
__fqfn.append('.')
if __cname is not None:
__fqfn.append(__cname)
__fqfn.append('.')
__fqfn.append(__fname)
__fqfn = ''.join(__fqfn)
if type(_name) in CallableTypes:
logger = get_logger_factory().get_logger(__fqfn)
elif loggable(_name):
logger = _name
elif isinstance(_name, six.string_types):
logger = get_logger_factory().get_logger(_name)
else:
raise ValueError(
'invalid object %r: must be a function, a method, '
'a string or an object that implements the Logger API' % _name)
pre_enter = ['>>> ', __fqfn, '(']
pre_enter = ''.join(pre_enter)
pre_leave = ['<<< ', __fqfn]
pre_leave = ''.join(pre_leave)
####
# Here we are really mucking around in function internals.
# func_code is the low level 'code' instance that describes
# the function arguments, variable and other stuff.
#
# func.func_code.co_argcount - number of function arguments.
# func.func_code.co_varnames - function variables names, the
# first co_argcount values are the argument names.
# func.func_defaults - contains default arguments
try:
code = _func.__code__
except AttributeError:
co_argcount, co_varnames, co_defaults = \
__lookup_builtin(_func.__name__)
else:
co_argcount = code.co_argcount
co_varnames = code.co_varnames[:co_argcount]
if _func.__defaults__:
co_defaults = dict(
zip(co_varnames[-len(_func.__defaults__):],
_func.__defaults__))
else:
co_defaults = dict()
if __klass:
__klass = co_varnames[0]
if __self:
__self = co_varnames[0]
return __rewrap(wraps(_func)(wrapper))
####
# trace
####
# logging.basicConfig(level = logging.DEBUG)
if type(_name) in CallableTypes:
return decorator(_name)
else:
return decorator | Function decorator that logs function entry and exit details.
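The comment block near the end of trace describes the __code__ attributes it inspects; a short self-contained check of that introspection (the example function is illustrative, not from the source):

def example(a, b, c=3, d=4):
    pass

code = example.__code__
argnames = code.co_varnames[:code.co_argcount]
assert code.co_argcount == 4
assert argnames == ('a', 'b', 'c', 'd')
# __defaults__ lines up with the trailing declared arguments, as trace assumes.
defaults = dict(zip(argnames[-len(example.__defaults__):], example.__defaults__))
assert defaults == {'c': 3, 'd': 4}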
\var{_name} a string, an instance of logging.Logger or a function.
Construct a function or method proxy to generate call traces. | Below is the instruction that describes the task:
### Input:
Function decorator that logs function entry and exit details.
\var{_name} a string, an instance of logging.Logger or a function.
Construct a function or method proxy to generate call traces.
### Response:
def trace(_name):
"""Function decorator that logs function entry and exit details.
\var{_name} a string, an instance of logging.Logger or a function.
Construct a function or method proxy to generate call traces.
"""
def decorator(_func):
"""This is the actual decorator function that wraps the
\var{_func} function for detailed logging.
"""
def positional(name, value):
"""Format one named positional argument.
"""
if name is __self:
return af_self(name, value)
elif name is __klass:
return af_class(name, value)
else:
return af_named(name, value)
def wrapper(*__argv, **__kwds):
if not logger.isEnabledFor(logging.DEBUG) or _.value:
return _func(*__argv, **__kwds)
try:
_.value = True
params = dict(co_defaults)
params.update(__kwds)
params.update(zip(co_varnames, __argv))
if 'raw_password' in params:
params['raw_password'] = '<censored>'
position = [
positional(n, params.pop(n))
for n in co_varnames[:len(__argv)]
]
defaults = [
af_default(n, params.pop(n))
for n in co_varnames[len(__argv):]
]
nameless = (
af_unnamed(v) for v in __argv[co_argcount:]
)
keywords = (
af_keyword(n, params[n]) for n in sorted(params.keys())
)
params = ', '.join(
filter(None, chain(
position,
defaults,
nameless,
keywords)))
# params = params.replace(', [', '[, ').replace('][, ', ', ')
enter = [pre_enter]
if params:
enter.append(' ')
enter.append(params)
enter.append(' ')
enter.append(')')
leave = [pre_leave]
try:
logger.debug(''.join(enter))
try:
try:
_.value = False
result = _func(*__argv, **__kwds)
finally:
_.value = True
except Exception:
ex_type, value, traceback = sys.exc_info()
leave.append(' => exception thrown\n\traise ')
__mname = ex_type.__module__
if __mname != '__main__':
leave.append(__mname)
leave.append('.')
leave.append(ex_type.__name__)
if value.args:
leave.append('(')
leave.append(
', '.join(chop(v) for v in value.args))
leave.append(')')
else:
leave.append('()')
raise
else:
if result is not None:
leave.append(' => ')
leave.append(chop(result))
finally:
logger.debug(''.join(leave))
finally:
_.value = False
return result
####
# decorator
####
__self = False
__klass = False
def nop(x):
return x
__rewrap = nop
if type(_func) in FunctionTypes:
# functions do not belong to a class.
__cname = None
elif type(_func) in MethodTypes:
# im_self is None for unbound instance methods.
# Assumption: trace is only called on unbound methods.
if _func.im_self is not None:
__rewrap = classmethod
__cname = _func.im_self.__name__
__klass = True
else:
__cname = _func.im_class.__name__
__self = True
_func = _func.im_func
else:
# other callables are not supported yet.
return _func
__module = _func.__module__
__fname = _func.__name__
# Do not wrap initialization and conversion methods.
if __fname in ('__init__', '__new__', '__repr__', '__str__'):
return __rewrap(_func)
# Generate the Fully Qualified Function Name.
__fqfn = list()
if __module != '__main__':
__fqfn.append(__module)
__fqfn.append('.')
if __cname is not None:
__fqfn.append(__cname)
__fqfn.append('.')
__fqfn.append(__fname)
__fqfn = ''.join(__fqfn)
if type(_name) in CallableTypes:
logger = get_logger_factory().get_logger(__fqfn)
elif loggable(_name):
logger = _name
elif isinstance(_name, six.string_types):
logger = get_logger_factory().get_logger(_name)
else:
raise ValueError(
'invalid object %r: must be a function, a method, '
'a string or an object that implements the Logger API' % _name)
pre_enter = ['>>> ', __fqfn, '(']
pre_enter = ''.join(pre_enter)
pre_leave = ['<<< ', __fqfn]
pre_leave = ''.join(pre_leave)
####
# Here we are really mucking around in function internals.
# func_code is the low level 'code' instance that describes
# the function arguments, variable and other stuff.
#
# func.func_code.co_argcount - number of function arguments.
# func.func_code.co_varnames - function variables names, the
# first co_argcount values are the argument names.
# func.func_defaults - contains default arguments
try:
code = _func.__code__
except AttributeError:
co_argcount, co_varnames, co_defaults = \
__lookup_builtin(_func.__name__)
else:
co_argcount = code.co_argcount
co_varnames = code.co_varnames[:co_argcount]
if _func.__defaults__:
co_defaults = dict(
zip(co_varnames[-len(_func.__defaults__):],
_func.__defaults__))
else:
co_defaults = dict()
if __klass:
__klass = co_varnames[0]
if __self:
__self = co_varnames[0]
return __rewrap(wraps(_func)(wrapper))
####
# trace
####
# logging.basicConfig(level = logging.DEBUG)
if type(_name) in CallableTypes:
return decorator(_name)
else:
return decorator |
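A deliberately simplified, standalone sketch of the same enter/exit logging idea follows; it is not the library's implementation and omits the argument formatting, class handling and re-entrancy guard above. The logger name 'demo' and the add() function are illustrative only.

import functools
import logging

def simple_trace(logger_name):
    # Minimal stand-in for trace(): log the call and its result at DEBUG level.
    logger = logging.getLogger(logger_name)
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug('>>> %s(args=%r, kwargs=%r)', func.__name__, args, kwargs)
            result = func(*args, **kwargs)
            logger.debug('<<< %s => %r', func.__name__, result)
            return result
        return wrapper
    return decorator

logging.basicConfig(level=logging.DEBUG)

@simple_trace('demo')
def add(a, b=2):
    return a + b

add(1)   # logs ">>> add(args=(1,), kwargs={})" then "<<< add => 3"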
def dbm_starter(priority_msgs, resource_msgs, *args, **kwargs):
"""Start the database manager process
The DFK should start this function. The args, kwargs match that of the monitoring config
"""
dbm = DatabaseManager(*args, **kwargs)
dbm.start(priority_msgs, resource_msgs) | Start the database manager process
The DFK should start this function. The args, kwargs match that of the monitoring config | Below is the the instruction that describes the task:
### Input:
Start the database manager process
The DFK should start this function. The args, kwargs match that of the monitoring config
### Response:
def dbm_starter(priority_msgs, resource_msgs, *args, **kwargs):
"""Start the database manager process
The DFK should start this function. The args, kwargs match that of the monitoring config
"""
dbm = DatabaseManager(*args, **kwargs)
dbm.start(priority_msgs, resource_msgs) |
def is_gtk_desktop():
"""Detect if we are running in a Gtk-based desktop"""
if sys.platform.startswith('linux'):
xdg_desktop = os.environ.get('XDG_CURRENT_DESKTOP', '')
if xdg_desktop:
gtk_desktops = ['Unity', 'GNOME', 'XFCE']
if any([xdg_desktop.startswith(d) for d in gtk_desktops]):
return True
else:
return False
else:
return False
else:
return False | Detect if we are running in a Gtk-based desktop | Below is the the instruction that describes the task:
### Input:
Detect if we are running in a Gtk-based desktop
### Response:
def is_gtk_desktop():
"""Detect if we are running in a Gtk-based desktop"""
if sys.platform.startswith('linux'):
xdg_desktop = os.environ.get('XDG_CURRENT_DESKTOP', '')
if xdg_desktop:
gtk_desktops = ['Unity', 'GNOME', 'XFCE']
if any([xdg_desktop.startswith(d) for d in gtk_desktops]):
return True
else:
return False
else:
return False
else:
return False |
def prepare_minibatch(self, audio_paths, texts, overwrite=False,
is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):
""" Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents
"""
assert len(audio_paths) == len(texts),\
"Inputs and outputs to the network must be of the same number"
# Features is a list of (timesteps, feature_dim) arrays
# Calculate the features for each audio clip, as the log of the
# Fourier Transform of the audio
features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]
input_lengths = [f.shape[0] for f in features]
feature_dim = features[0].shape[1]
mb_size = len(features)
# Pad all the inputs so that they are all the same length
if seq_length == -1:
x = np.zeros((mb_size, self.max_seq_length, feature_dim))
else:
x = np.zeros((mb_size, seq_length, feature_dim))
y = np.zeros((mb_size, self.max_label_length))
labelUtil = LabelUtil.getInstance()
label_lengths = []
for i in range(mb_size):
feat = features[i]
feat = self.normalize(feat) # Center using means and std
x[i, :feat.shape[0], :] = feat
if is_bi_graphemes:
label = generate_bi_graphemes_label(texts[i])
label = labelUtil.convert_bi_graphemes_to_num(label)
y[i, :len(label)] = label
else:
label = labelUtil.convert_word_to_num(texts[i])
y[i, :len(texts[i])] = label
label_lengths.append(len(label))
return {
'x': x, # (0-padded features of shape(mb_size,timesteps,feat_dim)
'y': y, # list(int) Flattened labels (integer sequences)
'texts': texts, # list(str) Original texts
'input_lengths': input_lengths, # list(int) Length of each input
'label_lengths': label_lengths, # list(int) Length of each label
} | Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents | Below is the the instruction that describes the task:
### Input:
Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents
### Response:
def prepare_minibatch(self, audio_paths, texts, overwrite=False,
is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):
""" Featurize a minibatch of audio, zero pad them and return a dictionary
Params:
audio_paths (list(str)): List of paths to audio files
texts (list(str)): List of texts corresponding to the audio files
Returns:
dict: See below for contents
"""
assert len(audio_paths) == len(texts),\
"Inputs and outputs to the network must be of the same number"
# Features is a list of (timesteps, feature_dim) arrays
# Calculate the features for each audio clip, as the log of the
# Fourier Transform of the audio
features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]
input_lengths = [f.shape[0] for f in features]
feature_dim = features[0].shape[1]
mb_size = len(features)
# Pad all the inputs so that they are all the same length
if seq_length == -1:
x = np.zeros((mb_size, self.max_seq_length, feature_dim))
else:
x = np.zeros((mb_size, seq_length, feature_dim))
y = np.zeros((mb_size, self.max_label_length))
labelUtil = LabelUtil.getInstance()
label_lengths = []
for i in range(mb_size):
feat = features[i]
feat = self.normalize(feat) # Center using means and std
x[i, :feat.shape[0], :] = feat
if is_bi_graphemes:
label = generate_bi_graphemes_label(texts[i])
label = labelUtil.convert_bi_graphemes_to_num(label)
y[i, :len(label)] = label
else:
label = labelUtil.convert_word_to_num(texts[i])
y[i, :len(texts[i])] = label
label_lengths.append(len(label))
return {
'x': x, # (0-padded features of shape(mb_size,timesteps,feat_dim)
'y': y, # list(int) Flattened labels (integer sequences)
'texts': texts, # list(str) Original texts
'input_lengths': input_lengths, # list(int) Length of each input
'label_lengths': label_lengths, # list(int) Length of each label
} |
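A toy, self-contained illustration of just the zero-padding step above; the shapes and values are made up, and nothing here touches the featurizer, normalization or LabelUtil.

import numpy as np

features = [np.random.randn(t, 4) for t in (3, 5, 2)]   # fake (timesteps, feature_dim) arrays
max_len = max(f.shape[0] for f in features)
x = np.zeros((len(features), max_len, 4))
for i, feat in enumerate(features):
    x[i, :feat.shape[0], :] = feat                      # shorter clips stay zero-padded at the end
print(x.shape)   # (3, 5, 4); input_lengths would be [3, 5, 2]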
def _lerp(x, x0, x1, y0, y1):
"""Affinely map from [x0, x1] onto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0) | Affinely map from [x0, x1] onto [y0, y1]. | Below is the the instruction that describes the task:
### Input:
Affinely map from [x0, x1] onto [y0, y1].
### Response:
def _lerp(x, x0, x1, y0, y1):
"""Affinely map from [x0, x1] onto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0) |
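A quick worked check of the affine map, relying on the _lerp definition above; the numbers are arbitrary.

# 7.5 sits halfway through [5, 10], so it maps to the midpoint of [0, 1]:
# 0 + (7.5 - 5) * (1 - 0) / (10 - 5) = 0.5
print(_lerp(7.5, 5, 10, 0, 1))          # 0.5
print(_lerp(0, 0, 255, 0.0, 100.0))     # 0.0
print(_lerp(255, 0, 255, 0.0, 100.0))   # 100.0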
def scatter(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories}) | Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names | Below is the the instruction that describes the task:
### Input:
Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
### Response:
def scatter(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories}) |
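A standalone sketch of the resulting layout; the real helper also fills a 'text' column via getName, which is omitted here because that function is not shown in this entry.

import numpy as np
import pandas as pd

n_categories, n, prefix = 3, 4, 'category'
categories = []
for i in range(n_categories):
    categories.extend([prefix + str(i + 1)] * n)
df = pd.DataFrame({'x': np.random.randn(n * n_categories),
                   'y': np.random.randn(n * n_categories),
                   'categories': categories})
print(df.groupby('categories').size())   # 4 rows for each of category1..category3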
def init(
dist='dist',
minver=None,
maxver=None,
use_markdown_readme=True,
use_stdeb=False,
use_distribute=False,
):
"""Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported.
"""
if not minver == maxver == None:
import sys
if not minver <= sys.version < (maxver or 'Any'):
sys.stderr.write(
'%s: requires python version in <%s, %s), not %s\n' % (
sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0]))
sys.exit(1)
if use_distribute:
from distribute_setup import use_setuptools
use_setuptools(to_dir=dist)
from setuptools import setup
else:
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if use_markdown_readme:
try:
import setuptools.command.sdist
setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ()))
+ ['README.md'])
except ImportError:
pass
if use_stdeb:
import platform
if 'debian' in platform.dist():
try:
import stdeb
except ImportError:
pass
return setup | Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported. | Below is the the instruction that describes the task:
### Input:
Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported.
### Response:
def init(
dist='dist',
minver=None,
maxver=None,
use_markdown_readme=True,
use_stdeb=False,
use_distribute=False,
):
"""Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported.
"""
if not minver == maxver == None:
import sys
if not minver <= sys.version < (maxver or 'Any'):
sys.stderr.write(
'%s: requires python version in <%s, %s), not %s\n' % (
sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0]))
sys.exit(1)
if use_distribute:
from distribute_setup import use_setuptools
use_setuptools(to_dir=dist)
from setuptools import setup
else:
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if use_markdown_readme:
try:
import setuptools.command.sdist
setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ()))
+ ['README.md'])
except ImportError:
pass
if use_stdeb:
import platform
if 'debian' in platform.dist():
try:
import stdeb
except ImportError:
pass
return setup |
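A hedged usage sketch for a setup.py; the import path pysetup and all package metadata below are assumptions for illustration, not confirmed by this entry.

from pysetup import init   # hypothetical module name for the helper above

setup = init(minver='2.6', use_markdown_readme=True)
setup(
    name='example-package',   # placeholder metadata
    version='0.1.0',
    py_modules=['example'],
)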
def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates = 2):
''' - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
'''
nn = Build(name)
inputs = nn.concat(input, state)
u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u))
r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r))
r_state = nn.mul(r, state)
c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c,
x=nn.concat(input, r_state)))
# new_h = u' * state + (1 - u') * c'
# = u' * state + c' - u' * c'
# u' * state + c'
nn.add(nn.mul(u, state), c)
# - u' * c'
nn.sub(nn._, nn.mul(u, c),
out=new_state)
return nn.layers; | - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1 | Below is the the instruction that describes the task:
### Input:
- zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
### Response:
def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates = 2):
''' - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
'''
nn = Build(name)
inputs = nn.concat(input, state)
u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u))
r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r))
r_state = nn.mul(r, state)
c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c,
x=nn.concat(input, r_state)))
# new_h = u' * state + (1 - u') * c'
# = u' * state + c' - u' * c'
# u' * state + c'
nn.add(nn.mul(u, state), c)
# - u' * c'
nn.sub(nn._, nn.mul(u, c),
out=new_state)
return nn.layers; |
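The same update equations restated in plain NumPy, as a sanity check of what the layer builder above encodes; the weight shapes and the x @ W convention are illustrative assumptions, not the exporter's actual tensor layout.

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def gru_step(x, h, Wr, Wu, Wc, br, bu, bc):
    xh = np.concatenate([x, h])
    u = sigmoid(xh @ Wu + bu)                             # update gate zt
    r = sigmoid(xh @ Wr + br)                             # reset gate rt
    c = np.tanh(np.concatenate([x, r * h]) @ Wc + bc)     # candidate ht
    return u * h + (1.0 - u) * c                          # Ht = zt.Ht_1 + (1 - zt).ht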
def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips | Get a network interface. | Below is the the instruction that describes the task:
### Input:
Get a network interface.
### Response:
def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips |
def view(self, function_name, extension_name):
""" Builds response according to a function name
:param function_name: Route name / function name
:param extension_name: Name of the extension holding the function
:return: Function
"""
if isinstance(self.Access_Control_Allow_Origin, dict):
d = {
"Access-Control-Allow-Origin": self.Access_Control_Allow_Origin[function_name],
"Access-Control-Allow-Methods": self.Access_Control_Allow_Methods[function_name]
}
else:
d = {
"Access-Control-Allow-Origin": self.Access_Control_Allow_Origin,
"Access-Control-Allow-Methods": self.Access_Control_Allow_Methods[function_name]
}
def r(*x, **y):
val = getattr(self._extensions[extension_name], function_name)(*x, **y)
if isinstance(val, Response):
val.headers.extend(d)
return val
else:
val = list(val)
val[2].update(d)
return tuple(val)
return r | Builds response according to a function name
:param function_name: Route name / function name
:param extension_name: Name of the extension holding the function
:return: Function | Below is the the instruction that describes the task:
### Input:
Builds response according to a function name
:param function_name: Route name / function name
:param extension_name: Name of the extension holding the function
:return: Function
### Response:
def view(self, function_name, extension_name):
""" Builds response according to a function name
:param function_name: Route name / function name
:param extension_name: Name of the extension holding the function
:return: Function
"""
if isinstance(self.Access_Control_Allow_Origin, dict):
d = {
"Access-Control-Allow-Origin": self.Access_Control_Allow_Origin[function_name],
"Access-Control-Allow-Methods": self.Access_Control_Allow_Methods[function_name]
}
else:
d = {
"Access-Control-Allow-Origin": self.Access_Control_Allow_Origin,
"Access-Control-Allow-Methods": self.Access_Control_Allow_Methods[function_name]
}
def r(*x, **y):
val = getattr(self._extensions[extension_name], function_name)(*x, **y)
if isinstance(val, Response):
val.headers.extend(d)
return val
else:
val = list(val)
val[2].update(d)
return tuple(val)
return r |
def max_lv_count(self):
"""
Returns the maximum allowed logical volume count.
"""
self.open()
count = lvm_vg_get_max_lv(self.handle)
self.close()
return count | Returns the maximum allowed logical volume count. | Below is the the instruction that describes the task:
### Input:
Returns the maximum allowed logical volume count.
### Response:
def max_lv_count(self):
"""
Returns the maximum allowed logical volume count.
"""
self.open()
count = lvm_vg_get_max_lv(self.handle)
self.close()
return count |
def path_to_slug(path):
"""
Removes everything from the given URL path, including
language code and ``PAGES_SLUG`` if any is set, returning
a slug that would match a ``Page`` instance's slug.
"""
from yacms.urls import PAGES_SLUG
lang_code = translation.get_language_from_path(path)
for prefix in (lang_code, settings.SITE_PREFIX, PAGES_SLUG):
if prefix:
path = path.replace(prefix, "", 1)
return clean_slashes(path) or "/" | Removes everything from the given URL path, including
language code and ``PAGES_SLUG`` if any is set, returning
a slug that would match a ``Page`` instance's slug. | Below is the the instruction that describes the task:
### Input:
Removes everything from the given URL path, including
language code and ``PAGES_SLUG`` if any is set, returning
a slug that would match a ``Page`` instance's slug.
### Response:
def path_to_slug(path):
"""
Removes everything from the given URL path, including
language code and ``PAGES_SLUG`` if any is set, returning
a slug that would match a ``Page`` instance's slug.
"""
from yacms.urls import PAGES_SLUG
lang_code = translation.get_language_from_path(path)
for prefix in (lang_code, settings.SITE_PREFIX, PAGES_SLUG):
if prefix:
path = path.replace(prefix, "", 1)
return clean_slashes(path) or "/" |
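A standalone sketch of the stripping behaviour, with the Django/yacms pieces (language detection, SITE_PREFIX, PAGES_SLUG and clean_slashes) replaced by plain values; real prefixes come from settings and the URL config.

def strip_prefixes(path, prefixes):
    for prefix in prefixes:
        if prefix:
            path = path.replace(prefix, "", 1)
    return path.strip("/") or "/"

print(strip_prefixes("/en/pages/about-us/", ["en", "", "pages/"]))   # 'about-us'
print(strip_prefixes("/", ["en", "", "pages/"]))                     # '/'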
def set_handler(self, language, obj):
"""Define a custom language handler for RiveScript objects.
Pass in a ``None`` value for the object to delete an existing handler (for
example, to prevent Python code from being able to be run by default).
Look in the ``eg`` folder of the rivescript-python distribution for
an example script that sets up a JavaScript language handler.
:param str language: The lowercased name of the programming language.
Examples: python, javascript, perl
:param class obj: An instance of an implementation class object.
It should provide the following interface::
class MyObjectHandler:
def __init__(self):
pass
def load(self, name, code):
# name = the name of the object from the RiveScript code
# code = the source code of the object
def call(self, rs, name, fields):
# rs = the current RiveScript interpreter object
# name = the name of the object being called
# fields = array of arguments passed to the object
return reply
"""
# Allow them to delete a handler too.
if obj is None:
if language in self._handlers:
del self._handlers[language]
else:
self._handlers[language] = obj | Define a custom language handler for RiveScript objects.
Pass in a ``None`` value for the object to delete an existing handler (for
example, to prevent Python code from being able to be run by default).
Look in the ``eg`` folder of the rivescript-python distribution for
an example script that sets up a JavaScript language handler.
:param str language: The lowercased name of the programming language.
Examples: python, javascript, perl
:param class obj: An instance of an implementation class object.
It should provide the following interface::
class MyObjectHandler:
def __init__(self):
pass
def load(self, name, code):
# name = the name of the object from the RiveScript code
# code = the source code of the object
def call(self, rs, name, fields):
# rs = the current RiveScript interpreter object
# name = the name of the object being called
# fields = array of arguments passed to the object
return reply | Below is the the instruction that describes the task:
### Input:
Define a custom language handler for RiveScript objects.
Pass in a ``None`` value for the object to delete an existing handler (for
example, to prevent Python code from being able to be run by default).
Look in the ``eg`` folder of the rivescript-python distribution for
an example script that sets up a JavaScript language handler.
:param str language: The lowercased name of the programming language.
Examples: python, javascript, perl
:param class obj: An instance of an implementation class object.
It should provide the following interface::
class MyObjectHandler:
def __init__(self):
pass
def load(self, name, code):
# name = the name of the object from the RiveScript code
# code = the source code of the object
def call(self, rs, name, fields):
# rs = the current RiveScript interpreter object
# name = the name of the object being called
# fields = array of arguments passed to the object
return reply
### Response:
def set_handler(self, language, obj):
"""Define a custom language handler for RiveScript objects.
Pass in a ``None`` value for the object to delete an existing handler (for
example, to prevent Python code from being able to be run by default).
Look in the ``eg`` folder of the rivescript-python distribution for
an example script that sets up a JavaScript language handler.
:param str language: The lowercased name of the programming language.
Examples: python, javascript, perl
:param class obj: An instance of an implementation class object.
It should provide the following interface::
class MyObjectHandler:
def __init__(self):
pass
def load(self, name, code):
# name = the name of the object from the RiveScript code
# code = the source code of the object
def call(self, rs, name, fields):
# rs = the current RiveScript interpreter object
# name = the name of the object being called
# fields = array of arguments passed to the object
return reply
"""
# Allow them to delete a handler too.
if obj is None:
if language in self._handlers:
del self._handlers[language]
else:
self._handlers[language] = obj |
def format_as_dataframes(explanation):
# type: (Explanation) -> Dict[str, pd.DataFrame]
""" Export an explanation to a dictionary with ``pandas.DataFrame`` values
and string keys that correspond to explanation attributes.
Use this method if several dataframes can be exported from a single
    explanation (e.g. for CRF explanation which has both feature weights
and transition matrix).
Note that :func:`eli5.explain_weights` limits number of features
by default. If you need all features, pass ``top=None`` to
:func:`eli5.explain_weights`, or use
:func:`explain_weights_dfs`.
"""
result = {}
for attr in _EXPORTED_ATTRIBUTES:
value = getattr(explanation, attr)
if value:
result[attr] = format_as_dataframe(value)
return result | Export an explanation to a dictionary with ``pandas.DataFrame`` values
and string keys that correspond to explanation attributes.
Use this method if several dataframes can be exported from a single
explanation (e.g. for CRF explanation which has both feature weights
and transition matrix).
Note that :func:`eli5.explain_weights` limits number of features
by default. If you need all features, pass ``top=None`` to
:func:`eli5.explain_weights`, or use
:func:`explain_weights_dfs`. | Below is the the instruction that describes the task:
### Input:
Export an explanation to a dictionary with ``pandas.DataFrame`` values
and string keys that correspond to explanation attributes.
Use this method if several dataframes can be exported from a single
explanation (e.g. for CRF explanation which has both feature weights
and transition matrix).
Note that :func:`eli5.explain_weights` limits number of features
by default. If you need all features, pass ``top=None`` to
:func:`eli5.explain_weights`, or use
:func:`explain_weights_dfs`.
### Response:
def format_as_dataframes(explanation):
# type: (Explanation) -> Dict[str, pd.DataFrame]
""" Export an explanation to a dictionary with ``pandas.DataFrame`` values
and string keys that correspond to explanation attributes.
Use this method if several dataframes can be exported from a single
    explanation (e.g. for CRF explanation which has both feature weights
and transition matrix).
Note that :func:`eli5.explain_weights` limits number of features
by default. If you need all features, pass ``top=None`` to
:func:`eli5.explain_weights`, or use
:func:`explain_weights_dfs`.
"""
result = {}
for attr in _EXPORTED_ATTRIBUTES:
value = getattr(explanation, attr)
if value:
result[attr] = format_as_dataframe(value)
return result |
def poll(function, step=0.5, timeout=3, ignore_exceptions=(), exception_message='', message_builder=None,
args=(), kwargs=None, ontimeout=()):
"""Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True
"""
# Validate usage
try:
iter(ontimeout)
except TypeError:
raise ValueError('Please specify an iterable of callable functions for ontimeout')
kwargs = kwargs or dict()
end_time = time.time() + timeout
while True:
try:
value = function(*args, **kwargs)
if bool(value):
return value
except ignore_exceptions:
pass
time.sleep(step)
if time.time() > end_time:
break
# Execute the callbacks
for fn in ontimeout:
try:
fn(),
except:
continue
if message_builder:
exception_message = message_builder(*args, **kwargs)
raise AssertionError(exception_message) | Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True | Below is the the instruction that describes the task:
### Input:
Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True
### Response:
def poll(function, step=0.5, timeout=3, ignore_exceptions=(), exception_message='', message_builder=None,
args=(), kwargs=None, ontimeout=()):
"""Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True
"""
# Validate usage
try:
iter(ontimeout)
except TypeError:
raise ValueError('Please specify an iterable of callable functions for ontimeout')
kwargs = kwargs or dict()
end_time = time.time() + timeout
while True:
try:
value = function(*args, **kwargs)
if bool(value):
return value
except ignore_exceptions:
pass
time.sleep(step)
if time.time() > end_time:
break
# Execute the callbacks
for fn in ontimeout:
try:
fn(),
except:
continue
if message_builder:
exception_message = message_builder(*args, **kwargs)
raise AssertionError(exception_message) |
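A usage sketch that relies on the poll() definition above: wait for a flag that a background timer sets, and fail with a clear message if it never appears.

import threading

flag = {'ready': False}
threading.Timer(1.0, lambda: flag.update(ready=True)).start()

poll(lambda: flag['ready'], step=0.2, timeout=5,
     exception_message='flag was never set within 5 seconds')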
def DiffAnyArrays(self, oldObj, newObj, isElementLinks):
"""Diff two arrays which contain Any objects"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffAnyArrays: Array lengths do not match. %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if not self.DiffAnyObjects(i, j, isElementLinks):
__Log__.debug('DiffAnyArrays: One of the elements do not match.')
return False
return True | Diff two arrays which contain Any objects | Below is the the instruction that describes the task:
### Input:
Diff two arrays which contain Any objects
### Response:
def DiffAnyArrays(self, oldObj, newObj, isElementLinks):
"""Diff two arrays which contain Any objects"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffAnyArrays: Array lengths do not match. %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if not self.DiffAnyObjects(i, j, isElementLinks):
__Log__.debug('DiffAnyArrays: One of the elements do not match.')
return False
return True |
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h | Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type. | Below is the the instruction that describes the task:
### Input:
Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
### Response:
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h |
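The algorithm mirrors CPython's frozenset hashing (note the Python 2-only sys.maxint), so equal sets hash equally regardless of element order or implementation. The property being reproduced is easy to see with the built-in type:

print(hash(frozenset([1, 2, 3])) == hash(frozenset([3, 2, 1])))     # True
print(hash(frozenset('abc')) == hash(frozenset(['c', 'b', 'a'])))   # True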
def get_random_subgraph(graph, number_edges=None, number_seed_edges=None, seed=None, invert_degrees=None):
"""Generate a random subgraph based on weighted random walks from random seed edges.
:type graph: pybel.BELGraph graph
:param Optional[int] number_edges: Maximum number of edges. Defaults to
:data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250).
:param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components
in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5).
:param Optional[int] seed: A seed for the random state
:param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true.
:rtype: pybel.BELGraph
"""
if number_edges is None:
number_edges = SAMPLE_RANDOM_EDGE_COUNT
if number_seed_edges is None:
number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT
if seed is not None:
random.seed(seed)
# Check if graph will sample full graph, and just return it if it would
if graph.number_of_edges() <= number_edges:
log.info('sampled full graph')
return graph.copy()
log.debug('getting random sub-graph with %d seed edges, %d final edges, and seed=%s', number_seed_edges,
number_edges, seed)
# Get initial graph with `number_seed_edges` edges
result = get_graph_with_random_edges(graph, number_seed_edges)
number_edges_remaining = number_edges - result.number_of_edges()
_helper(
result,
graph,
number_edges_remaining,
node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from
invert_degrees=invert_degrees,
)
log.debug('removing isolated nodes')
remove_isolated_nodes(result)
# update metadata
update_node_helper(graph, result)
update_metadata(graph, result)
return result | Generate a random subgraph based on weighted random walks from random seed edges.
:type graph: pybel.BELGraph graph
:param Optional[int] number_edges: Maximum number of edges. Defaults to
:data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250).
:param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components
in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5).
:param Optional[int] seed: A seed for the random state
:param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true.
:rtype: pybel.BELGraph | Below is the the instruction that describes the task:
### Input:
Generate a random subgraph based on weighted random walks from random seed edges.
:type graph: pybel.BELGraph graph
:param Optional[int] number_edges: Maximum number of edges. Defaults to
:data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250).
:param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components
in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5).
:param Optional[int] seed: A seed for the random state
:param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true.
:rtype: pybel.BELGraph
### Response:
def get_random_subgraph(graph, number_edges=None, number_seed_edges=None, seed=None, invert_degrees=None):
"""Generate a random subgraph based on weighted random walks from random seed edges.
:type graph: pybel.BELGraph graph
:param Optional[int] number_edges: Maximum number of edges. Defaults to
:data:`pybel_tools.constants.SAMPLE_RANDOM_EDGE_COUNT` (250).
:param Optional[int] number_seed_edges: Number of nodes to start with (which likely results in different components
in large graphs). Defaults to :data:`SAMPLE_RANDOM_EDGE_SEED_COUNT` (5).
:param Optional[int] seed: A seed for the random state
:param Optional[bool] invert_degrees: Should the degrees be inverted? Defaults to true.
:rtype: pybel.BELGraph
"""
if number_edges is None:
number_edges = SAMPLE_RANDOM_EDGE_COUNT
if number_seed_edges is None:
number_seed_edges = SAMPLE_RANDOM_EDGE_SEED_COUNT
if seed is not None:
random.seed(seed)
# Check if graph will sample full graph, and just return it if it would
if graph.number_of_edges() <= number_edges:
log.info('sampled full graph')
return graph.copy()
log.debug('getting random sub-graph with %d seed edges, %d final edges, and seed=%s', number_seed_edges,
number_edges, seed)
# Get initial graph with `number_seed_edges` edges
result = get_graph_with_random_edges(graph, number_seed_edges)
number_edges_remaining = number_edges - result.number_of_edges()
_helper(
result,
graph,
number_edges_remaining,
node_blacklist=set(), # This is the set of nodes that should no longer be chosen to grow from
invert_degrees=invert_degrees,
)
log.debug('removing isolated nodes')
remove_isolated_nodes(result)
# update metadata
update_node_helper(graph, result)
update_metadata(graph, result)
return result |
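A hedged usage sketch; the graph construction below is only a placeholder and any populated pybel.BELGraph works. With a fixed seed the sampled neighbourhood is reproducible.

import pybel

graph = pybel.BELGraph(name='example', version='0.0.1')   # fill with nodes and edges first
sample = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=0)
print(sample.number_of_edges())   # at most 250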
def generate_terms(self, ref, root, file_type=None):
"""An generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
"""
last_section = root
t = None
if isinstance(ref, Source):
row_gen = ref
ref_path = row_gen.__class__.__name__
else:
row_gen = get_generator(ref)
ref_path = ref.path
try:
for line_n, row in enumerate(row_gen, 1):
if not row or not row[0] or not row[0].strip() or row[0].strip().startswith('#'):
continue
tt = Term(row[0], None) # Just to get the qualified name constructed property
term_class = self.get_term_class(tt.join_lc)
t = term_class(tt.join_lc,
row[1] if len(row) > 1 else '',
row[2:] if len(row) > 2 else [],
row=line_n,
col=1,
file_name=ref_path, file_type=file_type, doc=self.doc)
# Why did we remove comments from values? It strips out Markdown
#if t.value and str(t.value).startswith('#'): # Comments are ignored
# continue
if t.term_is('include') or t.term_is('declare'):
if t.term_is('include'):
resolved = self.find_include_doc(dirname(ref_path), t.value.strip())
else:
resolved = self.find_declare_doc(dirname(ref_path), t.value.strip())
if row_gen.ref == resolved:
raise IncludeError("Include loop for '{}' ".format(resolved))
yield t
try:
sub_gen = get_generator(resolved.get_resource().get_target())
for t in self.generate_terms(sub_gen, root, file_type=t.record_term_lc):
yield t
if last_section:
yield last_section # Re-assert the last section
except IncludeError as e:
e.term = t
raise
except (OSError, FileNotFoundError, GenerateError, DownloadError) as e:
e = IncludeError("Failed to Include; {}".format(e))
e.term = t
raise e
continue # Already yielded the include/declare term, and includes can't have children
elif t.term_is('section'):
# If there is already a section in the document, emit the existing section,
# rather than a new one.
try:
last_section = self.doc[t.name]
t = last_section
except (KeyError, TypeError): # TypeError -> self.doc is None
last_section = t
yield t
# Yield any child terms, from the term row arguments
if not t.term_is('section') and not t.term_is('header'):
for col, value in enumerate(t.args, 0):
if str(value).strip():
term_name = t.record_term_lc + '.' + str(col)
term_class = self.get_term_class(term_name)
yield term_class(term_name, str(value), [],
row=line_n,
col=col + 2, # The 0th argument starts in col 2
file_name=ref_path,
file_type=file_type,
parent=t) #,
#doc=None,
#section=last_section)
except IncludeError as e:
exc = IncludeError(str(e) + "; in '{}' ".format(ref_path))
exc.term = e.term if hasattr(e, 'term') else None
        raise exc | A generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref: | Below is the the instruction that describes the task:
### Input:
A generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
### Response:
def generate_terms(self, ref, root, file_type=None):
"""An generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
"""
last_section = root
t = None
if isinstance(ref, Source):
row_gen = ref
ref_path = row_gen.__class__.__name__
else:
row_gen = get_generator(ref)
ref_path = ref.path
try:
for line_n, row in enumerate(row_gen, 1):
if not row or not row[0] or not row[0].strip() or row[0].strip().startswith('#'):
continue
tt = Term(row[0], None) # Just to get the qualified name constructed property
term_class = self.get_term_class(tt.join_lc)
t = term_class(tt.join_lc,
row[1] if len(row) > 1 else '',
row[2:] if len(row) > 2 else [],
row=line_n,
col=1,
file_name=ref_path, file_type=file_type, doc=self.doc)
# Why did we remove comments from values? It strips out Markdown
#if t.value and str(t.value).startswith('#'): # Comments are ignored
# continue
if t.term_is('include') or t.term_is('declare'):
if t.term_is('include'):
resolved = self.find_include_doc(dirname(ref_path), t.value.strip())
else:
resolved = self.find_declare_doc(dirname(ref_path), t.value.strip())
if row_gen.ref == resolved:
raise IncludeError("Include loop for '{}' ".format(resolved))
yield t
try:
sub_gen = get_generator(resolved.get_resource().get_target())
for t in self.generate_terms(sub_gen, root, file_type=t.record_term_lc):
yield t
if last_section:
yield last_section # Re-assert the last section
except IncludeError as e:
e.term = t
raise
except (OSError, FileNotFoundError, GenerateError, DownloadError) as e:
e = IncludeError("Failed to Include; {}".format(e))
e.term = t
raise e
continue # Already yielded the include/declare term, and includes can't have children
elif t.term_is('section'):
# If there is already a section in the document, emit the existing section,
# rather than a new one.
try:
last_section = self.doc[t.name]
t = last_section
except (KeyError, TypeError): # TypeError -> self.doc is None
last_section = t
yield t
# Yield any child terms, from the term row arguments
if not t.term_is('section') and not t.term_is('header'):
for col, value in enumerate(t.args, 0):
if str(value).strip():
term_name = t.record_term_lc + '.' + str(col)
term_class = self.get_term_class(term_name)
yield term_class(term_name, str(value), [],
row=line_n,
col=col + 2, # The 0th argument starts in col 2
file_name=ref_path,
file_type=file_type,
parent=t) #,
#doc=None,
#section=last_section)
except IncludeError as e:
exc = IncludeError(str(e) + "; in '{}' ".format(ref_path))
exc.term = e.term if hasattr(e, 'term') else None
raise exc |
def network_stats(self):
"""Return a dictionary containing a summary of the Dot11
elements fields
"""
summary = {}
crypto = set()
akmsuite_types = {
0x00: "Reserved",
0x01: "802.1X",
0x02: "PSK"
}
p = self.payload
while isinstance(p, Dot11Elt):
if p.ID == 0:
summary["ssid"] = plain_str(p.info)
elif p.ID == 3:
summary["channel"] = ord(p.info)
elif isinstance(p, Dot11EltCountry):
summary["country"] = plain_str(p.country_string[:2])
country_descriptor_types = {
b"I": "Indoor",
b"O": "Outdoor",
b"X": "Non-country",
b"\xff": "Ignored"
}
summary["country_desc_type"] = country_descriptor_types.get(
p.country_string[-1:]
)
elif isinstance(p, Dot11EltRates):
summary["rates"] = p.rates
elif isinstance(p, Dot11EltRSN):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA2/%s" % auth)
else:
crypto.add("WPA2")
elif p.ID == 221:
if isinstance(p, Dot11EltMicrosoftWPA) or \
p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA/%s" % auth)
else:
crypto.add("WPA")
p = p.payload
if not crypto:
if self.cap.privacy:
crypto.add("WEP")
else:
crypto.add("OPN")
summary["crypto"] = crypto
return summary | Return a dictionary containing a summary of the Dot11
elements fields | Below is the the instruction that describes the task:
### Input:
Return a dictionary containing a summary of the Dot11
elements fields
### Response:
def network_stats(self):
"""Return a dictionary containing a summary of the Dot11
elements fields
"""
summary = {}
crypto = set()
akmsuite_types = {
0x00: "Reserved",
0x01: "802.1X",
0x02: "PSK"
}
p = self.payload
while isinstance(p, Dot11Elt):
if p.ID == 0:
summary["ssid"] = plain_str(p.info)
elif p.ID == 3:
summary["channel"] = ord(p.info)
elif isinstance(p, Dot11EltCountry):
summary["country"] = plain_str(p.country_string[:2])
country_descriptor_types = {
b"I": "Indoor",
b"O": "Outdoor",
b"X": "Non-country",
b"\xff": "Ignored"
}
summary["country_desc_type"] = country_descriptor_types.get(
p.country_string[-1:]
)
elif isinstance(p, Dot11EltRates):
summary["rates"] = p.rates
elif isinstance(p, Dot11EltRSN):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA2/%s" % auth)
else:
crypto.add("WPA2")
elif p.ID == 221:
if isinstance(p, Dot11EltMicrosoftWPA) or \
p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
crypto.add("WPA/%s" % auth)
else:
crypto.add("WPA")
p = p.payload
if not crypto:
if self.cap.privacy:
crypto.add("WEP")
else:
crypto.add("OPN")
summary["crypto"] = crypto
return summary |
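This reads like the network_stats() helper on Scapy's beacon/probe-response layers, so a typical call site iterates over captured beacon frames; the capture file name below is a placeholder.

from scapy.all import rdpcap, Dot11Beacon

for pkt in rdpcap("capture.pcap"):
    if pkt.haslayer(Dot11Beacon):
        stats = pkt[Dot11Beacon].network_stats()
        print(stats.get("ssid"), stats.get("channel"), stats.get("crypto"))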
def _rem(self, command, *args, **kwargs):
"""
Shortcut for commands that only remove values from the field.
Removed values will be deindexed.
"""
if self.indexable:
self.deindex(args)
return self._traverse_command(command, *args, **kwargs) | Shortcut for commands that only remove values from the field.
Removed values will be deindexed. | Below is the the instruction that describes the task:
### Input:
Shortcut for commands that only remove values from the field.
Removed values will be deindexed.
### Response:
def _rem(self, command, *args, **kwargs):
"""
Shortcut for commands that only remove values from the field.
Removed values will be deindexed.
"""
if self.indexable:
self.deindex(args)
return self._traverse_command(command, *args, **kwargs) |
def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) | R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0). | Below is the the instruction that describes the task:
### Input:
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
### Response:
def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) |
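For a concrete sense of the density, here is a direct NumPy evaluation of the log of the formula above at a single point; the PyMC flib routine additionally sums the log-likelihood over an array of observations, so this is only an illustrative cross-check.

import numpy as np

def exponweib_logpdf(x, alpha, k, loc=0.0, scale=1.0):
    z = (x - loc) / scale
    return (np.log(alpha * k / scale)
            + (alpha - 1.0) * np.log(1.0 - np.exp(-z ** k))
            - z ** k
            + (k - 1.0) * np.log(z))

print(exponweib_logpdf(1.5, alpha=2.0, k=1.0))   # log f(1.5 | alpha=2, k=1, loc=0, scale=1)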
def percentAt(self, value):
"""
Returns the percentage the value represents between the minimum and
maximum for this axis.
:param value | <int> || <float>
:return <float>
"""
min_val = self.minimum()
max_val = self.maximum()
# round the max value to sync with the values in the grid
total_seconds = (max_val - min_val).total_seconds()
value_seconds = (value - min_val).total_seconds()
if value < min_val:
return 0.0
elif max_val < value:
return 1.0
try:
perc = value_seconds / float(total_seconds)
except ZeroDivisionError:
perc = 0.0
return perc | Returns the percentage the value represents between the minimum and
maximum for this axis.
:param value | <int> || <float>
:return <float> | Below is the the instruction that describes the task:
### Input:
Returns the percentage the value represents between the minimum and
maximum for this axis.
:param value | <int> || <float>
:return <float>
### Response:
def percentAt(self, value):
"""
Returns the percentage the value represents between the minimum and
maximum for this axis.
:param value | <int> || <float>
:return <float>
"""
min_val = self.minimum()
max_val = self.maximum()
# round the max value to sync with the values in the grid
total_seconds = (max_val - min_val).total_seconds()
value_seconds = (value - min_val).total_seconds()
if value < min_val:
return 0.0
elif max_val < value:
return 1.0
try:
perc = value_seconds / float(total_seconds)
except ZeroDivisionError:
perc = 0.0
return perc |
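A toy check of the interpolation on a two-day axis; the dates are arbitrary, and the midpoint should come out as 0.5, matching what percentAt() would return for an axis spanning them.

from datetime import datetime

start, end = datetime(2024, 1, 1), datetime(2024, 1, 3)
value = datetime(2024, 1, 2)
print((value - start).total_seconds() / (end - start).total_seconds())   # 0.5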
def locate(callback, root_frame=None, include_root=False, raw=False):
'''
Locates a frame by criteria.
:param callback:
One argument function to check the frame against. The frame we are
        currently on, is given as that argument.
:param root_frame:
The root frame to start the search from. Can be a callback taking
no arguments.
:param include_root:
`True` if the search should start from the `root_frame` or the one
beneath it. Defaults to `False`.
:param raw:
whether to use raw frames or wrap them in our own object. Defaults to
`False`.
:raises RuntimeError:
When no matching frame is found.
:returns:
The first frame which responds to the `callback`.
'''
def get_from(maybe_callable):
if callable(maybe_callable):
return maybe_callable()
return maybe_callable
# Creates new frames, whether raw or not.
new = lambda frame: frame if raw else Frame(frame)
current_frame = get_from(root_frame or Frame.current_frame(raw=True))
current_frame = new(current_frame)
if not include_root:
current_frame = new(current_frame.f_back)
# The search will stop, because at some point the frame will be falsy.
while current_frame:
found = callback(current_frame)
if found:
return current_frame
current_frame = new(current_frame.f_back)
raise Frame.NotFound('No matching frame found') | Locates a frame by criteria.
:param callback:
One argument function to check the frame against. The frame we are
    currently on, is given as that argument.
:param root_frame:
The root frame to start the search from. Can be a callback taking
no arguments.
:param include_root:
`True` if the search should start from the `root_frame` or the one
beneath it. Defaults to `False`.
:param raw:
whether to use raw frames or wrap them in our own object. Defaults to
`False`.
:raises RuntimeError:
When no matching frame is found.
:returns:
The first frame which responds to the `callback`. | Below is the the instruction that describes the task:
### Input:
Locates a frame by criteria.
:param callback:
One argument function to check the frame against. The frame we are
    currently on, is given as that argument.
:param root_frame:
The root frame to start the search from. Can be a callback taking
no arguments.
:param include_root:
`True` if the search should start from the `root_frame` or the one
beneath it. Defaults to `False`.
:param raw:
whether to use raw frames or wrap them in our own object. Defaults to
`False`.
:raises RuntimeError:
When no matching frame is found.
:returns:
The first frame which responds to the `callback`.
### Response:
def locate(callback, root_frame=None, include_root=False, raw=False):
'''
Locates a frame by criteria.
:param callback:
One argument function to check the frame against. The frame we are
        currently on, is given as that argument.
:param root_frame:
The root frame to start the search from. Can be a callback taking
no arguments.
:param include_root:
`True` if the search should start from the `root_frame` or the one
beneath it. Defaults to `False`.
:param raw:
whether to use raw frames or wrap them in our own object. Defaults to
`False`.
:raises RuntimeError:
When no matching frame is found.
:returns:
The first frame which responds to the `callback`.
'''
def get_from(maybe_callable):
if callable(maybe_callable):
return maybe_callable()
return maybe_callable
# Creates new frames, whether raw or not.
new = lambda frame: frame if raw else Frame(frame)
current_frame = get_from(root_frame or Frame.current_frame(raw=True))
current_frame = new(current_frame)
if not include_root:
current_frame = new(current_frame.f_back)
# The search will stop, because at some point the frame will be falsy.
while current_frame:
found = callback(current_frame)
if found:
return current_frame
current_frame = new(current_frame.f_back)
raise Frame.NotFound('No matching frame found') |
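A usage sketch that relies on the locate() definition (and its Frame helper) above; the local name 'marker' is purely illustrative.

def innermost():
    # Find the nearest enclosing frame that defines a local called 'marker'.
    return locate(lambda frame: 'marker' in frame.f_locals, raw=True)

def outer():
    marker = True
    return innermost()

print(outer().f_locals['marker'])   # True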
def _modifyInternal(self, *, sort=None, purge=False, done=None):
"""Creates a whole new database from existing one, based on given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
    :done: pattern looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
"""
sortAll, sortLevels = sort is not None and sort or ([], {})
doneAll, doneLevels = done is not None and done or ([], {})
def _mark(v, i):
if done is None:
return v[:4]
def _mark_(index, regexp, du):
if du is None:
return v[:4]
if index is None:
for v_ in v[:3]:
if regexp is None or re.match(regexp, str(v_)):
return v[:3] + [du]
return v[:4]
if regexp is None or re.match(regexp, str(v[index])):
return v[:3] + [du]
try:
for doneLevel in doneLevels[i]:
result = _mark_(*doneLevel)
if result is not None:
return result
except KeyError:
pass
for doneAll_ in doneAll:
result = _mark_(*doneAll_)
if result is None:
return v[:4]
return result
def _modify(submodel, i):
_new = list()
for v in submodel:
if purge:
if not v[3]:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
else:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
levels = sortLevels.get(i) or sortLevels.get(str(i))
for index, reverse in levels or sortAll:
_new = sorted(_new, key=lambda e: e[index], reverse=reverse)
return _new
return _modify(self.data, 1) | Creates a whole new database from an existing one, based on the given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: pattern looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments. | Below is the instruction that describes the task:
### Input:
Creates a whole new database from an existing one, based on the given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: pattern looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
### Response:
def _modifyInternal(self, *, sort=None, purge=False, done=None):
"""Creates a whole new database from existing one, based on given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: pattern looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
"""
sortAll, sortLevels = sort is not None and sort or ([], {})
doneAll, doneLevels = done is not None and done or ([], {})
def _mark(v, i):
if done is None:
return v[:4]
def _mark_(index, regexp, du):
if du is None:
return v[:4]
if index is None:
for v_ in v[:3]:
if regexp is None or re.match(regexp, str(v_)):
return v[:3] + [du]
return v[:4]
if regexp is None or re.match(regexp, str(v[index])):
return v[:3] + [du]
try:
for doneLevel in doneLevels[i]:
result = _mark_(*doneLevel)
if result is not None:
return result
except KeyError:
pass
for doneAll_ in doneAll:
result = _mark_(*doneAll_)
if result is None:
return v[:4]
return result
def _modify(submodel, i):
_new = list()
for v in submodel:
if purge:
if not v[3]:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
else:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
levels = sortLevels.get(i) or sortLevels.get(str(i))
for index, reverse in levels or sortAll:
_new = sorted(_new, key=lambda e: e[index], reverse=reverse)
return _new
return _modify(self.data, 1) |
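The sort/done pattern shapes expected above can be hard to visualise from the docstring alone; a hedged sketch with purely illustrative indexes and regexps (per the note, this is normally reached through the decorated Model.modify):
# Sort every level descending on index 0, and level 2 ascending on index 1.
sort_pattern = ([(0, True)], {2: [(1, False)]})
# On any level, mark items whose index-1 value matches 'urgent.*' as done.
done_pattern = ([(1, 'urgent.*', True)], {})
# new_data = model._modifyInternal(sort=sort_pattern, done=done_pattern, purge=False)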
def _fetch_url_data(self, url, username, password, verify, custom_headers):
''' Hit a given http url and return the stats lines '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
custom_headers.update(headers(self.agentConfig))
self.log.debug("Fetching haproxy stats from url: %s" % url)
response = requests.get(
url, auth=auth, headers=custom_headers, verify=verify, timeout=self.default_integration_http_timeout
)
response.raise_for_status()
# it only needs additional decoding in py3, so skip it if it's py2
if PY2:
return response.content.splitlines()
else:
content = response.content
# If the content is a string, it can't be decoded again
# But if it's bytes, it can be decoded.
# So, check if it has the decode method
decode_fn = getattr(content, "decode", None)
if callable(decode_fn):
content = content.decode('utf-8')
return content.splitlines() | Hit a given http url and return the stats lines | Below is the instruction that describes the task:
### Input:
Hit a given http url and return the stats lines
### Response:
def _fetch_url_data(self, url, username, password, verify, custom_headers):
''' Hit a given http url and return the stats lines '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
custom_headers.update(headers(self.agentConfig))
self.log.debug("Fetching haproxy stats from url: %s" % url)
response = requests.get(
url, auth=auth, headers=custom_headers, verify=verify, timeout=self.default_integration_http_timeout
)
response.raise_for_status()
# it only needs additional decoding in py3, so skip it if it's py2
if PY2:
return response.content.splitlines()
else:
content = response.content
# If the content is a string, it can't be decoded again
# But if it's bytes, it can be decoded.
# So, check if it has the decode method
decode_fn = getattr(content, "decode", None)
if callable(decode_fn):
content = content.decode('utf-8')
return content.splitlines() |
def count_objects(self):
"""Count the objects of a repository.
The method returns the total number of objects (packed and unpacked)
available on the repository.
:raises RepositoryError: when an error occurs counting the objects
of a repository
"""
cmd_count = ['git', 'count-objects', '-v']
outs = self._exec(cmd_count, cwd=self.dirpath, env=self.gitenv)
outs = outs.decode('utf-8', errors='surrogateescape').rstrip()
try:
cobjs = {k: v for k, v in (x.split(': ') for x in outs.split('\n'))}
nobjs = int(cobjs['count']) + int(cobjs['in-pack'])
except KeyError as e:
error = "unable to parse 'count-objects' output; reason: '%s' entry not found" \
% e.args[0]
raise RepositoryError(cause=error)
except ValueError as e:
error = "unable to parse 'count-objects' output; reason: %s" % str(e)
raise RepositoryError(cause=error)
logger.debug("Git %s repository has %s objects",
self.uri, str(nobjs))
return nobjs | Count the objects of a repository.
The method returns the total number of objects (packed and unpacked)
available on the repository.
:raises RepositoryError: when an error occurs counting the objects
of a repository | Below is the instruction that describes the task:
### Input:
Count the objects of a repository.
The method returns the total number of objects (packed and unpacked)
available on the repository.
:raises RepositoryError: when an error occurs counting the objects
of a repository
### Response:
def count_objects(self):
"""Count the objects of a repository.
The method returns the total number of objects (packed and unpacked)
available on the repository.
:raises RepositoryError: when an error occurs counting the objects
of a repository
"""
cmd_count = ['git', 'count-objects', '-v']
outs = self._exec(cmd_count, cwd=self.dirpath, env=self.gitenv)
outs = outs.decode('utf-8', errors='surrogateescape').rstrip()
try:
cobjs = {k: v for k, v in (x.split(': ') for x in outs.split('\n'))}
nobjs = int(cobjs['count']) + int(cobjs['in-pack'])
except KeyError as e:
error = "unable to parse 'count-objects' output; reason: '%s' entry not found" \
% e.args[0]
raise RepositoryError(cause=error)
except ValueError as e:
error = "unable to parse 'count-objects' output; reason: %s" % str(e)
raise RepositoryError(cause=error)
logger.debug("Git %s repository has %s objects",
self.uri, str(nobjs))
return nobjs |
def toggleswitch() -> AnnData:
"""Simulated toggleswitch.
Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature
(2000) <https://doi.org/10.1038/35002131>`__.
Simulate via :func:`~scanpy.api.sim`.
Returns
-------
Annotated data matrix.
"""
filename = os.path.dirname(__file__) + '/toggleswitch.txt'
adata = sc.read(filename, first_column_names=True)
adata.uns['iroot'] = 0
return adata | Simulated toggleswitch.
Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature
(2000) <https://doi.org/10.1038/35002131>`__.
Simulate via :func:`~scanpy.api.sim`.
Returns
-------
Annotated data matrix. | Below is the instruction that describes the task:
### Input:
Simulated toggleswitch.
Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature
(2000) <https://doi.org/10.1038/35002131>`__.
Simulate via :func:`~scanpy.api.sim`.
Returns
-------
Annotated data matrix.
### Response:
def toggleswitch() -> AnnData:
"""Simulated toggleswitch.
Data obtained simulating a simple toggleswitch `Gardner *et al.*, Nature
(2000) <https://doi.org/10.1038/35002131>`__.
Simulate via :func:`~scanpy.api.sim`.
Returns
-------
Annotated data matrix.
"""
filename = os.path.dirname(__file__) + '/toggleswitch.txt'
adata = sc.read(filename, first_column_names=True)
adata.uns['iroot'] = 0
return adata |
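A short usage sketch (assumes scanpy and its bundled toggleswitch.txt data file are installed):
adata = toggleswitch()
print(adata.shape, adata.uns['iroot'])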
def mag_to_fnu(self, mag):
"""Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case.
"""
if self.native_flux_kind == 'flam':
return flam_ang_to_fnu_cgs(self.mag_to_flam(mag), self.pivot_wavelength())
raise PKError('don\'t know how to get f_ν from mag for bandpass %s/%s',
self.telescope, self.band) | Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case. | Below is the instruction that describes the task:
### Input:
Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case.
### Response:
def mag_to_fnu(self, mag):
"""Convert a magnitude in this band to a f_ν flux density.
It is assumed that the magnitude has been computed in the appropriate
photometric system. The definition of "appropriate" will vary from
case to case.
"""
if self.native_flux_kind == 'flam':
return flam_ang_to_fnu_cgs(self.mag_to_flam(mag), self.pivot_wavelength())
raise PKError('don\'t know how to get f_ν from mag for bandpass %s/%s',
self.telescope, self.band) |
def run(self, oslom_exec, oslom_args, log_filename):
"""Run OSLOM and wait for the process to finish."""
args = [oslom_exec, "-f", self.get_path(OslomRunner.TMP_EDGES_FILE)]
args.extend(oslom_args)
with open(log_filename, "w") as logwriter:
start_time = time.time()
retval = subprocess.call(
args, cwd=self.working_dir,
stdout=logwriter, stderr=subprocess.STDOUT)
self.last_result = {
"args": args, "retval": retval,
"time": time.time() - start_time,
"output_dir": self.get_path(
"{}_oslo_files".format(OslomRunner.TMP_EDGES_FILE))
}
return self.last_result | Run OSLOM and wait for the process to finish. | Below is the instruction that describes the task:
### Input:
Run OSLOM and wait for the process to finish.
### Response:
def run(self, oslom_exec, oslom_args, log_filename):
"""Run OSLOM and wait for the process to finish."""
args = [oslom_exec, "-f", self.get_path(OslomRunner.TMP_EDGES_FILE)]
args.extend(oslom_args)
with open(log_filename, "w") as logwriter:
start_time = time.time()
retval = subprocess.call(
args, cwd=self.working_dir,
stdout=logwriter, stderr=subprocess.STDOUT)
self.last_result = {
"args": args, "retval": retval,
"time": time.time() - start_time,
"output_dir": self.get_path(
"{}_oslo_files".format(OslomRunner.TMP_EDGES_FILE))
}
return self.last_result |
def add_component(self, kind, **kwargs):
"""
Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for
you and can be accessed by the 'component' attribute of the returned
ParameterSet.
>>> b.add_component(component.star)
or
>>> b.add_component('orbit', period=2.5)
Available kinds include:
* :func:`phoebe.parameters.component.star`
* :func:`phoebe.parameters.component.orbit`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.component` module
(ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created
component
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
"""
func = _get_add_func(component, kind)
if kwargs.get('component', False) is None:
# then we want to apply the default below, so let's pop for now
_ = kwargs.pop('component')
kwargs.setdefault('component',
self._default_label(func.func_name,
**{'context': 'component',
'kind': func.func_name}))
if kwargs.pop('check_label', True):
self._check_label(kwargs['component'])
params, constraints = func(**kwargs)
metawargs = {'context': 'component',
'component': kwargs['component'],
'kind': func.func_name}
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['func'] = func.func_name
self._add_history(redo_func='add_component',
redo_kwargs=redo_kwargs,
undo_func='remove_component',
undo_kwargs={'component': kwargs['component']})
for constraint in constraints:
self.add_constraint(*constraint)
# since we've already processed (so that we can get the new qualifiers),
# we'll only raise a warning
self._kwargs_checks(kwargs, warning_only=True)
# return params
return self.get_component(**metawargs) | Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for
you and can be accessed by the 'component' attribute of the returned
ParameterSet.
>>> b.add_component(component.star)
or
>>> b.add_component('orbit', period=2.5)
Available kinds include:
* :func:`phoebe.parameters.component.star`
* :func:`phoebe.parameters.component.orbit`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.component` module
(ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created
component
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented | Below is the instruction that describes the task:
### Input:
Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for
you and can be accessed by the 'component' attribute of the returned
ParameterSet.
>>> b.add_component(component.star)
or
>>> b.add_component('orbit', period=2.5)
Available kinds include:
* :func:`phoebe.parameters.component.star`
* :func:`phoebe.parameters.component.orbit`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.component` module
(ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created
component
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
### Response:
def add_component(self, kind, **kwargs):
"""
Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for
you and can be accessed by the 'component' attribute of the returned
ParameterSet.
>>> b.add_component(component.star)
or
>>> b.add_component('orbit', period=2.5)
Available kinds include:
* :func:`phoebe.parameters.component.star`
* :func:`phoebe.parameters.component.orbit`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.component` module
(ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created
component
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
"""
func = _get_add_func(component, kind)
if kwargs.get('component', False) is None:
# then we want to apply the default below, so let's pop for now
_ = kwargs.pop('component')
kwargs.setdefault('component',
self._default_label(func.func_name,
**{'context': 'component',
'kind': func.func_name}))
if kwargs.pop('check_label', True):
self._check_label(kwargs['component'])
params, constraints = func(**kwargs)
metawargs = {'context': 'component',
'component': kwargs['component'],
'kind': func.func_name}
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['func'] = func.func_name
self._add_history(redo_func='add_component',
redo_kwargs=redo_kwargs,
undo_func='remove_component',
undo_kwargs={'component': kwargs['component']})
for constraint in constraints:
self.add_constraint(*constraint)
# since we've already processed (so that we can get the new qualifiers),
# we'll only raise a warning
self._kwargs_checks(kwargs, warning_only=True)
# return params
return self.get_component(**metawargs) |
def do_default(value, default_value=u'', boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
"""
if (boolean and not value) or isinstance(value, Undefined):
return default_value
return value | If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }} | Below is the instruction that describes the task:
### Input:
If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
### Response:
def do_default(value, default_value=u'', boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
"""
if (boolean and not value) or isinstance(value, Undefined):
return default_value
return value |
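Outside of templates the filter is an ordinary function; a hedged sketch of direct calls (return values follow from the code above):
do_default('', 'the string was empty', boolean=True)   # -> 'the string was empty'
do_default('value', 'fallback')                        # -> 'value'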
def parse(self, valstr):
# type: (bytes) -> None
'''
A method to parse an El Torito Validation Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Validation Entry out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Validation Entry already initialized')
(header_id, self.platform_id, reserved_unused, self.id_string,
self.checksum, keybyte1,
keybyte2) = struct.unpack_from(self.FMT, valstr, 0)
if header_id != 1:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry header ID not 1')
if self.platform_id not in (0, 1, 2):
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry platform ID not valid')
if keybyte1 != 0x55:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry first keybyte not 0x55')
if keybyte2 != 0xaa:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry second keybyte not 0xaa')
# Now that we've done basic checking, calculate the checksum of the
# validation entry and make sure it is right.
if self._checksum(valstr) != 0:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry checksum not correct')
self._initialized = True | A method to parse an El Torito Validation Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Validation Entry out of.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
A method to parse an El Torito Validation Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Validation Entry out of.
Returns:
Nothing.
### Response:
def parse(self, valstr):
# type: (bytes) -> None
'''
A method to parse an El Torito Validation Entry out of a string.
Parameters:
valstr - The string to parse the El Torito Validation Entry out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Validation Entry already initialized')
(header_id, self.platform_id, reserved_unused, self.id_string,
self.checksum, keybyte1,
keybyte2) = struct.unpack_from(self.FMT, valstr, 0)
if header_id != 1:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry header ID not 1')
if self.platform_id not in (0, 1, 2):
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry platform ID not valid')
if keybyte1 != 0x55:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry first keybyte not 0x55')
if keybyte2 != 0xaa:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry second keybyte not 0xaa')
# Now that we've done basic checking, calculate the checksum of the
# validation entry and make sure it is right.
if self._checksum(valstr) != 0:
raise pycdlibexception.PyCdlibInvalidISO('El Torito Validation entry checksum not correct')
self._initialized = True |
def sorted_releases(self):
"""
Releases sorted by version.
"""
releases = [(parse_version(release.version), release)
for release in self.releases]
releases.sort(reverse=True)
return [release[1] for release in releases] | Releases sorted by version. | Below is the instruction that describes the task:
### Input:
Releases sorted by version.
### Response:
def sorted_releases(self):
"""
Releases sorted by version.
"""
releases = [(parse_version(release.version), release)
for release in self.releases]
releases.sort(reverse=True)
return [release[1] for release in releases] |
def normalize(self, address, **kwargs):
"""Make the address more compareable."""
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses | Make the address more comparable. | Below is the instruction that describes the task:
### Input:
Make the address more comparable.
### Response:
def normalize(self, address, **kwargs):
"""Make the address more compareable."""
# TODO: normalize well-known parts like "Street", "Road", etc.
# TODO: consider using https://github.com/openvenues/pypostal
addresses = super(AddressType, self).normalize(address, **kwargs)
return addresses |
def lowPass(self, *args):
"""
Creates a copy of the signal with the low pass applied, args specified are passed through to _butter.
:return:
"""
return Signal(self._butter(self.samples, 'low', *args), fs=self.fs) | Creates a copy of the signal with the low pass applied, args specified are passed through to _butter.
:return: | Below is the instruction that describes the task:
### Input:
Creates a copy of the signal with the low pass applied, args specified are passed through to _butter.
:return:
### Response:
def lowPass(self, *args):
"""
Creates a copy of the signal with the low pass applied, args specified are passed through to _butter.
:return:
"""
return Signal(self._butter(self.samples, 'low', *args), fs=self.fs) |
def make_tex_table(inputlist, outputfile, close=False, fmt=None,
**kwargs):
"""
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None
"""
output_str = ""
if fmt is None:
fmt = {}
for row in inputlist:
for key, val in enumerate(row):
if val is None:
output_str += r'\text{{{}}}'.format(
str(kwargs.get("nonestring", "None"))
)
else:
# get default
if np.isscalar(val):
temp_str_fmt = "$\\num{{" + fmt.get(
key, "{:g}") + "}}$"
else:
temp_str_fmt = fmt.get(key, "{}")
temp_str = temp_str_fmt.format(val).replace("+", "")
output_str += temp_str + "&"
output_str = output_str[:-1]
output_str += "\\\\\n"
outputfile.write(output_str)
if close:
outputfile.close() | Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None | Below is the instruction that describes the task:
### Input:
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None
### Response:
def make_tex_table(inputlist, outputfile, close=False, fmt=None,
**kwargs):
"""
Parse table from inputlist
Args:
inputlist: list
List to parse
outputfile: file
.tex file to write
fmt: dictionary
key: integer
column index starting with 0
values: string
format string. eg "{:g}"
**kwargs:
nonestring: string
string when objecttype is None
Returns:
None
"""
output_str = ""
if fmt is None:
fmt = {}
for row in inputlist:
for key, val in enumerate(row):
if val is None:
output_str += r'\text{{{}}}'.format(
str(kwargs.get("nonestring", "None"))
)
else:
# get default
if np.isscalar(val):
temp_str_fmt = "$\\num{{" + fmt.get(
key, "{:g}") + "}}$"
else:
temp_str_fmt = fmt.get(key, "{}")
temp_str = temp_str_fmt.format(val).replace("+", "")
output_str += temp_str + "&"
output_str = output_str[:-1]
output_str += "\\\\\n"
outputfile.write(output_str)
if close:
outputfile.close() |
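A minimal call sketch (values and the format spec are illustrative; rows are written as LaTeX table lines using \num{}):
import io
buf = io.StringIO()
make_tex_table([[1.2345, 2.0], [3.5, 4.0]], buf, fmt={0: "{:.2f}"})
print(buf.getvalue())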
def derep_concat_split(data, sample, nthreads, force):
"""
Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads.
"""
## report location for debugging
LOGGER.info("INSIDE derep %s", sample.name)
## MERGED ASSEMBLIES ONLY:
## concatenate edits files within Samples. Returns a new sample.files.edits
## with the concat file. No change if not merged Assembly.
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
if not force:
if not os.path.exists(mergefile):
sample.files.edits = concat_multiple_edits(data, sample)
else:
LOGGER.info("skipped concat_multiple_edits: {} exists"\
.format(mergefile))
else:
sample.files.edits = concat_multiple_edits(data, sample)
## PAIRED DATA ONLY:
## Denovo: merge or concat fastq pairs [sample.files.pairs]
## Reference: only concat fastq pairs []
## Denovo + Reference: ...
if 'pair' in data.paramsdict['datatype']:
## the output file handle for merged reads
## modify behavior of merging vs concating if reference
if "reference" in data.paramsdict["assembly_method"]:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 0, 0)
else:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 1, 1)
## store results
sample.files.edits = [(mergefile, )]
sample.stats.reads_merged = nmerged
## 3rad uses random adapters to identify pcr duplicates. We will
## remove pcr dupes here. Basically append the radom adapter to
## each sequence, do a regular old vsearch derep, then trim
## off the adapter, and push it down the pipeline. This will
## remove all identical seqs with identical random i5 adapters.
if "3rad" in data.paramsdict["datatype"]:
declone_3rad(data, sample)
derep_and_sort(data,
os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads)
else:
## convert fastq to fasta, then derep and sort reads by their size.
## we pass in only one file b/c paired should be merged by now.
derep_and_sort(data,
sample.files.edits[0][0],
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads) | Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads. | Below is the instruction that describes the task:
### Input:
Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads.
### Response:
def derep_concat_split(data, sample, nthreads, force):
"""
Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads.
"""
## report location for debugging
LOGGER.info("INSIDE derep %s", sample.name)
## MERGED ASSEMBLIES ONLY:
## concatenate edits files within Samples. Returns a new sample.files.edits
## with the concat file. No change if not merged Assembly.
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
if not force:
if not os.path.exists(mergefile):
sample.files.edits = concat_multiple_edits(data, sample)
else:
LOGGER.info("skipped concat_multiple_edits: {} exists"\
.format(mergefile))
else:
sample.files.edits = concat_multiple_edits(data, sample)
## PAIRED DATA ONLY:
## Denovo: merge or concat fastq pairs [sample.files.pairs]
## Reference: only concat fastq pairs []
## Denovo + Reference: ...
if 'pair' in data.paramsdict['datatype']:
## the output file handle for merged reads
## modify behavior of merging vs concating if reference
if "reference" in data.paramsdict["assembly_method"]:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 0, 0)
else:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 1, 1)
## store results
sample.files.edits = [(mergefile, )]
sample.stats.reads_merged = nmerged
## 3rad uses random adapters to identify pcr duplicates. We will
## remove pcr dupes here. Basically append the random adapter to
## each sequence, do a regular old vsearch derep, then trim
## off the adapter, and push it down the pipeline. This will
## remove all identical seqs with identical random i5 adapters.
if "3rad" in data.paramsdict["datatype"]:
declone_3rad(data, sample)
derep_and_sort(data,
os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads)
else:
## convert fastq to fasta, then derep and sort reads by their size.
## we pass in only one file b/c paired should be merged by now.
derep_and_sort(data,
sample.files.edits[0][0],
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads) |
def range_by_lex(self, low, high, start=None, num=None, reverse=False):
"""
Return a range of members in a sorted set, by lexicographical range.
"""
if reverse:
fn = self.database.zrevrangebylex
low, high = high, low
else:
fn = self.database.zrangebylex
return fn(self.key, low, high, start, num) | Return a range of members in a sorted set, by lexicographical range. | Below is the instruction that describes the task:
### Input:
Return a range of members in a sorted set, by lexicographical range.
### Response:
def range_by_lex(self, low, high, start=None, num=None, reverse=False):
"""
Return a range of members in a sorted set, by lexicographical range.
"""
if reverse:
fn = self.database.zrevrangebylex
low, high = high, low
else:
fn = self.database.zrangebylex
return fn(self.key, low, high, start, num) |
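A hedged usage sketch (assumes a sorted-set wrapper instance bound to a Redis-backed database; the key bounds follow Redis lex syntax and are illustrative):
# Up to 10 members between '[a' and '[z', highest first.
members = zset.range_by_lex('[a', '[z', start=0, num=10, reverse=True)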
def cdx_limit(cdx_iter, limit):
"""
limit cdx to at most `limit`.
"""
# for cdx, _ in itertools.izip(cdx_iter, xrange(limit)):
# yield cdx
return (cdx for cdx, _ in zip(cdx_iter, range(limit))) | limit cdx to at most `limit`. | Below is the instruction that describes the task:
### Input:
limit cdx to at most `limit`.
### Response:
def cdx_limit(cdx_iter, limit):
"""
limit cdx to at most `limit`.
"""
# for cdx, _ in itertools.izip(cdx_iter, xrange(limit)):
# yield cdx
return (cdx for cdx, _ in zip(cdx_iter, range(limit))) |
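For example (illustrative input):
first_three = list(cdx_limit(iter(['a', 'b', 'c', 'd']), 3))   # ['a', 'b', 'c']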
def _validate_image_rank(self, img_array):
"""
Images must be either 2D or 3D.
"""
if img_array.ndim == 1 or img_array.ndim > 3:
msg = "{0}D imagery is not allowed.".format(img_array.ndim)
raise IOError(msg) | Images must be either 2D or 3D. | Below is the instruction that describes the task:
### Input:
Images must be either 2D or 3D.
### Response:
def _validate_image_rank(self, img_array):
"""
Images must be either 2D or 3D.
"""
if img_array.ndim == 1 or img_array.ndim > 3:
msg = "{0}D imagery is not allowed.".format(img_array.ndim)
raise IOError(msg) |
def _updateInhibitionRadius(self):
"""
Update the inhibition radius. The inhibition radius is a measure of the
square (or hypersquare) of columns that each column is "connected to"
on average. Since columns are not connected to each other directly, we
determine this quantity by first figuring out how many *inputs* a column is
connected to, and then multiplying it by the total number of columns that
exist for each input. For multiple dimensions the aforementioned
calculations are averaged over all dimensions of inputs and columns. This
value is meaningless if global inhibition is enabled.
"""
if self._globalInhibition:
self._inhibitionRadius = int(self._columnDimensions.max())
return
avgConnectedSpan = numpy.average(
[self._avgConnectedSpanForColumnND(i)
for i in xrange(self._numColumns)]
)
columnsPerInput = self._avgColumnsPerInput()
diameter = avgConnectedSpan * columnsPerInput
radius = (diameter - 1) / 2.0
radius = max(1.0, radius)
self._inhibitionRadius = int(radius + 0.5) | Update the inhibition radius. The inhibition radius is a measure of the
square (or hypersquare) of columns that each column is "connected to"
on average. Since columns are not connected to each other directly, we
determine this quantity by first figuring out how many *inputs* a column is
connected to, and then multiplying it by the total number of columns that
exist for each input. For multiple dimensions the aforementioned
calculations are averaged over all dimensions of inputs and columns. This
value is meaningless if global inhibition is enabled. | Below is the instruction that describes the task:
### Input:
Update the inhibition radius. The inhibition radius is a measure of the
square (or hypersquare) of columns that each column is "connected to"
on average. Since columns are not connected to each other directly, we
determine this quantity by first figuring out how many *inputs* a column is
connected to, and then multiplying it by the total number of columns that
exist for each input. For multiple dimensions the aforementioned
calculations are averaged over all dimensions of inputs and columns. This
value is meaningless if global inhibition is enabled.
### Response:
def _updateInhibitionRadius(self):
"""
Update the inhibition radius. The inhibition radius is a measure of the
square (or hypersquare) of columns that each a column is "connected to"
on average. Since columns are are not connected to each other directly, we
determine this quantity by first figuring out how many *inputs* a column is
connected to, and then multiplying it by the total number of columns that
exist for each input. For multiple dimension the aforementioned
calculations are averaged over all dimensions of inputs and columns. This
value is meaningless if global inhibition is enabled.
"""
if self._globalInhibition:
self._inhibitionRadius = int(self._columnDimensions.max())
return
avgConnectedSpan = numpy.average(
[self._avgConnectedSpanForColumnND(i)
for i in xrange(self._numColumns)]
)
columnsPerInput = self._avgColumnsPerInput()
diameter = avgConnectedSpan * columnsPerInput
radius = (diameter - 1) / 2.0
radius = max(1.0, radius)
self._inhibitionRadius = int(radius + 0.5) |
def anti_alias(image):
"""
Apply Anti-Alias filter to a binary image
ANTsR function: N/A
Arguments
---------
image : ANTsImage
binary image to which anti-aliasing will be applied
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> mask = ants.get_mask(img)
>>> mask_aa = ants.anti_alias(mask)
>>> ants.plot(mask)
>>> ants.plot(mask_aa)
"""
if image.pixeltype != 'unsigned char':
if image.max() > 255.:
image = (image - image.max()) / (image.max() - image.min())
image = image.clone('unsigned char')
libfn = utils.get_lib_fn('antiAlias%s' % image._libsuffix)
new_ptr = libfn(image.pointer)
return iio.ANTsImage(pixeltype='float', dimension=image.dimension,
components=image.components, pointer=new_ptr) | Apply Anti-Alias filter to a binary image
ANTsR function: N/A
Arguments
---------
image : ANTsImage
binary image to which anti-aliasing will be applied
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> mask = ants.get_mask(img)
>>> mask_aa = ants.anti_alias(mask)
>>> ants.plot(mask)
>>> ants.plot(mask_aa) | Below is the instruction that describes the task:
### Input:
Apply Anti-Alias filter to a binary image
ANTsR function: N/A
Arguments
---------
image : ANTsImage
binary image to which anti-aliasing will be applied
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> mask = ants.get_mask(img)
>>> mask_aa = ants.anti_alias(mask)
>>> ants.plot(mask)
>>> ants.plot(mask_aa)
### Response:
def anti_alias(image):
"""
Apply Anti-Alias filter to a binary image
ANTsR function: N/A
Arguments
---------
image : ANTsImage
binary image to which anti-aliasing will be applied
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> mask = ants.get_mask(img)
>>> mask_aa = ants.anti_alias(mask)
>>> ants.plot(mask)
>>> ants.plot(mask_aa)
"""
if image.pixeltype != 'unsigned char':
if image.max() > 255.:
image = (image - image.max()) / (image.max() - image.min())
image = image.clone('unsigned char')
libfn = utils.get_lib_fn('antiAlias%s' % image._libsuffix)
new_ptr = libfn(image.pointer)
return iio.ANTsImage(pixeltype='float', dimension=image.dimension,
components=image.components, pointer=new_ptr) |
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self.images.AddItem(tag, event) | Processes an image by adding it to accumulated state. | Below is the instruction that describes the task:
### Input:
Processes an image by adding it to accumulated state.
### Response:
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self.images.AddItem(tag, event) |
def sample(self, num):
"""
Returns a new table with rows randomly sampled.
We create a mask with `num` True bools, and fill it with False bools
until it is the length of the table. We shuffle it, and apply that
mask to the table.
"""
if num > len(self):
return self.copy()
elif num < 0:
raise IndexError("Cannot sample a negative number of rows "
"from a DataTable")
random_row_mask = ([True] * num) + ([False] * (len(self) - num))
shuffle(random_row_mask)
sampled_table = self.mask(random_row_mask)
random_col_name = 'random_sorting_column'
while random_col_name in sampled_table:
random_col_name = '%030x' % randrange(16**30)
sampled_table[random_col_name] = [random()
for _ in xrange(len(sampled_table))]
sampled_table.sort(random_col_name, inplace=True)
del sampled_table[random_col_name]
return sampled_table | Returns a new table with rows randomly sampled.
We create a mask with `num` True bools, and fill it with False bools
until it is the length of the table. We shuffle it, and apply that
mask to the table. | Below is the instruction that describes the task:
### Input:
Returns a new table with rows randomly sampled.
We create a mask with `num` True bools, and fill it with False bools
until it is the length of the table. We shuffle it, and apply that
mask to the table.
### Response:
def sample(self, num):
"""
Returns a new table with rows randomly sampled.
We create a mask with `num` True bools, and fill it with False bools
until it is the length of the table. We shuffle it, and apply that
mask to the table.
"""
if num > len(self):
return self.copy()
elif num < 0:
raise IndexError("Cannot sample a negative number of rows "
"from a DataTable")
random_row_mask = ([True] * num) + ([False] * (len(self) - num))
shuffle(random_row_mask)
sampled_table = self.mask(random_row_mask)
random_col_name = 'random_sorting_column'
while random_col_name in sampled_table:
random_col_name = '%030x' % randrange(16**30)
sampled_table[random_col_name] = [random()
for _ in xrange(len(sampled_table))]
sampled_table.sort(random_col_name, inplace=True)
del sampled_table[random_col_name]
return sampled_table |
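A hedged usage sketch (assumes a populated DataTable instance named table):
subset = table.sample(100)             # new table with 100 randomly chosen rows
clone = table.sample(len(table) + 1)   # asking for more rows than exist returns a copy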
def generic_insert_module(module_name, args, **kwargs):
"""
In general we have an initial template and then insert new data, so we don't repeat the schema for each module
:param module_name: String with module name
:param **kwargs: Args to be rendered in template
"""
file = create_or_open(
'{}.py'.format(module_name),
os.path.join(
BASE_TEMPLATES_DIR,
'{}_initial.py.tmpl'.format(module_name)
),
args
)
render_template_with_args_in_file(
file,
os.path.join(
BASE_TEMPLATES_DIR,
'{}.py.tmpl'.format(module_name)
),
**kwargs
)
file.close() | In general we have an initial template and then insert new data, so we don't repeat the schema for each module
:param module_name: String with module name
:param **kwargs: Args to be rendered in template | Below is the instruction that describes the task:
### Input:
In general we have an initial template and then insert new data, so we don't repeat the schema for each module
:param module_name: String with module name
:param **kwargs: Args to be rendered in template
### Response:
def generic_insert_module(module_name, args, **kwargs):
"""
In general we have an initial template and then insert new data, so we don't repeat the schema for each module
:param module_name: String with module name
:param **kwargs: Args to be rendered in template
"""
file = create_or_open(
'{}.py'.format(module_name),
os.path.join(
BASE_TEMPLATES_DIR,
'{}_initial.py.tmpl'.format(module_name)
),
args
)
render_template_with_args_in_file(
file,
os.path.join(
BASE_TEMPLATES_DIR,
'{}.py.tmpl'.format(module_name)
),
**kwargs
)
file.close() |
def cp(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
new_data_uoa - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
(move) - if 'yes', remove old
(keep_old_uid) - if 'yes', keep old UID
(without_files) - if 'yes', do not move/copy files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'add' function
}
"""
move=i.get('move','')
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
import shutil
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa=='': return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': return {'return':1, 'error':'data UOA is not defined'}
# Attempt to load
ii={'module_uoa':muoa, 'data_uoa':duoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
rdd=r
muid=r['module_uid']
duoa=r['data_uoa']
duid=r['data_uid']
p=r['path']
dd=r.get('dict',{})
di=r.get('info',{})
du=r.get('updates',{})
dx=r.get('desc',{})
if move!='yes':
control=di.get('control',{})
control['version']=cfg['version']
rdt=get_current_date_time({})
control['iso_datetime']=rdt['iso_datetime']
di['control']=control
# Check if writing is allowed
ii={'module_uoa':muoa, 'module_uid':r['module_uid'], 'repo_uoa':ruoa, 'repo_uid':r['repo_uid']}
r=check_writing(ii)
if r['return']>0: return r
# Check new CID
nruoa=i.get('new_repo_uoa','')
nmuoa=i.get('new_module_uoa','')
nduoa=i.get('new_data_uoa','')
nduid=i.get('new_data_uid','')
xcids=i.get('xcids',[])
if len(xcids)>0:
xcid=xcids[0]
nduoa=xcid.get('data_uoa','')
if nduoa=='': nduoa=duoa
x=xcid.get('module_uoa','')
if x!='': nmuoa=x
x=xcid.get('repo_uoa','')
if x!='': nruoa=x
if i.get('keep_old_uid','')=='yes': nduid=duid
if nmuoa=='': nmuoa=muoa
if nruoa=='': nruoa=ruoa
# Adding new entry
if nruoa==ruoa and nmuoa==muoa and nduid==duid:
return {'return':1, 'error':'moving within the same directory - use "rename" instead'}
# Check if writing is allowed to the new repo
ii={'repo_uoa':nruoa}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
ii={'module_uoa':nmuoa, 'data_uoa': nduoa, 'dict':dd, 'info':di,
'updates':du, 'desc':dx, 'ignore_update':'yes'}
if nduid!='': ii['data_uid']=nduid
if nruoa!='': ii['repo_uoa']=nruoa
r=add(ii)
if r['return']>0: return r
pn=r['path']
nmuid=r['module_uid']
# Recursively copying all files (except .cm)
if i.get('without_files','')!='yes':
rx=list_all_files({'path':p})
if rx['return']>0: return rx
for q in rx['list']:
p1=os.path.join(p,q)
pn1=os.path.join(pn,q)
# Create if dir
pn1d=os.path.dirname(pn1)
if not os.path.isdir(pn1d): os.makedirs(pn1d)
shutil.copy(p1,pn1)
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
pp=os.path.split(pn)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
tt='copied'
# If move, remove old one
if move=='yes':
tt='moved'
ii={'module_uoa':muoa, 'data_uoa': duoa}
if ruoa!='': ii['repo_uoa']=ruoa
rx=rm(ii)
if rx['return']>0: return rx
# Check if index and add new
if cfg.get('use_indexing','')=='yes':
if is_uid(nduoa): nduid=nduoa
path='/'+nmuid+'/'+nduid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rdd})
if ri['return']>0: return ri
if o=='con':
out('Entry '+muoa+':'+duoa+' was successfully '+tt+'!')
return r | Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
new_data_uoa - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
(move) - if 'yes', remove old
(keep_old_uid) - if 'yes', keep old UID
(without_files) - if 'yes', do not move/copy files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'add' function
} | Below is the instruction that describes the task:
### Input:
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
new_data_uoa - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
(move) - if 'yes', remove old
(keep_old_uid) - if 'yes', keep old UID
(without_files) - if 'yes', do not move/copy files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'add' function
}
### Response:
def cp(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
xcids[0] - {'repo_uoa', 'module_uoa', 'data_uoa'} - new CID
or
(new_repo_uoa) - new repo UOA
(new_module_uoa) - new module UOA
new_data_uoa - new data alias
(new_data_uid) - new data UID (leave empty to generate new one)
(move) - if 'yes', remove old
(keep_old_uid) - if 'yes', keep old UID
(without_files) - if 'yes', do not move/copy files
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of 'add' function
}
"""
move=i.get('move','')
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
import shutil
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if muoa=='': return {'return':1, 'error':'module UOA is not defined'}
if duoa=='': return {'return':1, 'error':'data UOA is not defined'}
# Attempt to load
ii={'module_uoa':muoa, 'data_uoa':duoa}
if ruoa!='': ii['repo_uoa']=ruoa
r=load(ii)
if r['return']>0: return r
rdd=r
muid=r['module_uid']
duoa=r['data_uoa']
duid=r['data_uid']
p=r['path']
dd=r.get('dict',{})
di=r.get('info',{})
du=r.get('updates',{})
dx=r.get('desc',{})
if move!='yes':
control=di.get('control',{})
control['version']=cfg['version']
rdt=get_current_date_time({})
control['iso_datetime']=rdt['iso_datetime']
di['control']=control
# Check if writing is allowed
ii={'module_uoa':muoa, 'module_uid':r['module_uid'], 'repo_uoa':ruoa, 'repo_uid':r['repo_uid']}
r=check_writing(ii)
if r['return']>0: return r
# Check new CID
nruoa=i.get('new_repo_uoa','')
nmuoa=i.get('new_module_uoa','')
nduoa=i.get('new_data_uoa','')
nduid=i.get('new_data_uid','')
xcids=i.get('xcids',[])
if len(xcids)>0:
xcid=xcids[0]
nduoa=xcid.get('data_uoa','')
if nduoa=='': nduoa=duoa
x=xcid.get('module_uoa','')
if x!='': nmuoa=x
x=xcid.get('repo_uoa','')
if x!='': nruoa=x
if i.get('keep_old_uid','')=='yes': nduid=duid
if nmuoa=='': nmuoa=muoa
if nruoa=='': nruoa=ruoa
# Adding new entry
if nruoa==ruoa and nmuoa==muoa and nduid==duid:
return {'return':1, 'error':'moving within the same directory - use "rename" instead'}
# Check if writing is allowed to the new repo
ii={'repo_uoa':nruoa}
r=check_writing(ii)
if r['return']>0: return r
rd=r.get('repo_dict',{})
rshared=rd.get('shared','')
rsync=rd.get('sync','')
ii={'module_uoa':nmuoa, 'data_uoa': nduoa, 'dict':dd, 'info':di,
'updates':du, 'desc':dx, 'ignore_update':'yes'}
if nduid!='': ii['data_uid']=nduid
if nruoa!='': ii['repo_uoa']=nruoa
r=add(ii)
if r['return']>0: return r
pn=r['path']
nmuid=r['module_uid']
# Recursively copying all files (except .cm)
if i.get('without_files','')!='yes':
rx=list_all_files({'path':p})
if rx['return']>0: return rx
for q in rx['list']:
p1=os.path.join(p,q)
pn1=os.path.join(pn,q)
# Create if dir
pn1d=os.path.dirname(pn1)
if not os.path.isdir(pn1d): os.makedirs(pn1d)
shutil.copy(p1,pn1)
if rshared!='' and rsync=='yes':
ppp=os.getcwd()
pp=os.path.split(pn)
pp0=pp[0]
pp1=pp[1]
os.chdir(pp0)
ss=cfg['repo_types'][rshared]['add'].replace('$#files#$', pp1)
rx=os.system(ss)
os.chdir(ppp)
tt='copied'
# If move, remove old one
if move=='yes':
tt='moved'
ii={'module_uoa':muoa, 'data_uoa': duoa}
if ruoa!='': ii['repo_uoa']=ruoa
rx=rm(ii)
if rx['return']>0: return rx
# Check if index and add new
if cfg.get('use_indexing','')=='yes':
if is_uid(nduoa): nduid=nduoa
path='/'+nmuid+'/'+nduid+'/1'
ri=access_index_server({'request':'DELETE', 'path':path})
if ri['return']>0: return ri
ri=access_index_server({'request':'PUT', 'path':path, 'dict':rdd})
if ri['return']>0: return ri
if o=='con':
out('Entry '+muoa+':'+duoa+' was successfully '+tt+'!')
return r |
async def sync_services(self):
"""Poll the current state of all services.
Returns:
dict: A dictionary mapping service name to service status
"""
services = {}
servs = await self.list_services()
for i, serv in enumerate(servs):
info = await self.service_info(serv)
status = await self.service_status(serv)
messages = await self.get_messages(serv)
headline = await self.get_headline(serv)
services[serv] = states.ServiceState(info['short_name'], info['long_name'], info['preregistered'], i)
services[serv].state = status['numeric_status']
for message in messages:
services[serv].post_message(message.level, message.message, message.count, message.created)
if headline is not None:
services[serv].set_headline(headline.level, headline.message, headline.created)
return services | Poll the current state of all services.
Returns:
dict: A dictionary mapping service name to service status | Below is the instruction that describes the task:
### Input:
Poll the current state of all services.
Returns:
dict: A dictionary mapping service name to service status
### Response:
async def sync_services(self):
"""Poll the current state of all services.
Returns:
dict: A dictionary mapping service name to service status
"""
services = {}
servs = await self.list_services()
for i, serv in enumerate(servs):
info = await self.service_info(serv)
status = await self.service_status(serv)
messages = await self.get_messages(serv)
headline = await self.get_headline(serv)
services[serv] = states.ServiceState(info['short_name'], info['long_name'], info['preregistered'], i)
services[serv].state = status['numeric_status']
for message in messages:
services[serv].post_message(message.level, message.message, message.count, message.created)
if headline is not None:
services[serv].set_headline(headline.level, headline.message, headline.created)
return services |
def image_to_file(self, path, get_image=True):
"""Write the image to a file."""
if not self.image_url or get_image:
if not self.refresh_image():
return False
response = requests.get(self.image_url, stream=True)
if response.status_code != 200:
_LOGGER.warning(
"Unexpected response code %s when requesting image: %s",
str(response.status_code), response.text)
raise AbodeException((ERROR.CAM_IMAGE_REQUEST_INVALID))
with open(path, 'wb') as imgfile:
copyfileobj(response.raw, imgfile)
return True | Write the image to a file. | Below is the instruction that describes the task:
### Input:
Write the image to a file.
### Response:
def image_to_file(self, path, get_image=True):
"""Write the image to a file."""
if not self.image_url or get_image:
if not self.refresh_image():
return False
response = requests.get(self.image_url, stream=True)
if response.status_code != 200:
_LOGGER.warning(
"Unexpected response code %s when requesting image: %s",
str(response.status_code), response.text)
raise AbodeException((ERROR.CAM_IMAGE_REQUEST_INVALID))
with open(path, 'wb') as imgfile:
copyfileobj(response.raw, imgfile)
return True |
def sample_mog(prob, mean, var, rng):
"""Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
var : numpy.ndarray
variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
sampling result. Shape --> (batch_num, sample_dim)
"""
gaussian_inds = sample_categorical(prob, rng).astype(numpy.int32)
mean = mean[numpy.arange(mean.shape[0]), gaussian_inds, :]
var = var[numpy.arange(mean.shape[0]), gaussian_inds, :]
ret = sample_normal(mean=mean, var=var, rng=rng)
return ret | Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
var : numpy.ndarray
variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
sampling result. Shape --> (batch_num, sample_dim) | Below is the the instruction that describes the task:
### Input:
Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
var : numpy.ndarray
variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
sampling result. Shape --> (batch_num, sample_dim)
### Response:
def sample_mog(prob, mean, var, rng):
"""Sample from independent mixture of gaussian (MoG) distributions
Each batch is an independent MoG distribution.
Parameters
----------
prob : numpy.ndarray
mixture probability of each gaussian. Shape --> (batch_num, center_num)
mean : numpy.ndarray
mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
var : numpy.ndarray
variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
sampling result. Shape --> (batch_num, sample_dim)
"""
gaussian_inds = sample_categorical(prob, rng).astype(numpy.int32)
mean = mean[numpy.arange(mean.shape[0]), gaussian_inds, :]
var = var[numpy.arange(mean.shape[0]), gaussian_inds, :]
ret = sample_normal(mean=mean, var=var, rng=rng)
return ret |
def _get_enterprise_enrollment_api_admin_users_batch(self, start, end): # pylint: disable=invalid-name
"""
Returns a batched queryset of User objects.
"""
LOGGER.info('Fetching new batch of enterprise enrollment admin users from indexes: %s to %s', start, end)
return User.objects.filter(groups__name=ENTERPRISE_ENROLLMENT_API_ACCESS_GROUP, is_staff=False)[start:end] | Returns a batched queryset of User objects. | Below is the the instruction that describes the task:
### Input:
Returns a batched queryset of User objects.
### Response:
def _get_enterprise_enrollment_api_admin_users_batch(self, start, end): # pylint: disable=invalid-name
"""
Returns a batched queryset of User objects.
"""
LOGGER.info('Fetching new batch of enterprise enrollment admin users from indexes: %s to %s', start, end)
return User.objects.filter(groups__name=ENTERPRISE_ENROLLMENT_API_ACCESS_GROUP, is_staff=False)[start:end] |
def randsample(vec, nr_samples, with_replacement = False):
"""
Draws nr_samples random samples from vec.
"""
if not with_replacement:
return np.random.permutation(vec)[0:nr_samples]
else:
return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)] | Draws nr_samples random samples from vec. | Below is the the instruction that describes the task:
### Input:
Draws nr_samples random samples from vec.
### Response:
def randsample(vec, nr_samples, with_replacement = False):
"""
Draws nr_samples random samples from vec.
"""
if not with_replacement:
return np.random.permutation(vec)[0:nr_samples]
else:
return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)] |
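A minimal usage sketch for randsample (an illustrative addition, not part of the original entry); it assumes numpy is imported as np, matching the function body above:
import numpy as np
np.random.seed(0)                                   # fixed seed only to make the sketch reproducible
data = list(range(10))
print(randsample(data, 3))                          # three distinct elements, drawn without replacement
print(randsample(data, 5, with_replacement=True))   # five draws, duplicates are possible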
def padded_grid_from_shape_psf_shape_and_pixel_scale(cls, shape, psf_shape, pixel_scale):
"""Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale.
The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \
        which are beyond the input shape but will blur light into the 2D array's shape due to the psf.
Parameters
----------
shape : (int, int)
The (y,x) shape of the masked-grid's 2D image in units of pixels.
psf_shape : (int, int)
The shape of the psf which defines the blurring region and therefore size of padding.
pixel_scale : float
The scale of each pixel in arc seconds
"""
padded_shape = (shape[0] + psf_shape[0] - 1, shape[1] + psf_shape[1] - 1)
padded_regular_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(
mask=np.full(padded_shape, False), pixel_scales=(pixel_scale, pixel_scale))
padded_mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=padded_shape, pixel_scale=pixel_scale)
return PaddedRegularGrid(arr=padded_regular_grid, mask=padded_mask, image_shape=shape) | Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale.
The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \
        which are beyond the input shape but will blur light into the 2D array's shape due to the psf.
Parameters
----------
shape : (int, int)
The (y,x) shape of the masked-grid's 2D image in units of pixels.
psf_shape : (int, int)
The shape of the psf which defines the blurring region and therefore size of padding.
pixel_scale : float
The scale of each pixel in arc seconds | Below is the the instruction that describes the task:
### Input:
Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale.
The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \
which are beyond the input shape but will blur light into the 2D array's shape due to the psf.
Parameters
----------
shape : (int, int)
The (y,x) shape of the masked-grid's 2D image in units of pixels.
psf_shape : (int, int)
The shape of the psf which defines the blurring region and therefore size of padding.
pixel_scale : float
The scale of each pixel in arc seconds
### Response:
def padded_grid_from_shape_psf_shape_and_pixel_scale(cls, shape, psf_shape, pixel_scale):
"""Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale.
The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \
        which are beyond the input shape but will blur light into the 2D array's shape due to the psf.
Parameters
----------
shape : (int, int)
The (y,x) shape of the masked-grid's 2D image in units of pixels.
psf_shape : (int, int)
The shape of the psf which defines the blurring region and therefore size of padding.
pixel_scale : float
The scale of each pixel in arc seconds
"""
padded_shape = (shape[0] + psf_shape[0] - 1, shape[1] + psf_shape[1] - 1)
padded_regular_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(
mask=np.full(padded_shape, False), pixel_scales=(pixel_scale, pixel_scale))
padded_mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=padded_shape, pixel_scale=pixel_scale)
return PaddedRegularGrid(arr=padded_regular_grid, mask=padded_mask, image_shape=shape) |
def __studies(self, retention_time):
""" Execute the studies configured for the current backend """
cfg = self.config.get_conf()
if 'studies' not in cfg[self.backend_section] or not \
cfg[self.backend_section]['studies']:
logger.debug('No studies for %s' % self.backend_section)
return
studies = [study for study in cfg[self.backend_section]['studies'] if study.strip() != ""]
if not studies:
logger.debug('No studies for %s' % self.backend_section)
return
logger.debug("Executing studies for %s: %s" % (self.backend_section, studies))
time.sleep(2) # Wait so enrichment has finished in ES
enrich_backend = self._get_enrich_backend()
ocean_backend = self._get_ocean_backend(enrich_backend)
active_studies = []
all_studies = enrich_backend.studies
all_studies_names = [study.__name__ for study in enrich_backend.studies]
# Time to check that configured studies are valid
logger.debug("All studies in %s: %s", self.backend_section, all_studies_names)
logger.debug("Configured studies %s", studies)
cfg_studies_types = [study.split(":")[0] for study in studies]
if not set(cfg_studies_types).issubset(set(all_studies_names)):
logger.error('Wrong studies names for %s: %s', self.backend_section, studies)
raise RuntimeError('Wrong studies names ', self.backend_section, studies)
for study in enrich_backend.studies:
if study.__name__ in cfg_studies_types:
active_studies.append(study)
enrich_backend.studies = active_studies
print("Executing for %s the studies %s" % (self.backend_section,
[study for study in studies]))
studies_args = self.__load_studies()
do_studies(ocean_backend, enrich_backend, studies_args, retention_time=retention_time)
# Return studies to its original value
enrich_backend.studies = all_studies | Execute the studies configured for the current backend | Below is the the instruction that describes the task:
### Input:
Execute the studies configured for the current backend
### Response:
def __studies(self, retention_time):
""" Execute the studies configured for the current backend """
cfg = self.config.get_conf()
if 'studies' not in cfg[self.backend_section] or not \
cfg[self.backend_section]['studies']:
logger.debug('No studies for %s' % self.backend_section)
return
studies = [study for study in cfg[self.backend_section]['studies'] if study.strip() != ""]
if not studies:
logger.debug('No studies for %s' % self.backend_section)
return
logger.debug("Executing studies for %s: %s" % (self.backend_section, studies))
time.sleep(2) # Wait so enrichment has finished in ES
enrich_backend = self._get_enrich_backend()
ocean_backend = self._get_ocean_backend(enrich_backend)
active_studies = []
all_studies = enrich_backend.studies
all_studies_names = [study.__name__ for study in enrich_backend.studies]
# Time to check that configured studies are valid
logger.debug("All studies in %s: %s", self.backend_section, all_studies_names)
logger.debug("Configured studies %s", studies)
cfg_studies_types = [study.split(":")[0] for study in studies]
if not set(cfg_studies_types).issubset(set(all_studies_names)):
logger.error('Wrong studies names for %s: %s', self.backend_section, studies)
raise RuntimeError('Wrong studies names ', self.backend_section, studies)
for study in enrich_backend.studies:
if study.__name__ in cfg_studies_types:
active_studies.append(study)
enrich_backend.studies = active_studies
print("Executing for %s the studies %s" % (self.backend_section,
[study for study in studies]))
studies_args = self.__load_studies()
do_studies(ocean_backend, enrich_backend, studies_args, retention_time=retention_time)
# Return studies to its original value
enrich_backend.studies = all_studies |
def get_stp_mst_detail_output_msti_port_external_path_cost(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
external_path_cost = ET.SubElement(port, "external-path-cost")
external_path_cost.text = kwargs.pop('external_path_cost')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_msti_port_external_path_cost(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
external_path_cost = ET.SubElement(port, "external-path-cost")
external_path_cost.text = kwargs.pop('external_path_cost')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def cv_residuals(self, cv=True):
"""Return the residuals of the cross-validation for the fit data"""
vals = self.cv_values(cv)
return (self.y - vals) / self.dy | Return the residuals of the cross-validation for the fit data | Below is the the instruction that describes the task:
### Input:
Return the residuals of the cross-validation for the fit data
### Response:
def cv_residuals(self, cv=True):
"""Return the residuals of the cross-validation for the fit data"""
vals = self.cv_values(cv)
return (self.y - vals) / self.dy |
def _call_vecfield_inf(self, vf, out):
"""Implement ``self(vf, out)`` for exponent ``inf``."""
vf[0].ufuncs.absolute(out=out)
if self.is_weighted:
out *= self.weights[0]
if len(self.domain) == 1:
return
tmp = self.range.element()
for vfi, wi in zip(vf[1:], self.weights[1:]):
vfi.ufuncs.absolute(out=tmp)
if self.is_weighted:
tmp *= wi
out.ufuncs.maximum(tmp, out=out) | Implement ``self(vf, out)`` for exponent ``inf``. | Below is the the instruction that describes the task:
### Input:
Implement ``self(vf, out)`` for exponent ``inf``.
### Response:
def _call_vecfield_inf(self, vf, out):
"""Implement ``self(vf, out)`` for exponent ``inf``."""
vf[0].ufuncs.absolute(out=out)
if self.is_weighted:
out *= self.weights[0]
if len(self.domain) == 1:
return
tmp = self.range.element()
for vfi, wi in zip(vf[1:], self.weights[1:]):
vfi.ufuncs.absolute(out=tmp)
if self.is_weighted:
tmp *= wi
out.ufuncs.maximum(tmp, out=out) |
def sold_out_and_unregistered(context):
''' If the current user is unregistered, returns True if there are no
products in the TICKET_PRODUCT_CATEGORY that are available to that user.
    If there *are* products available, it returns False.
If the current user *is* registered, then return None (it's not a
pertinent question for people who already have a ticket).
'''
user = user_for_context(context)
if hasattr(user, "attendee") and user.attendee.completed_registration:
# This user has completed registration, and so we don't need to answer
# whether they have sold out yet.
# TODO: what if a user has got to the review phase?
# currently that user will hit the review page, click "Check out and
# pay", and that will fail. Probably good enough for now.
return None
ticket_category = settings.TICKET_PRODUCT_CATEGORY
categories = available_categories(context)
return ticket_category not in [cat.id for cat in categories] | If the current user is unregistered, returns True if there are no
products in the TICKET_PRODUCT_CATEGORY that are available to that user.
    If there *are* products available, it returns False.
If the current user *is* registered, then return None (it's not a
pertinent question for people who already have a ticket). | Below is the the instruction that describes the task:
### Input:
If the current user is unregistered, returns True if there are no
products in the TICKET_PRODUCT_CATEGORY that are available to that user.
If there *are* products available, it returns False.
If the current user *is* registered, then return None (it's not a
pertinent question for people who already have a ticket).
### Response:
def sold_out_and_unregistered(context):
''' If the current user is unregistered, returns True if there are no
products in the TICKET_PRODUCT_CATEGORY that are available to that user.
    If there *are* products available, it returns False.
If the current user *is* registered, then return None (it's not a
pertinent question for people who already have a ticket).
'''
user = user_for_context(context)
if hasattr(user, "attendee") and user.attendee.completed_registration:
# This user has completed registration, and so we don't need to answer
# whether they have sold out yet.
# TODO: what if a user has got to the review phase?
# currently that user will hit the review page, click "Check out and
# pay", and that will fail. Probably good enough for now.
return None
ticket_category = settings.TICKET_PRODUCT_CATEGORY
categories = available_categories(context)
return ticket_category not in [cat.id for cat in categories] |
def check_and_create_directories(paths):
"""
Check and create directories.
    If the directory exists, it will remove it and create a new folder.
:type paths: Array of string or string
:param paths: the location of directory
"""
for path in paths:
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path) | Check and create directories.
    If the directory exists, it will remove it and create a new folder.
:type paths: Array of string or string
:param paths: the location of directory | Below is the the instruction that describes the task:
### Input:
Check and create directories.
If the directory exists, it will remove it and create a new folder.
:type paths: Array of string or string
:param paths: the location of directory
### Response:
def check_and_create_directories(paths):
"""
Check and create directories.
    If the directory exists, it will remove it and create a new folder.
:type paths: Array of string or string
:param paths: the location of directory
"""
for path in paths:
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path) |
def get_input_score_start_range_metadata(self):
"""Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['input_score_start_range'])
metadata.update({'existing_decimal_values': self._my_map['inputScoreStartRange']})
return Metadata(**metadata) | Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_input_score_start_range_metadata(self):
"""Gets the metadata for the input score start range.
return: (osid.Metadata) - metadata for the input score start
range
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['input_score_start_range'])
metadata.update({'existing_decimal_values': self._my_map['inputScoreStartRange']})
return Metadata(**metadata) |
def resolve_meta_key(hub, key, meta):
""" Resolve a value when it's a string and starts with '>' """
if key not in meta:
return None
value = meta[key]
if isinstance(value, str) and value[0] == '>':
topic = value[1:]
if topic not in hub:
raise KeyError('topic %s not found in hub' % topic)
return hub[topic].get()
return value | Resolve a value when it's a string and starts with '>' | Below is the the instruction that describes the task:
### Input:
Resolve a value when it's a string and starts with '>'
### Response:
def resolve_meta_key(hub, key, meta):
""" Resolve a value when it's a string and starts with '>' """
if key not in meta:
return None
value = meta[key]
if isinstance(value, str) and value[0] == '>':
topic = value[1:]
if topic not in hub:
raise KeyError('topic %s not found in hub' % topic)
return hub[topic].get()
return value |
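An illustrative sketch of resolve_meta_key in use (not part of the original entry); _Topic is a hypothetical stand-in for a hub topic object exposing get():
class _Topic:
    # hypothetical minimal topic: real hubs supply their own topic objects with get()
    def __init__(self, value):
        self._value = value
    def get(self):
        return self._value
hub = {'settings/timeout': _Topic(30)}
meta = {'timeout': '>settings/timeout', 'retries': 3}
print(resolve_meta_key(hub, 'timeout', meta))   # 30, resolved through the hub topic
print(resolve_meta_key(hub, 'retries', meta))   # 3, plain values are returned unchanged
print(resolve_meta_key(hub, 'missing', meta))   # None, key absent from meta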
def send_miniprogrampage_message(
self, user_id, title, appid, pagepath, thumb_media_id, kf_account=None
):
"""
        Send a mini program card (the mini program must already be linked to the official account)
        :param user_id: user ID, i.e. the source of the `Message` you received
        :param title: title of the mini program card
        :param appid: appid of the mini program; it must be associated with the official account
        :param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
        :param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
        :param kf_account: customer service account to send the message from, when a specific one is required
        :return: the returned JSON data packet
"""
data = {
"touser": user_id,
"msgtype": "miniprogrampage",
"miniprogrampage": {
"title": title,
"appid": appid,
"pagepath": pagepath,
"thumb_media_id": thumb_media_id
}
}
if kf_account is not None:
data["customservice"] = {"kf_account": kf_account}
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data=data
        ) | Send a mini program card (the mini program must already be linked to the official account)
        :param user_id: user ID, i.e. the source of the `Message` you received
        :param title: title of the mini program card
        :param appid: appid of the mini program; it must be associated with the official account
        :param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
        :param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
        :param kf_account: customer service account to send the message from, when a specific one is required
        :return: the returned JSON data packet | Below is the the instruction that describes the task:
### Input:
Send a mini program card (the mini program must already be linked to the official account)
:param user_id: user ID, i.e. the source of the `Message` you received
:param title: title of the mini program card
:param appid: appid of the mini program; it must be associated with the official account
:param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
:param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
:param kf_account: customer service account to send the message from, when a specific one is required
:return: the returned JSON data packet
### Response:
def send_miniprogrampage_message(
self, user_id, title, appid, pagepath, thumb_media_id, kf_account=None
):
"""
        Send a mini program card (the mini program must already be linked to the official account)
        :param user_id: user ID, i.e. the source of the `Message` you received
        :param title: title of the mini program card
        :param appid: appid of the mini program; it must be associated with the official account
        :param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
        :param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
        :param kf_account: customer service account to send the message from, when a specific one is required
        :return: the returned JSON data packet
"""
data = {
"touser": user_id,
"msgtype": "miniprogrampage",
"miniprogrampage": {
"title": title,
"appid": appid,
"pagepath": pagepath,
"thumb_media_id": thumb_media_id
}
}
if kf_account is not None:
data["customservice"] = {"kf_account": kf_account}
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data=data
) |
def transpile_modname_source_target(self, spec, modname, source, target):
"""
Calls the original version.
"""
return self.simple_transpile_modname_source_target(
spec, modname, source, target) | Calls the original version. | Below is the the instruction that describes the task:
### Input:
Calls the original version.
### Response:
def transpile_modname_source_target(self, spec, modname, source, target):
"""
Calls the original version.
"""
return self.simple_transpile_modname_source_target(
spec, modname, source, target) |
def generate_requests(hosts, jolokia_port, jolokia_prefix):
"""Return a generator of requests to fetch the under replicated
partition number from the specified hosts.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:returns: generator of requests
"""
session = FuturesSession()
for host in hosts:
url = "http://{host}:{port}/{prefix}/read/{key}".format(
host=host,
port=jolokia_port,
prefix=jolokia_prefix,
key=UNDER_REPL_KEY,
)
yield host, session.get(url) | Return a generator of requests to fetch the under replicated
partition number from the specified hosts.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:returns: generator of requests | Below is the the instruction that describes the task:
### Input:
Return a generator of requests to fetch the under replicated
partition number from the specified hosts.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:returns: generator of requests
### Response:
def generate_requests(hosts, jolokia_port, jolokia_prefix):
"""Return a generator of requests to fetch the under replicated
partition number from the specified hosts.
:param hosts: list of brokers ip addresses
:type hosts: list of strings
:param jolokia_port: HTTP port for Jolokia
:type jolokia_port: integer
:param jolokia_prefix: HTTP prefix on the server for the Jolokia queries
:type jolokia_prefix: string
:returns: generator of requests
"""
session = FuturesSession()
for host in hosts:
url = "http://{host}:{port}/{prefix}/read/{key}".format(
host=host,
port=jolokia_port,
prefix=jolokia_prefix,
key=UNDER_REPL_KEY,
)
yield host, session.get(url) |
def __sub_make_request(self, foc, gpid, callback):
"""Make right subscription request depending on whether local or global - used by __sub*"""
# global
if isinstance(gpid, string_types):
gpid = uuid_to_hex(gpid)
ref = (foc, gpid)
with self.__sub_add_reference(ref):
req = self._client._request_sub_create(self.__lid, foc, gpid, callback=callback)
# local
elif isinstance(gpid, Sequence) and len(gpid) == 2:
ref = (foc, tuple(gpid))
with self.__sub_add_reference(ref):
req = self._client._request_sub_create_local(self.__lid, foc, *gpid, callback=callback)
else:
raise ValueError('gpid must be string or two-element tuple')
req._run_on_completion(self.__sub_del_reference, ref)
return req | Make right subscription request depending on whether local or global - used by __sub* | Below is the the instruction that describes the task:
### Input:
Make right subscription request depending on whether local or global - used by __sub*
### Response:
def __sub_make_request(self, foc, gpid, callback):
"""Make right subscription request depending on whether local or global - used by __sub*"""
# global
if isinstance(gpid, string_types):
gpid = uuid_to_hex(gpid)
ref = (foc, gpid)
with self.__sub_add_reference(ref):
req = self._client._request_sub_create(self.__lid, foc, gpid, callback=callback)
# local
elif isinstance(gpid, Sequence) and len(gpid) == 2:
ref = (foc, tuple(gpid))
with self.__sub_add_reference(ref):
req = self._client._request_sub_create_local(self.__lid, foc, *gpid, callback=callback)
else:
raise ValueError('gpid must be string or two-element tuple')
req._run_on_completion(self.__sub_del_reference, ref)
return req |
def orient_graph(self, df_data, graph, nb_runs=6, printout=None, **kwargs):
"""Orient an undirected graph using the pairwise method defined by the subclass.
        The pairwise method is run on every undirected edge.
Args:
df_data (pandas.DataFrame): Data
umg (networkx.Graph): Graph to orient
nb_runs (int): number of times to rerun for each pair (bootstrap)
printout (str): (optional) Path to file where to save temporary results
Returns:
networkx.DiGraph: a directed graph, which might contain cycles
.. warning:
Requirement : Name of the nodes in the graph correspond to name of
the variables in df_data
"""
if type(graph) == nx.DiGraph:
edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())]
oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())]
for a in edges:
if (a[1], a[0]) in list(graph.edges()):
edges.remove(a)
output = nx.DiGraph()
for i in oriented_edges:
output.add_edge(*i)
elif type(graph) == nx.Graph:
edges = list(graph.edges())
output = nx.DiGraph()
else:
raise TypeError("Data type not understood.")
res = []
for idx, (a, b) in enumerate(edges):
weight = self.predict_proba(
df_data[a].values.reshape((-1, 1)), df_data[b].values.reshape((-1, 1)), idx=idx,
nb_runs=nb_runs, **kwargs)
if weight > 0: # a causes b
output.add_edge(a, b, weight=weight)
else:
output.add_edge(b, a, weight=abs(weight))
if printout is not None:
res.append([str(a) + '-' + str(b), weight])
DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(
printout, index=False)
for node in list(df_data.columns.values):
if node not in output.nodes():
output.add_node(node)
return output | Orient an undirected graph using the pairwise method defined by the subclass.
        The pairwise method is run on every undirected edge.
Args:
df_data (pandas.DataFrame): Data
umg (networkx.Graph): Graph to orient
nb_runs (int): number of times to rerun for each pair (bootstrap)
printout (str): (optional) Path to file where to save temporary results
Returns:
networkx.DiGraph: a directed graph, which might contain cycles
.. warning:
Requirement : Name of the nodes in the graph correspond to name of
the variables in df_data | Below is the the instruction that describes the task:
### Input:
Orient an undirected graph using the pairwise method defined by the subclass.
The pairwise method is run on every undirected edge.
Args:
df_data (pandas.DataFrame): Data
umg (networkx.Graph): Graph to orient
nb_runs (int): number of times to rerun for each pair (bootstrap)
printout (str): (optional) Path to file where to save temporary results
Returns:
networkx.DiGraph: a directed graph, which might contain cycles
.. warning:
Requirement : Name of the nodes in the graph correspond to name of
the variables in df_data
### Response:
def orient_graph(self, df_data, graph, nb_runs=6, printout=None, **kwargs):
"""Orient an undirected graph using the pairwise method defined by the subclass.
        The pairwise method is run on every undirected edge.
Args:
df_data (pandas.DataFrame): Data
umg (networkx.Graph): Graph to orient
nb_runs (int): number of times to rerun for each pair (bootstrap)
printout (str): (optional) Path to file where to save temporary results
Returns:
networkx.DiGraph: a directed graph, which might contain cycles
.. warning:
Requirement : Name of the nodes in the graph correspond to name of
the variables in df_data
"""
if type(graph) == nx.DiGraph:
edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())]
oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())]
for a in edges:
if (a[1], a[0]) in list(graph.edges()):
edges.remove(a)
output = nx.DiGraph()
for i in oriented_edges:
output.add_edge(*i)
elif type(graph) == nx.Graph:
edges = list(graph.edges())
output = nx.DiGraph()
else:
raise TypeError("Data type not understood.")
res = []
for idx, (a, b) in enumerate(edges):
weight = self.predict_proba(
df_data[a].values.reshape((-1, 1)), df_data[b].values.reshape((-1, 1)), idx=idx,
nb_runs=nb_runs, **kwargs)
if weight > 0: # a causes b
output.add_edge(a, b, weight=weight)
else:
output.add_edge(b, a, weight=abs(weight))
if printout is not None:
res.append([str(a) + '-' + str(b), weight])
DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(
printout, index=False)
for node in list(df_data.columns.values):
if node not in output.nodes():
output.add_node(node)
return output |
def read_uint(self):
"""
Reads an integer. The size depends on the architecture.
		Reads a 4 byte little-endian unsigned int on 32 bit arch
		Reads an 8 byte little-endian unsigned int on 64 bit arch
"""
if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
return int.from_bytes(self.read(8), byteorder = 'little', signed = False)
else:
return int.from_bytes(self.read(4), byteorder = 'little', signed = False) | Reads an integer. The size depends on the architecture.
		Reads a 4 byte little-endian unsigned int on 32 bit arch
		Reads an 8 byte little-endian unsigned int on 64 bit arch | Below is the the instruction that describes the task:
### Input:
Reads an integer. The size depends on the architecture.
Reads a 4 byte little-endian unsigned int on 32 bit arch
Reads an 8 byte little-endian unsigned int on 64 bit arch
### Response:
def read_uint(self):
"""
Reads an integer. The size depends on the architecture.
		Reads a 4 byte little-endian unsigned int on 32 bit arch
		Reads an 8 byte little-endian unsigned int on 64 bit arch
"""
if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
return int.from_bytes(self.read(8), byteorder = 'little', signed = False)
else:
return int.from_bytes(self.read(4), byteorder = 'little', signed = False) |
def tsplit(string, delimiters):
    """Behaves like str.split but supports tuples of delimiters."""
delimiters = tuple(delimiters)
if len(delimiters) < 1:
return [string,]
final_delimiter = delimiters[0]
for i in delimiters[1:]:
string = string.replace(i, final_delimiter)
    return string.split(final_delimiter) | Behaves like str.split but supports tuples of delimiters. | Below is the the instruction that describes the task:
### Input:
Behaves like str.split but supports tuples of delimiters.
### Response:
def tsplit(string, delimiters):
    """Behaves like str.split but supports tuples of delimiters."""
delimiters = tuple(delimiters)
if len(delimiters) < 1:
return [string,]
final_delimiter = delimiters[0]
for i in delimiters[1:]:
string = string.replace(i, final_delimiter)
return string.split(final_delimiter) |
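A brief usage sketch for tsplit (illustrative addition, not part of the original entry):
print(tsplit("a,b;c|d", (",", ";", "|")))   # ['a', 'b', 'c', 'd']
print(tsplit("unchanged", ()))              # ['unchanged'] when no delimiters are given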
def add_cli_drop(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``drop`` command to main :mod:`click` function."""
@main.command()
@click.confirmation_option(prompt='Are you sure you want to drop the db?')
@click.pass_obj
def drop(manager):
"""Drop the database."""
manager.drop_all()
return main | Add a ``drop`` command to main :mod:`click` function. | Below is the the instruction that describes the task:
### Input:
Add a ``drop`` command to main :mod:`click` function.
### Response:
def add_cli_drop(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``drop`` command to main :mod:`click` function."""
@main.command()
@click.confirmation_option(prompt='Are you sure you want to drop the db?')
@click.pass_obj
def drop(manager):
"""Drop the database."""
manager.drop_all()
return main |
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_high_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
# 00000011 = channel 1
# 00001100 = channel 2
# so shift 1 bit to the right + and with 03
tmp = (data[0] >> 1) & 0x03
print(tmp)
self.channel = self.byte_to_channel(tmp)
self.needs_valid_channel(self.channel, 2)
(self.delay_time,) = struct.unpack('>L', bytes([0]) + data[1:]) | :return: None | Below is the the instruction that describes the task:
### Input:
:return: None
### Response:
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_high_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
# 00000011 = channel 1
# 00001100 = channel 2
# so shift 1 bit to the right + and with 03
tmp = (data[0] >> 1) & 0x03
print(tmp)
self.channel = self.byte_to_channel(tmp)
self.needs_valid_channel(self.channel, 2)
(self.delay_time,) = struct.unpack('>L', bytes([0]) + data[1:]) |
def ln_comment(self, ln):
"""Get an end line comment. CoconutInternalExceptions should always be caught and complained."""
if self.keep_lines:
if not 1 <= ln <= len(self.original_lines) + 1:
raise CoconutInternalException(
"out of bounds line number", ln,
"not in range [1, " + str(len(self.original_lines) + 1) + "]",
)
elif ln == len(self.original_lines) + 1: # trim too large
lni = -1
else:
lni = ln - 1
if self.line_numbers and self.keep_lines:
if self.minify:
comment = str(ln) + " " + self.original_lines[lni]
else:
comment = " line " + str(ln) + ": " + self.original_lines[lni]
elif self.keep_lines:
if self.minify:
comment = self.original_lines[lni]
else:
comment = " " + self.original_lines[lni]
elif self.line_numbers:
if self.minify:
comment = str(ln)
else:
comment = " line " + str(ln)
else:
return ""
return self.wrap_comment(comment, reformat=False) | Get an end line comment. CoconutInternalExceptions should always be caught and complained. | Below is the the instruction that describes the task:
### Input:
Get an end line comment. CoconutInternalExceptions should always be caught and complained.
### Response:
def ln_comment(self, ln):
"""Get an end line comment. CoconutInternalExceptions should always be caught and complained."""
if self.keep_lines:
if not 1 <= ln <= len(self.original_lines) + 1:
raise CoconutInternalException(
"out of bounds line number", ln,
"not in range [1, " + str(len(self.original_lines) + 1) + "]",
)
elif ln == len(self.original_lines) + 1: # trim too large
lni = -1
else:
lni = ln - 1
if self.line_numbers and self.keep_lines:
if self.minify:
comment = str(ln) + " " + self.original_lines[lni]
else:
comment = " line " + str(ln) + ": " + self.original_lines[lni]
elif self.keep_lines:
if self.minify:
comment = self.original_lines[lni]
else:
comment = " " + self.original_lines[lni]
elif self.line_numbers:
if self.minify:
comment = str(ln)
else:
comment = " line " + str(ln)
else:
return ""
return self.wrap_comment(comment, reformat=False) |
def ReadPermission(self, permission_link, options=None):
"""Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Read(path,
'permissions',
permission_id,
None,
options) | Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict | Below is the the instruction that describes the task:
### Input:
Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
### Response:
def ReadPermission(self, permission_link, options=None):
"""Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Read(path,
'permissions',
permission_id,
None,
options) |
def search(self,
q,
start=1,
num=10,
sortField="username",
sortOrder="asc"):
"""
The User Search operation searches for users in the portal. The
search index is updated whenever users are created, updated, or
deleted. There can be a lag between the time that the user is
updated and the time when it's reflected in the search results. The
results only contain users that the calling user has permissions to
see. Users can control this visibility by changing the access
property of their user.
Inputs:
q -The query string to search the users against.
start - The number of the first entry in the result set response.
The index number is 1-based. The default value of start is
1 (for example, the first search result). The start
parameter, along with the num parameter can be used to
paginate the search results.
num - The maximum number of results to be included in the result
set response. The default value is 10, and the maximum
allowed value is 100. The start parameter, along with the num
parameter can be used to paginate the search results. The
actual number of returned results may be less than num. This
happens when the number of results remaining after start is
less than num.
sortField - Field to sort by. The allowed field names are username
and created.
sortOrder - Describes whether the returned results are in ascending
or descending order. Default is ascending.
Values: asc | desc
"""
params = {
"f" : "json",
"q" : q,
"start" : start,
"num" : num,
"sortField" : sortField,
"sortOrder" : sortOrder
}
url = self._url
return self._get(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | The User Search operation searches for users in the portal. The
search index is updated whenever users are created, updated, or
deleted. There can be a lag between the time that the user is
updated and the time when it's reflected in the search results. The
results only contain users that the calling user has permissions to
see. Users can control this visibility by changing the access
property of their user.
Inputs:
q -The query string to search the users against.
start - The number of the first entry in the result set response.
The index number is 1-based. The default value of start is
1 (for example, the first search result). The start
parameter, along with the num parameter can be used to
paginate the search results.
num - The maximum number of results to be included in the result
set response. The default value is 10, and the maximum
allowed value is 100. The start parameter, along with the num
parameter can be used to paginate the search results. The
actual number of returned results may be less than num. This
happens when the number of results remaining after start is
less than num.
sortField - Field to sort by. The allowed field names are username
and created.
sortOrder - Describes whether the returned results are in ascending
or descending order. Default is ascending.
Values: asc | desc | Below is the the instruction that describes the task:
### Input:
The User Search operation searches for users in the portal. The
search index is updated whenever users are created, updated, or
deleted. There can be a lag between the time that the user is
updated and the time when it's reflected in the search results. The
results only contain users that the calling user has permissions to
see. Users can control this visibility by changing the access
property of their user.
Inputs:
q -The query string to search the users against.
start - The number of the first entry in the result set response.
The index number is 1-based. The default value of start is
1 (for example, the first search result). The start
parameter, along with the num parameter can be used to
paginate the search results.
num - The maximum number of results to be included in the result
set response. The default value is 10, and the maximum
allowed value is 100. The start parameter, along with the num
parameter can be used to paginate the search results. The
actual number of returned results may be less than num. This
happens when the number of results remaining after start is
less than num.
sortField - Field to sort by. The allowed field names are username
and created.
sortOrder - Describes whether the returned results are in ascending
or descending order. Default is ascending.
Values: asc | desc
### Response:
def search(self,
q,
start=1,
num=10,
sortField="username",
sortOrder="asc"):
"""
The User Search operation searches for users in the portal. The
search index is updated whenever users are created, updated, or
deleted. There can be a lag between the time that the user is
updated and the time when it's reflected in the search results. The
results only contain users that the calling user has permissions to
see. Users can control this visibility by changing the access
property of their user.
Inputs:
q -The query string to search the users against.
start - The number of the first entry in the result set response.
The index number is 1-based. The default value of start is
1 (for example, the first search result). The start
parameter, along with the num parameter can be used to
paginate the search results.
num - The maximum number of results to be included in the result
set response. The default value is 10, and the maximum
allowed value is 100. The start parameter, along with the num
parameter can be used to paginate the search results. The
actual number of returned results may be less than num. This
happens when the number of results remaining after start is
less than num.
sortField - Field to sort by. The allowed field names are username
and created.
sortOrder - Describes whether the returned results are in ascending
or descending order. Default is ascending.
Values: asc | desc
"""
params = {
"f" : "json",
"q" : q,
"start" : start,
"num" : num,
"sortField" : sortField,
"sortOrder" : sortOrder
}
url = self._url
return self._get(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(CodedValue=self.coded_value)
if self.order_number is not None:
params["mdsol:OrderNumber"] = str(self.order_number)
if self.specify:
params["mdsol:Specify"] = "Yes"
builder.start("CodeListItem", params)
if self.decode is not None:
self.decode.build(builder)
for alias in self.aliases:
alias.build(builder)
builder.end("CodeListItem") | Build XML by appending to builder | Below is the the instruction that describes the task:
### Input:
Build XML by appending to builder
### Response:
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(CodedValue=self.coded_value)
if self.order_number is not None:
params["mdsol:OrderNumber"] = str(self.order_number)
if self.specify:
params["mdsol:Specify"] = "Yes"
builder.start("CodeListItem", params)
if self.decode is not None:
self.decode.build(builder)
for alias in self.aliases:
alias.build(builder)
builder.end("CodeListItem") |
def download_large(self, image, url_field='url'):
        """Download the binary data of an image attachment at large size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes
"""
        return self.download(image, url_field=url_field, suffix='large') | Download the binary data of an image attachment at large size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Download the binary data of an image attachment at large size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes
### Response:
def download_large(self, image, url_field='url'):
        """Download the binary data of an image attachment at large size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes
"""
return self.download(image, url_field=url_field, suffix='large') |
def run(
project: 'projects.Project',
step: 'projects.ProjectStep'
) -> dict:
"""
Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing
"""
with open(step.source_path, 'r') as f:
code = f.read()
try:
cauldron.display.markdown(code, **project.shared.fetch(None))
return {'success': True}
except Exception as err:
return dict(
success=False,
html_message=templating.render_template(
'markdown-error.html',
error=err
)
) | Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing | Below is the the instruction that describes the task:
### Input:
Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing
### Response:
def run(
project: 'projects.Project',
step: 'projects.ProjectStep'
) -> dict:
"""
Runs the markdown file and renders the contents to the notebook display
:param project:
:param step:
:return:
A run response dictionary containing
"""
with open(step.source_path, 'r') as f:
code = f.read()
try:
cauldron.display.markdown(code, **project.shared.fetch(None))
return {'success': True}
except Exception as err:
return dict(
success=False,
html_message=templating.render_template(
'markdown-error.html',
error=err
)
) |
def get_subparsers(parser, create=False):
"""
Returns the :class:`argparse._SubParsersAction` instance for given
:class:`ArgumentParser` instance as would have been returned by
:meth:`ArgumentParser.add_subparsers`. The problem with the latter is that
it only works once and raises an exception on the second attempt, and the
public API seems to lack a method to get *existing* subparsers.
:param create:
        If `True`, creates the subparser if it does not exist. Default is
`False`.
"""
# note that ArgumentParser._subparsers is *not* what is returned by
# ArgumentParser.add_subparsers().
if parser._subparsers:
actions = [a for a in parser._actions
if isinstance(a, argparse._SubParsersAction)]
assert len(actions) == 1
return actions[0]
else:
if create:
return parser.add_subparsers() | Returns the :class:`argparse._SubParsersAction` instance for given
:class:`ArgumentParser` instance as would have been returned by
:meth:`ArgumentParser.add_subparsers`. The problem with the latter is that
it only works once and raises an exception on the second attempt, and the
public API seems to lack a method to get *existing* subparsers.
:param create:
        If `True`, creates the subparser if it does not exist. Default is
`False`. | Below is the the instruction that describes the task:
### Input:
Returns the :class:`argparse._SubParsersAction` instance for given
:class:`ArgumentParser` instance as would have been returned by
:meth:`ArgumentParser.add_subparsers`. The problem with the latter is that
it only works once and raises an exception on the second attempt, and the
public API seems to lack a method to get *existing* subparsers.
:param create:
    If `True`, creates the subparser if it does not exist. Default is
`False`.
### Response:
def get_subparsers(parser, create=False):
"""
Returns the :class:`argparse._SubParsersAction` instance for given
:class:`ArgumentParser` instance as would have been returned by
:meth:`ArgumentParser.add_subparsers`. The problem with the latter is that
it only works once and raises an exception on the second attempt, and the
public API seems to lack a method to get *existing* subparsers.
:param create:
        If `True`, creates the subparser if it does not exist. Default is
`False`.
"""
# note that ArgumentParser._subparsers is *not* what is returned by
# ArgumentParser.add_subparsers().
if parser._subparsers:
actions = [a for a in parser._actions
if isinstance(a, argparse._SubParsersAction)]
assert len(actions) == 1
return actions[0]
else:
if create:
return parser.add_subparsers() |
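A short usage sketch for get_subparsers (illustrative addition, not part of the original entry); it uses only the standard library argparse module that the function itself inspects:
import argparse
parser = argparse.ArgumentParser(prog='tool')
sub = get_subparsers(parser, create=True)   # first call: no subparsers exist yet, so one is created
sub.add_parser('init')
same = get_subparsers(parser)               # later calls return the existing action instead of raising
assert same is sub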
def get_module_name(package):
"""
package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli"
"""
distribution = get_distribution(package.DISTRIBUTION_NAME)
entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)
if not entry_info:
raise RuntimeError(
"Can't find entry info for distribution: %r (group: %r, entry point: %r)" % (
package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT
)
)
return entry_info.module_name | package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli" | Below is the the instruction that describes the task:
### Input:
package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli"
### Response:
def get_module_name(package):
"""
package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli"
"""
distribution = get_distribution(package.DISTRIBUTION_NAME)
entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)
if not entry_info:
raise RuntimeError(
"Can't find entry info for distribution: %r (group: %r, entry point: %r)" % (
package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT
)
)
return entry_info.module_name |
def dpll(clauses, symbols, model):
"See if the clauses are true in a partial model."
unknown_clauses = [] ## clauses with an unknown truth value
for c in clauses:
val = pl_true(c, model)
if val == False:
return False
if val != True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P, value = find_pure_symbol(symbols, unknown_clauses)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, value = find_unit_clause(clauses, model)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, symbols = symbols[0], symbols[1:]
return (dpll(clauses, symbols, extend(model, P, True)) or
dpll(clauses, symbols, extend(model, P, False))) | See if the clauses are true in a partial model. | Below is the the instruction that describes the task:
### Input:
See if the clauses are true in a partial model.
### Response:
def dpll(clauses, symbols, model):
"See if the clauses are true in a partial model."
unknown_clauses = [] ## clauses with an unknown truth value
for c in clauses:
val = pl_true(c, model)
if val == False:
return False
if val != True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P, value = find_pure_symbol(symbols, unknown_clauses)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, value = find_unit_clause(clauses, model)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, symbols = symbols[0], symbols[1:]
return (dpll(clauses, symbols, extend(model, P, True)) or
dpll(clauses, symbols, extend(model, P, False))) |
def transform_sources(self, sources, with_string=False):
        """Get the definitions of needed strings and functions
after replacement.
"""
modules = {}
updater = partial(
self.replace_source, modules=modules, prefix='string_')
for filename in sources:
updated = update_func_body(sources[filename], updater)
sources[filename] = EXTERN_AND_SEG + updated
logging.debug('modules: %s', modules)
        return sources, self.build_funcs(modules) | Get the definitions of needed strings and functions
after replacement. | Below is the the instruction that describes the task:
### Input:
Get the definitions of needed strings and functions
after replacement.
### Response:
def transform_sources(self, sources, with_string=False):
        """Get the definitions of needed strings and functions
after replacement.
"""
modules = {}
updater = partial(
self.replace_source, modules=modules, prefix='string_')
for filename in sources:
updated = update_func_body(sources[filename], updater)
sources[filename] = EXTERN_AND_SEG + updated
logging.debug('modules: %s', modules)
return sources, self.build_funcs(modules) |
def get_argument_parser():
"""Returns an argument parser object for the script."""
desc = 'Filter FASTA file by chromosome names.'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument(
'-f', '--fasta-file', default='-', type=str, help=textwrap.dedent("""\
Path of the FASTA file. The file may be gzip'ed.
If set to ``-``, read from ``stdin``."""))
parser.add_argument(
'-s', '--species', type=str,
choices=sorted(ensembl.SPECIES_CHROMPAT.keys()),
default='human', help=textwrap.dedent("""\
Species for which to extract genes. (This parameter is ignored
if ``--chromosome-pattern`` is specified.)""")
)
parser.add_argument(
'-c', '--chromosome-pattern', type=str, required=False,
default=None, help=textwrap.dedent("""\
Regular expression that chromosome names have to match.
If not specified, determine pattern based on the setting of
``--species``.""")
)
parser.add_argument(
'-o', '--output-file', type=str, required=True,
help=textwrap.dedent("""\
Path of output file. If set to ``-``, print to ``stdout``,
and redirect logging messages to ``stderr``."""))
parser = cli.add_reporting_args(parser)
return parser | Returns an argument parser object for the script. | Below is the the instruction that describes the task:
### Input:
Returns an argument parser object for the script.
### Response:
def get_argument_parser():
"""Returns an argument parser object for the script."""
desc = 'Filter FASTA file by chromosome names.'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument(
'-f', '--fasta-file', default='-', type=str, help=textwrap.dedent("""\
Path of the FASTA file. The file may be gzip'ed.
If set to ``-``, read from ``stdin``."""))
parser.add_argument(
'-s', '--species', type=str,
choices=sorted(ensembl.SPECIES_CHROMPAT.keys()),
default='human', help=textwrap.dedent("""\
Species for which to extract genes. (This parameter is ignored
if ``--chromosome-pattern`` is specified.)""")
)
parser.add_argument(
'-c', '--chromosome-pattern', type=str, required=False,
default=None, help=textwrap.dedent("""\
Regular expression that chromosome names have to match.
If not specified, determine pattern based on the setting of
``--species``.""")
)
parser.add_argument(
'-o', '--output-file', type=str, required=True,
help=textwrap.dedent("""\
Path of output file. If set to ``-``, print to ``stdout``,
and redirect logging messages to ``stderr``."""))
parser = cli.add_reporting_args(parser)
return parser |