code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def database_caller_creator(self, name=None):
'''creates a sqlite3 db
returns the related connection object
which will be later used to spawn the cursor
'''
try:
if name:
database = name + '.db'
else:
database = 'sqlite_' + str_generator(self) + '.db'
conn = sqlite3.connect(database)
logger.warning('Database created and opened successfully: %s' % database, extra=d)
except Exception:
logger.error('Failed to connect or create database / sqlite3', extra=d)
raise DbConnException
return conn | creates a sqlite3 db
returns the related connection object
which will be later used to spawn the cursor | Below is the instruction that describes the task:
### Input:
creates a sqlite3 db
returns the related connection object
which will be later used to spawn the cursor
### Response:
def database_caller_creator(self, name=None):
'''creates a sqlite3 db
returns the related connection object
which will be later used to spawn the cursor
'''
try:
if name:
database = name + '.db'
else:
database = 'sqlite_' + str_generator(self) + '.db'
conn = sqlite3.connect(database)
logger.warning('Database created and opened successfully: %s' % database, extra=d)
except Exception:
logger.error('Failed to connect or create database / sqlite3', extra=d)
raise DbConnException
return conn |
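A minimal usage sketch for the sqlite3 row above, assuming only the standard-library `sqlite3` module: it shows the connect-then-cursor pattern the docstring describes. The `str_generator`, `logger`, and `DbConnException` helpers from the original are not reproduced here, and the table created is purely illustrative.

```python
import sqlite3

def create_database(name=None):
    """Create (or open) a sqlite3 database file and return the connection."""
    database = (name or 'sqlite_example') + '.db'
    conn = sqlite3.connect(database)  # creates the file if it does not exist
    return conn

conn = create_database('demo')
cursor = conn.cursor()  # the cursor "spawned" from the connection
cursor.execute('CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name TEXT)')
conn.commit()
conn.close()
```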
def labels(self):
"""Return the 10-tuple of text labels from `header`.
The value is determined from the header entries ``'nlabl'`` and
``'label'``.
"""
label_array = self.header['label']['value']
labels = tuple(''.join(row.astype(str)) for row in label_array)
try:
nlabels = int(self.header['nlabl']['value'])
except KeyError:
nlabels = len(labels)
# Check if there are nontrivial labels after the number given in
# the header. If yes, ignore the 'nlabl' information and return
# all labels.
if any(label.strip() for label in labels[nlabels:]):
return labels
else:
return labels[:nlabels] | Return the 10-tuple of text labels from `header`.
The value is determined from the header entries ``'nlabl'`` and
``'label'``. | Below is the instruction that describes the task:
### Input:
Return the 10-tuple of text labels from `header`.
The value is determined from the header entries ``'nlabl'`` and
``'label'``.
### Response:
def labels(self):
"""Return the 10-tuple of text labels from `header`.
The value is determined from the header entries ``'nlabl'`` and
``'label'``.
"""
label_array = self.header['label']['value']
labels = tuple(''.join(row.astype(str)) for row in label_array)
try:
nlabels = int(self.header['nlabl']['value'])
except KeyError:
nlabels = len(labels)
# Check if there are nontrivial labels after the number given in
# the header. If yes, ignore the 'nlabl' information and return
# all labels.
if any(label.strip() for label in labels[nlabels:]):
return labels
else:
return labels[:nlabels] |
def batch_help():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message | Below is the instruction that describes the task:
### Input:
Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
### Response:
def batch_help():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message |
def muteThread(self, mute_time=-1, thread_id=None):
"""
Mutes thread
:param mute_time: Mute time in seconds, leave blank to mute forever
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {"mute_settings": str(mute_time), "thread_fbid": thread_id}
content = self._post(self.req_url.MUTE_THREAD, data, fix_request=True) | Mutes thread
:param mute_time: Mute time in seconds, leave blank to mute forever
:param thread_id: User/Group ID to mute. See :ref:`intro_threads` | Below is the instruction that describes the task:
### Input:
Mutes thread
:param mute_time: Mute time in seconds, leave blank to mute forever
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
### Response:
def muteThread(self, mute_time=-1, thread_id=None):
"""
Mutes thread
:param mute_time: Mute time in seconds, leave blank to mute forever
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {"mute_settings": str(mute_time), "thread_fbid": thread_id}
content = self._post(self.req_url.MUTE_THREAD, data, fix_request=True) |
def arithmetic_crossover(random, mom, dad, args):
"""Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None)
"""
ax_alpha = args.setdefault('ax_alpha', 0.5)
ax_points = args.setdefault('ax_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if ax_points is None:
ax_points = list(range(min(len(bro), len(sis))))
for i in ax_points:
bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None) | Below is the instruction that describes the task:
### Input:
Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None)
### Response:
def arithmetic_crossover(random, mom, dad, args):
"""Return the offspring of arithmetic crossover on the candidates.
This function performs arithmetic crossover (AX), which is similar to a
generalized weighted averaging of the candidate elements. The allele
of each parent is weighted by the *ax_alpha* keyword argument, and
the allele of the complement parent is weighted by 1 - *ax_alpha*.
This averaging is only done on the alleles listed in the *ax_points*
keyword argument. If this argument is ``None``, then all alleles
are used. This means that if this function is used with all default
values, then offspring are simple averages of their parents.
This function also makes use of the bounder function as specified
in the EC's ``evolve`` method.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *ax_alpha* -- the weight for the averaging (default 0.5)
- *ax_points* -- a list of points specifying the alleles to
recombine (default None)
"""
ax_alpha = args.setdefault('ax_alpha', 0.5)
ax_points = args.setdefault('ax_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if ax_points is None:
ax_points = list(range(min(len(bro), len(sis))))
for i in ax_points:
bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children |
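The core of `arithmetic_crossover` is the weighted averaging of alleles. The sketch below is a simplified, standalone reimplementation of just that step, not the library function itself: it drops the `crossover_rate`, `args['_ec'].bounder`, and `ax_points` machinery and always averages every allele.

```python
import copy

def simple_arithmetic_crossover(mom, dad, ax_alpha=0.5):
    """Weighted average of two parent candidates on every allele."""
    bro, sis = copy.copy(dad), copy.copy(mom)
    for i in range(min(len(bro), len(sis))):
        bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
        sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
    return [bro, sis]

mom = [0.0, 2.0, 4.0]
dad = [4.0, 2.0, 0.0]
# With ax_alpha=0.5 both children are the plain average of the parents.
print(simple_arithmetic_crossover(mom, dad))  # [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]
```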
def convert_ini(config_dict):
"""Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
"""
config_lines = []
for env, configs in sorted(config_dict.items()):
for resource, app_properties in sorted(configs.items()):
try:
for app_property, value in sorted(app_properties.items()):
variable = '{env}_{resource}_{app_property}'.format(
env=env, resource=resource, app_property=app_property).upper()
if isinstance(value, (dict, DeepChainMap)):
safe_value = "'{0}'".format(json.dumps(dict(value)))
else:
safe_value = json.dumps(value)
line = "{variable}={value}".format(variable=variable, value=safe_value)
LOG.debug('INI line: %s', line)
config_lines.append(line)
except AttributeError:
resource = resource.upper()
app_properties = "'{}'".format(json.dumps(app_properties))
line = '{0}={1}'.format(resource, app_properties)
LOG.debug('INI line: %s', line)
config_lines.append(line)
return config_lines | Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value. | Below is the instruction that describes the task:
### Input:
Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
### Response:
def convert_ini(config_dict):
"""Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
"""
config_lines = []
for env, configs in sorted(config_dict.items()):
for resource, app_properties in sorted(configs.items()):
try:
for app_property, value in sorted(app_properties.items()):
variable = '{env}_{resource}_{app_property}'.format(
env=env, resource=resource, app_property=app_property).upper()
if isinstance(value, (dict, DeepChainMap)):
safe_value = "'{0}'".format(json.dumps(dict(value)))
else:
safe_value = json.dumps(value)
line = "{variable}={value}".format(variable=variable, value=safe_value)
LOG.debug('INI line: %s', line)
config_lines.append(line)
except AttributeError:
resource = resource.upper()
app_properties = "'{}'".format(json.dumps(app_properties))
line = '{0}={1}'.format(resource, app_properties)
LOG.debug('INI line: %s', line)
config_lines.append(line)
return config_lines |
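A standalone sketch of the same flattening idea as `convert_ini`: nested env → resource → property dictionaries become `ENV_RESOURCE_PROPERTY=value` lines. It omits the `DeepChainMap` handling and logging of the original and only shows the expected output shape.

```python
import json

def flatten_to_ini(config_dict):
    """Flatten {env: {resource: {prop: value}}} into ENV_RESOURCE_PROP=value lines."""
    lines = []
    for env, configs in sorted(config_dict.items()):
        for resource, props in sorted(configs.items()):
            if isinstance(props, dict):
                for prop, value in sorted(props.items()):
                    key = '{}_{}_{}'.format(env, resource, prop).upper()
                    lines.append('{}={}'.format(key, json.dumps(value)))
            else:
                # Non-dict values collapse to RESOURCE=value, mirroring the fallback branch.
                lines.append('{}={}'.format(resource.upper(), json.dumps(props)))
    return lines

example = {'dev': {'app': {'port': 8080, 'debug': True}}}
print(flatten_to_ini(example))  # ['DEV_APP_DEBUG=true', 'DEV_APP_PORT=8080']
```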
def start(self, interval=None):
"""
Emits the start requested signal for this timer, effectively starting
its internal timer.
:param interval | <int>
"""
# update the interval value
with QtCore.QReadLocker(self.__lock):
if interval is None:
interval = self.__interval
else:
self.__interval = interval
# request the timer to start
self._startRequested.emit(interval) | Emits the start requested signal for this timer, effectively starting
its internal timer.
:param interval | <int> | Below is the instruction that describes the task:
### Input:
Emits the start requested signal for this timer, effectively starting
its internal timer.
:param interval | <int>
### Response:
def start(self, interval=None):
"""
Emits the start requested signal for this timer, effectively starting
its internal timer.
:param interval | <int>
"""
# update the interval value
with QtCore.QReadLocker(self.__lock):
if interval is None:
interval = self.__interval
else:
self.__interval = interval
# request the timer to start
self._startRequested.emit(interval) |
def filter_cells(
data: AnnData,
min_counts: Optional[int] = None,
min_genes: Optional[int] = None,
max_counts: Optional[int] = None,
max_genes: Optional[int] = None,
inplace: bool = True,
copy: bool = False,
) -> Optional[Tuple[np.ndarray, np.ndarray]]:
"""Filter cell outliers based on counts and numbers of genes expressed.
For instance, only keep cells with at least `min_counts` counts or
`min_genes` genes expressed. This is to filter measurement outliers,
i.e. “unreliable” observations.
Only provide one of the optional parameters ``min_counts``, ``min_genes``,
``max_counts``, ``max_genes`` per call.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
min_counts
Minimum number of counts required for a cell to pass filtering.
min_genes
Minimum number of genes expressed required for a cell to pass filtering.
max_counts
Maximum number of counts required for a cell to pass filtering.
max_genes
Maximum number of genes expressed required for a cell to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on ``inplace``, returns the following arrays or directly subsets
and annotates the data matrix:
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
Depending on what was thresholded (``counts`` or ``genes``), the array stores
``n_counts`` or ``n_cells`` per gene.
Examples
--------
>>> adata = sc.datasets.krumsiek11()
>>> adata.n_obs
640
>>> adata.var_names
['Gata2' 'Gata1' 'Fog1' 'EKLF' 'Fli1' 'SCL' 'Cebpa'
'Pu.1' 'cJun' 'EgrNab' 'Gfi1']
>>> # add some true zeros
>>> adata.X[adata.X < 0.3] = 0
>>> # simply compute the number of genes per cell
>>> sc.pp.filter_cells(adata, min_genes=0)
>>> adata.n_obs
640
>>> adata.obs['n_genes'].min()
1
>>> # filter manually
>>> adata_copy = adata[adata.obs['n_genes'] >= 3]
>>> adata_copy.obs['n_genes'].min()
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
>>> # actually do some filtering
>>> sc.pp.filter_cells(adata, min_genes=3)
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
"""
if copy:
logg.warn('`copy` is deprecated, use `inplace` instead.')
n_given_options = sum(
option is not None for option in
[min_genes, min_counts, max_genes, max_counts])
if n_given_options != 1:
raise ValueError(
'Only provide one of the optional parameters `min_counts`,'
'`min_genes`, `max_counts`, `max_genes` per call.')
if isinstance(data, AnnData):
adata = data.copy() if copy else data
cell_subset, number = materialize_as_ndarray(filter_cells(adata.X, min_counts, min_genes, max_counts, max_genes))
if not inplace:
return cell_subset, number
if min_genes is None and max_genes is None: adata.obs['n_counts'] = number
else: adata.obs['n_genes'] = number
adata._inplace_subset_obs(cell_subset)
return adata if copy else None
X = data # proceed with processing the data matrix
min_number = min_counts if min_genes is None else min_genes
max_number = max_counts if max_genes is None else max_genes
number_per_cell = np.sum(X if min_genes is None and max_genes is None
else X > 0, axis=1)
if issparse(X): number_per_cell = number_per_cell.A1
if min_number is not None:
cell_subset = number_per_cell >= min_number
if max_number is not None:
cell_subset = number_per_cell <= max_number
s = np.sum(~cell_subset)
if s > 0:
logg.info('filtered out {} cells that have'.format(s), end=' ')
if min_genes is not None or min_counts is not None:
logg.info('less than',
str(min_genes) + ' genes expressed'
if min_counts is None else str(min_counts) + ' counts', no_indent=True)
if max_genes is not None or max_counts is not None:
logg.info('more than ',
str(max_genes) + ' genes expressed'
if max_counts is None else str(max_counts) + ' counts', no_indent=True)
return cell_subset, number_per_cell | Filter cell outliers based on counts and numbers of genes expressed.
For instance, only keep cells with at least `min_counts` counts or
`min_genes` genes expressed. This is to filter measurement outliers,
i.e. “unreliable” observations.
Only provide one of the optional parameters ``min_counts``, ``min_genes``,
``max_counts``, ``max_genes`` per call.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
min_counts
Minimum number of counts required for a cell to pass filtering.
min_genes
Minimum number of genes expressed required for a cell to pass filtering.
max_counts
Maximum number of counts required for a cell to pass filtering.
max_genes
Maximum number of genes expressed required for a cell to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on ``inplace``, returns the following arrays or directly subsets
and annotates the data matrix:
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
Depending on what was thresholded (``counts`` or ``genes``), the array stores
``n_counts`` or ``n_cells`` per gene.
Examples
--------
>>> adata = sc.datasets.krumsiek11()
>>> adata.n_obs
640
>>> adata.var_names
['Gata2' 'Gata1' 'Fog1' 'EKLF' 'Fli1' 'SCL' 'Cebpa'
'Pu.1' 'cJun' 'EgrNab' 'Gfi1']
>>> # add some true zeros
>>> adata.X[adata.X < 0.3] = 0
>>> # simply compute the number of genes per cell
>>> sc.pp.filter_cells(adata, min_genes=0)
>>> adata.n_obs
640
>>> adata.obs['n_genes'].min()
1
>>> # filter manually
>>> adata_copy = adata[adata.obs['n_genes'] >= 3]
>>> adata_copy.obs['n_genes'].min()
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
>>> # actually do some filtering
>>> sc.pp.filter_cells(adata, min_genes=3)
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3 | Below is the instruction that describes the task:
### Input:
Filter cell outliers based on counts and numbers of genes expressed.
For instance, only keep cells with at least `min_counts` counts or
`min_genes` genes expressed. This is to filter measurement outliers,
i.e. “unreliable” observations.
Only provide one of the optional parameters ``min_counts``, ``min_genes``,
``max_counts``, ``max_genes`` per call.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
min_counts
Minimum number of counts required for a cell to pass filtering.
min_genes
Minimum number of genes expressed required for a cell to pass filtering.
max_counts
Maximum number of counts required for a cell to pass filtering.
max_genes
Maximum number of genes expressed required for a cell to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on ``inplace``, returns the following arrays or directly subsets
and annotates the data matrix:
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
Depending on what was thresholded (``counts`` or ``genes``), the array stores
``n_counts`` or ``n_cells`` per gene.
Examples
--------
>>> adata = sc.datasets.krumsiek11()
>>> adata.n_obs
640
>>> adata.var_names
['Gata2' 'Gata1' 'Fog1' 'EKLF' 'Fli1' 'SCL' 'Cebpa'
'Pu.1' 'cJun' 'EgrNab' 'Gfi1']
>>> # add some true zeros
>>> adata.X[adata.X < 0.3] = 0
>>> # simply compute the number of genes per cell
>>> sc.pp.filter_cells(adata, min_genes=0)
>>> adata.n_obs
640
>>> adata.obs['n_genes'].min()
1
>>> # filter manually
>>> adata_copy = adata[adata.obs['n_genes'] >= 3]
>>> adata_copy.obs['n_genes'].min()
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
>>> # actually do some filtering
>>> sc.pp.filter_cells(adata, min_genes=3)
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
### Response:
def filter_cells(
data: AnnData,
min_counts: Optional[int] = None,
min_genes: Optional[int] = None,
max_counts: Optional[int] = None,
max_genes: Optional[int] = None,
inplace: bool = True,
copy: bool = False,
) -> Optional[Tuple[np.ndarray, np.ndarray]]:
"""Filter cell outliers based on counts and numbers of genes expressed.
For instance, only keep cells with at least `min_counts` counts or
`min_genes` genes expressed. This is to filter measurement outliers,
i.e. “unreliable” observations.
Only provide one of the optional parameters ``min_counts``, ``min_genes``,
``max_counts``, ``max_genes`` per call.
Parameters
----------
data
The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
Rows correspond to cells and columns to genes.
min_counts
Minimum number of counts required for a cell to pass filtering.
min_genes
Minimum number of genes expressed required for a cell to pass filtering.
max_counts
Maximum number of counts required for a cell to pass filtering.
max_genes
Maximum number of genes expressed required for a cell to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on ``inplace``, returns the following arrays or directly subsets
and annotates the data matrix:
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
number_per_cell : numpy.ndarray
Depending on what was thresholded (``counts`` or ``genes``), the array stores
``n_counts`` or ``n_cells`` per gene.
Examples
--------
>>> adata = sc.datasets.krumsiek11()
>>> adata.n_obs
640
>>> adata.var_names
['Gata2' 'Gata1' 'Fog1' 'EKLF' 'Fli1' 'SCL' 'Cebpa'
'Pu.1' 'cJun' 'EgrNab' 'Gfi1']
>>> # add some true zeros
>>> adata.X[adata.X < 0.3] = 0
>>> # simply compute the number of genes per cell
>>> sc.pp.filter_cells(adata, min_genes=0)
>>> adata.n_obs
640
>>> adata.obs['n_genes'].min()
1
>>> # filter manually
>>> adata_copy = adata[adata.obs['n_genes'] >= 3]
>>> adata_copy.obs['n_genes'].min()
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
>>> # actually do some filtering
>>> sc.pp.filter_cells(adata, min_genes=3)
>>> adata.n_obs
554
>>> adata.obs['n_genes'].min()
3
"""
if copy:
logg.warn('`copy` is deprecated, use `inplace` instead.')
n_given_options = sum(
option is not None for option in
[min_genes, min_counts, max_genes, max_counts])
if n_given_options != 1:
raise ValueError(
'Only provide one of the optional parameters `min_counts`,'
'`min_genes`, `max_counts`, `max_genes` per call.')
if isinstance(data, AnnData):
adata = data.copy() if copy else data
cell_subset, number = materialize_as_ndarray(filter_cells(adata.X, min_counts, min_genes, max_counts, max_genes))
if not inplace:
return cell_subset, number
if min_genes is None and max_genes is None: adata.obs['n_counts'] = number
else: adata.obs['n_genes'] = number
adata._inplace_subset_obs(cell_subset)
return adata if copy else None
X = data # proceed with processing the data matrix
min_number = min_counts if min_genes is None else min_genes
max_number = max_counts if max_genes is None else max_genes
number_per_cell = np.sum(X if min_genes is None and max_genes is None
else X > 0, axis=1)
if issparse(X): number_per_cell = number_per_cell.A1
if min_number is not None:
cell_subset = number_per_cell >= min_number
if max_number is not None:
cell_subset = number_per_cell <= max_number
s = np.sum(~cell_subset)
if s > 0:
logg.info('filtered out {} cells that have'.format(s), end=' ')
if min_genes is not None or min_counts is not None:
logg.info('less than',
str(min_genes) + ' genes expressed'
if min_counts is None else str(min_counts) + ' counts', no_indent=True)
if max_genes is not None or max_counts is not None:
logg.info('more than ',
str(max_genes) + ' genes expressed'
if max_counts is None else str(max_counts) + ' counts', no_indent=True)
return cell_subset, number_per_cell |
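When `filter_cells` is given a plain matrix, its core is a per-cell count and a boolean mask. The numpy-only sketch below reproduces just that branch (no AnnData, no sparse handling, a single threshold) to make the returned `(cell_subset, number_per_cell)` pair concrete.

```python
import numpy as np

def simple_filter_cells(X, min_genes=None, min_counts=None):
    """Return (boolean mask of kept cells, per-cell number) for a dense matrix."""
    if min_genes is not None:
        number_per_cell = np.sum(X > 0, axis=1)   # genes expressed per cell
        cell_subset = number_per_cell >= min_genes
    else:
        number_per_cell = np.sum(X, axis=1)       # total counts per cell
        cell_subset = number_per_cell >= min_counts
    return cell_subset, number_per_cell

X = np.array([[0, 1, 2],
              [0, 0, 3],
              [1, 1, 1]])
mask, n_genes = simple_filter_cells(X, min_genes=2)
print(mask)     # [ True False  True]
print(n_genes)  # [2 1 3]
```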
def _init_obo_version(self, line):
"""Save obo version and release."""
if line[0:14] == "format-version":
self.format_version = line[16:-1]
if line[0:12] == "data-version":
self.data_version = line[14:-1] | Save obo version and release. | Below is the instruction that describes the task:
### Input:
Save obo version and release.
### Response:
def _init_obo_version(self, line):
"""Save obo version and release."""
if line[0:14] == "format-version":
self.format_version = line[16:-1]
if line[0:12] == "data-version":
self.data_version = line[14:-1] |
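A hedged standalone sketch of the same header parsing as `_init_obo_version`: it pulls the values out of `format-version` and `data-version` lines, using `split` instead of the original's fixed character offsets so trailing newlines do not matter.

```python
def parse_obo_versions(lines):
    """Extract format-version and data-version from OBO header lines."""
    versions = {}
    for line in lines:
        if line.startswith("format-version:"):
            versions['format_version'] = line.split(":", 1)[1].strip()
        elif line.startswith("data-version:"):
            versions['data_version'] = line.split(":", 1)[1].strip()
    return versions

header = [
    "format-version: 1.2\n",
    "data-version: releases/2024-01-01\n",
]
print(parse_obo_versions(header))
# {'format_version': '1.2', 'data_version': 'releases/2024-01-01'}
```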
def get_declared_fields(bases, attrs, cls_filter,
with_base_fields=True,
extra_attr_name='base_fields'):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
"""
fields = [(field_name, attrs.pop(field_name))\
for field_name, obj in attrs.items()\
if isinstance(obj, cls_filter)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in reversed(bases):
if hasattr(base, extra_attr_name):
fields = getattr(base, extra_attr_name).items() + fields
else:
for base in reversed(bases):
if hasattr(base, 'declared_fields'):
fields = base.declared_fields.items() + fields
return SortedDict(fields) | Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions | Below is the instruction that describes the task:
### Input:
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
### Response:
def get_declared_fields(bases, attrs, cls_filter,
with_base_fields=True,
extra_attr_name='base_fields'):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
"""
fields = [(field_name, attrs.pop(field_name))\
for field_name, obj in attrs.items()\
if isinstance(obj, cls_filter)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in reversed(bases):
if hasattr(base, extra_attr_name):
fields = getattr(base, extra_attr_name).items() + fields
else:
for base in reversed(bases):
if hasattr(base, 'declared_fields'):
fields = base.declared_fields.items() + fields
return SortedDict(fields) |
def _AddVariable(self, variable):
"""
Add a variable to the model. Should not be used by end-user
"""
if isinstance(variable, Signal):
if not variable in self.signals:
self.signals.append(variable)
elif isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
else:
raise TypeError
self._utd_graph = False | Add a variable to the model. Should not be used by end-user | Below is the instruction that describes the task:
### Input:
Add a variable to the model. Should not be used by end-user
### Response:
def _AddVariable(self, variable):
"""
Add a variable to the model. Should not be used by end-user
"""
if isinstance(variable, Signal):
if not variable in self.signals:
self.signals.append(variable)
elif isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
else:
raise TypeError
self._utd_graph = False |
def remove(self, identifier: Union[DataObjectReplica, int]):
"""
Removes a data object from this collection that has the given unique identifier. A `ValueError` will be raised
if a data object with the given identifier does not exist.
:param identifier: the identifier of the data object
"""
if isinstance(identifier, int):
self._remove_by_number(identifier)
elif isinstance(identifier, DataObjectReplica):
self._remove_by_object(identifier)
else:
raise TypeError("Can only remove by number or by object reference: `%s` given" % type(identifier)) | Removes a data object from this collection that has the given unique identifier. A `ValueError` will be raised
if a data object with the given identifier does not exist.
:param identifier: the identifier of the data object | Below is the instruction that describes the task:
### Input:
Removes a data object from this collection that has the given unique identifier. A `ValueError` will be raised
if a data object with the given identifier does not exist.
:param identifier: the identifier of the data object
### Response:
def remove(self, identifier: Union[DataObjectReplica, int]):
"""
Removes a data object from this collection that has the given unique identifier. A `ValueError` will be raised
if a data object with the given identifier does not exist.
:param identifier: the identifier of the data object
"""
if isinstance(identifier, int):
self._remove_by_number(identifier)
elif isinstance(identifier, DataObjectReplica):
self._remove_by_object(identifier)
else:
raise TypeError("Can only remove by number or by object reference: `%s` given" % type(identifier)) |
def _initialize_counter(self):
"""Initialize our counter pointer.
If we're the top-level factory, instantiate a new counter
Otherwise, point to the top-level factory's counter.
"""
if self._counter is not None:
return
if self.counter_reference is self:
self._counter = _Counter(seq=self.factory._setup_next_sequence())
else:
self.counter_reference._initialize_counter()
self._counter = self.counter_reference._counter | Initialize our counter pointer.
If we're the top-level factory, instantiate a new counter
Otherwise, point to the top-level factory's counter. | Below is the instruction that describes the task:
### Input:
Initialize our counter pointer.
If we're the top-level factory, instantiate a new counter
Otherwise, point to the top-level factory's counter.
### Response:
def _initialize_counter(self):
"""Initialize our counter pointer.
If we're the top-level factory, instantiate a new counter
Otherwise, point to the top-level factory's counter.
"""
if self._counter is not None:
return
if self.counter_reference is self:
self._counter = _Counter(seq=self.factory._setup_next_sequence())
else:
self.counter_reference._initialize_counter()
self._counter = self.counter_reference._counter |
def generate_lines(input_file,
start=0,
stop=float('inf')):
"""Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time"""
with gzip.GzipFile(input_file, 'rU') as f:
for i, line in enumerate(f):
if i < start:
continue
if i >= stop:
break
yield line.rstrip() | Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time | Below is the instruction that describes the task:
### Input:
Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time
### Response:
def generate_lines(input_file,
start=0,
stop=float('inf')):
"""Generate (yield) lines in a gzipped file (*.txt.gz) one line at a time"""
with gzip.GzipFile(input_file, 'rU') as f:
for i, line in enumerate(f):
if i < start:
continue
if i >= stop:
break
yield line.rstrip() |
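A runnable usage sketch for `generate_lines`. The `'rU'` mode in the original is Python 2 era; the sketch opens the gzip file in text mode (`'rt'`) instead, which is an assumption about how it would be used today, and it writes a small demo file first so the example is self-contained.

```python
import gzip

def generate_lines(input_file, start=0, stop=float('inf')):
    """Yield stripped lines in [start, stop) from a gzipped text file."""
    with gzip.open(input_file, 'rt') as f:
        for i, line in enumerate(f):
            if i < start:
                continue
            if i >= stop:
                break
            yield line.rstrip()

# Write a tiny demo file, then read a slice of it lazily.
with gzip.open('demo.txt.gz', 'wt') as f:
    f.write('line0\nline1\nline2\nline3\n')

print(list(generate_lines('demo.txt.gz', start=1, stop=3)))  # ['line1', 'line2']
```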
def register_upload_command(self, upload_func):
"""
Add the upload command to the parser and call upload_func(project_name, folders, follow_symlinks) when chosen.
:param upload_func: func Called when this option is chosen: upload_func(project_name, folders, follow_symlinks).
"""
description = "Uploads local files and folders to a remote host."
upload_parser = self.subparsers.add_parser('upload', description=description)
_add_dry_run(upload_parser, help_text="Instead of uploading displays a list of folders/files that "
"need to be uploaded.")
add_project_name_or_id_arg(upload_parser, help_text_suffix="upload files/folders to.")
_add_folders_positional_arg(upload_parser)
_add_follow_symlinks_arg(upload_parser)
upload_parser.set_defaults(func=upload_func) | Add the upload command to the parser and call upload_func(project_name, folders, follow_symlinks) when chosen.
:param upload_func: func Called when this option is chosen: upload_func(project_name, folders, follow_symlinks). | Below is the instruction that describes the task:
### Input:
Add the upload command to the parser and call upload_func(project_name, folders, follow_symlinks) when chosen.
:param upload_func: func Called when this option is chosen: upload_func(project_name, folders, follow_symlinks).
### Response:
def register_upload_command(self, upload_func):
"""
Add the upload command to the parser and call upload_func(project_name, folders, follow_symlinks) when chosen.
:param upload_func: func Called when this option is chosen: upload_func(project_name, folders, follow_symlinks).
"""
description = "Uploads local files and folders to a remote host."
upload_parser = self.subparsers.add_parser('upload', description=description)
_add_dry_run(upload_parser, help_text="Instead of uploading displays a list of folders/files that "
"need to be uploaded.")
add_project_name_or_id_arg(upload_parser, help_text_suffix="upload files/folders to.")
_add_folders_positional_arg(upload_parser)
_add_follow_symlinks_arg(upload_parser)
upload_parser.set_defaults(func=upload_func) |
def configure_sentry_errors(self):
"""
Configure sentry.errors to use the same loggers as the root handler
@rtype: None
"""
sentry_errors_logger = logging.getLogger('sentry.errors')
root_logger = logging.getLogger()
for handler in root_logger.handlers:
sentry_errors_logger.addHandler(handler) | Configure sentry.errors to use the same loggers as the root handler
@rtype: None | Below is the instruction that describes the task:
### Input:
Configure sentry.errors to use the same loggers as the root handler
@rtype: None
### Response:
def configure_sentry_errors(self):
"""
Configure sentry.errors to use the same loggers as the root handler
@rtype: None
"""
sentry_errors_logger = logging.getLogger('sentry.errors')
root_logger = logging.getLogger()
for handler in root_logger.handlers:
sentry_errors_logger.addHandler(handler) |
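The handler-sharing in `configure_sentry_errors` is plain `logging` wiring, shown standalone below. The `sentry.errors` logger name is kept from the row above; the explicit `propagate = False` is an addition here so records are not emitted twice once the root handlers are attached directly.

```python
import logging
import sys

# Give the root logger a handler, as an application normally would.
root_logger = logging.getLogger()
root_logger.addHandler(logging.StreamHandler(sys.stderr))

# Point 'sentry.errors' at the same handlers so its records go to the same place.
sentry_errors_logger = logging.getLogger('sentry.errors')
sentry_errors_logger.propagate = False  # avoid double emission through the root logger
for handler in root_logger.handlers:
    sentry_errors_logger.addHandler(handler)

sentry_errors_logger.error('this now reaches the root handlers too')
```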
def to_satoshis(input_quantity, input_type):
''' convert to satoshis, no rounding '''
assert input_type in UNIT_CHOICES, input_type
# convert to satoshis
if input_type in ('btc', 'mbtc', 'bit'):
satoshis = float(input_quantity) * float(UNIT_MAPPINGS[input_type]['satoshis_per'])
elif input_type == 'satoshi':
satoshis = input_quantity
else:
raise Exception('Invalid Unit Choice: %s' % input_type)
return int(satoshis) | convert to satoshis, no rounding | Below is the instruction that describes the task:
### Input:
convert to satoshis, no rounding
### Response:
def to_satoshis(input_quantity, input_type):
''' convert to satoshis, no rounding '''
assert input_type in UNIT_CHOICES, input_type
# convert to satoshis
if input_type in ('btc', 'mbtc', 'bit'):
satoshis = float(input_quantity) * float(UNIT_MAPPINGS[input_type]['satoshis_per'])
elif input_type == 'satoshi':
satoshis = input_quantity
else:
raise Exception('Invalid Unit Choice: %s' % input_type)
return int(satoshis) |
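A self-contained sketch of the same conversion as `to_satoshis`. The `UNIT_MAPPINGS` values below (1e8 satoshis per BTC, 1e5 per mBTC, 100 per bit) are the standard Bitcoin denominations, but the exact structure of the real module's mapping is an assumption.

```python
# Assumed mapping for illustration; the real module defines its own.
UNIT_MAPPINGS = {
    'btc': {'satoshis_per': 1e8},
    'mbtc': {'satoshis_per': 1e5},
    'bit': {'satoshis_per': 1e2},
}
UNIT_CHOICES = tuple(UNIT_MAPPINGS) + ('satoshi',)

def to_satoshis(input_quantity, input_type):
    """Convert to satoshis, truncating (no rounding), as in the row above."""
    assert input_type in UNIT_CHOICES, input_type
    if input_type in ('btc', 'mbtc', 'bit'):
        satoshis = float(input_quantity) * UNIT_MAPPINGS[input_type]['satoshis_per']
    else:
        satoshis = input_quantity
    return int(satoshis)

print(to_satoshis('0.5', 'btc'))  # 50000000
print(to_satoshis(1.7, 'bit'))    # 170
```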
def info(self):
"""Get connection info."""
backend_cls = self.backend_cls or "amqplib"
port = self.port or self.create_backend().default_port
return {"hostname": self.hostname,
"userid": self.userid,
"password": self.password,
"virtual_host": self.virtual_host,
"port": port,
"insist": self.insist,
"ssl": self.ssl,
"transport_cls": backend_cls,
"backend_cls": backend_cls,
"connect_timeout": self.connect_timeout} | Get connection info. | Below is the the instruction that describes the task:
### Input:
Get connection info.
### Response:
def info(self):
"""Get connection info."""
backend_cls = self.backend_cls or "amqplib"
port = self.port or self.create_backend().default_port
return {"hostname": self.hostname,
"userid": self.userid,
"password": self.password,
"virtual_host": self.virtual_host,
"port": port,
"insist": self.insist,
"ssl": self.ssl,
"transport_cls": backend_cls,
"backend_cls": backend_cls,
"connect_timeout": self.connect_timeout} |
def category(msg):
"""Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number
"""
if common.typecode(msg) < 1 or common.typecode(msg) > 4:
raise RuntimeError("%s: Not a identification message" % msg)
msgbin = common.hex2bin(msg)
return common.bin2int(msgbin[5:8]) | Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number | Below is the instruction that describes the task:
### Input:
Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number
### Response:
def category(msg):
"""Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number
"""
if common.typecode(msg) < 1 or common.typecode(msg) > 4:
raise RuntimeError("%s: Not a identification message" % msg)
msgbin = common.hex2bin(msg)
return common.bin2int(msgbin[5:8]) |
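The bit slicing in `category` can be shown without the pyModeS helpers: the sketch below converts a hex message to a zero-padded bit string and reads the same `[5:8]` slice the function above uses. The sample message is illustrative only, not a captured frame.

```python
def hex2bin(hexstr):
    """Hex string to a zero-padded bit string (4 bits per hex digit)."""
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bin2int(binstr):
    return int(binstr, 2)

msg = "8D406B902015A678D4D220AA4BDA"  # illustrative 28-hex-digit message
msgbin = hex2bin(msg)
print(len(msgbin))           # 112 bits
print(bin2int(msgbin[5:8]))  # the three bits the function above reads as the category
```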
def parse(cls, data: bytes) -> 'MessageContent':
"""Parse the bytestring into message content.
Args:
data: The bytestring to parse.
"""
lines = cls._find_lines(data)
view = memoryview(data)
return cls._parse(data, view, lines) | Parse the bytestring into message content.
Args:
data: The bytestring to parse. | Below is the instruction that describes the task:
### Input:
Parse the bytestring into message content.
Args:
data: The bytestring to parse.
### Response:
def parse(cls, data: bytes) -> 'MessageContent':
"""Parse the bytestring into message content.
Args:
data: The bytestring to parse.
"""
lines = cls._find_lines(data)
view = memoryview(data)
return cls._parse(data, view, lines) |
def get_servo_torque(self):
""" Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occurred while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(PWM_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if ord(rxdata[10])<=127:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
else:
return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors") | Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occurred while opening serial port | Below is the instruction that describes the task:
### Input:
Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occurred while opening serial port
### Response:
def get_servo_torque(self):
""" Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occurred while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(PWM_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if ord(rxdata[10])<=127:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
else:
return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors") |
def remove_functions(source, all_inline=False):
"""removes functions and returns new source, and 2 dicts.
first dict with removed hoisted(global) functions and second with replaced inline functions"""
global INLINE_COUNT
inline = {}
hoisted = {}
n = 0
limit = len(source) - 9 # 8 is length of 'function'
res = ''
last = 0
while n < limit:
if n and source[n - 1] in IDENTIFIER_PART:
n += 1
continue
if source[n:n + 8] == 'function' and source[n +
8] not in IDENTIFIER_PART:
if source[:n].rstrip().endswith(
'.'): # allow function as a property name :)
n += 1
continue
if source[n + 8:].lstrip().startswith(
':'): # allow functions inside objects...
n += 1
continue
entered = n
res += source[last:n]
name = ''
n = pass_white(source, n + 8)
if source[n] in IDENTIFIER_START: # hoisted function
name, n = parse_identifier(source, n)
args, n = pass_bracket(source, n, '()')
if not args:
raise SyntaxError('Function misses bracket with argnames ()')
args = args.strip('() \n')
args = tuple(parse_identifier(e, 0)[0]
for e in argsplit(args)) if args else ()
if len(args) - len(set(args)):
# I know its legal in JS but python does not allow duplicate argnames
# I will not work around it
raise SyntaxError(
'Function has duplicate argument names. Its not legal in this implementation. Sorry.'
)
block, n = pass_bracket(source, n, '{}')
if not block:
raise SyntaxError(
'Function does not have any code block to execute')
mixed = False # named function expression flag
if name and not all_inline:
# Here I will distinguish between named function expression (mixed) and a function statement
before = source[:entered].rstrip()
if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
#print 'Ended ith keyword'
mixed = True
elif before and before[-1] not in PRE_ALLOWED and not before[
-2:] in INCREMENTS:
#print 'Ended with'+repr(before[-1]), before[-1]=='}'
mixed = True
else:
#print 'FUNCTION STATEMENT'
#its a function statement.
# todo remove fucking label if present!
hoisted[name] = block, args
if not name or mixed or all_inline: # its a function expression (can be both named and not named)
#print 'FUNCTION EXPRESSION'
INLINE_COUNT += 1
iname = INLINE_NAME % INLINE_COUNT # inline name
res += ' ' + iname
inline['%s@%s' % (
iname, name
)] = block, args #here added real name at the end because it has to be added to the func scope
last = n
else:
n += 1
res += source[last:]
return res, hoisted, inline | removes functions and returns new source, and 2 dicts.
first dict with removed hoisted(global) functions and second with replaced inline functions | Below is the instruction that describes the task:
### Input:
removes functions and returns new source, and 2 dicts.
first dict with removed hoisted(global) functions and second with replaced inline functions
### Response:
def remove_functions(source, all_inline=False):
"""removes functions and returns new source, and 2 dicts.
first dict with removed hoisted(global) functions and second with replaced inline functions"""
global INLINE_COUNT
inline = {}
hoisted = {}
n = 0
limit = len(source) - 9 # 8 is length of 'function'
res = ''
last = 0
while n < limit:
if n and source[n - 1] in IDENTIFIER_PART:
n += 1
continue
if source[n:n + 8] == 'function' and source[n +
8] not in IDENTIFIER_PART:
if source[:n].rstrip().endswith(
'.'): # allow function as a property name :)
n += 1
continue
if source[n + 8:].lstrip().startswith(
':'): # allow functions inside objects...
n += 1
continue
entered = n
res += source[last:n]
name = ''
n = pass_white(source, n + 8)
if source[n] in IDENTIFIER_START: # hoisted function
name, n = parse_identifier(source, n)
args, n = pass_bracket(source, n, '()')
if not args:
raise SyntaxError('Function misses bracket with argnames ()')
args = args.strip('() \n')
args = tuple(parse_identifier(e, 0)[0]
for e in argsplit(args)) if args else ()
if len(args) - len(set(args)):
# I know its legal in JS but python does not allow duplicate argnames
# I will not work around it
raise SyntaxError(
'Function has duplicate argument names. Its not legal in this implementation. Sorry.'
)
block, n = pass_bracket(source, n, '{}')
if not block:
raise SyntaxError(
'Function does not have any code block to execute')
mixed = False # named function expression flag
if name and not all_inline:
# Here I will distinguish between named function expression (mixed) and a function statement
before = source[:entered].rstrip()
if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
#print 'Ended ith keyword'
mixed = True
elif before and before[-1] not in PRE_ALLOWED and not before[
-2:] in INCREMENTS:
#print 'Ended with'+repr(before[-1]), before[-1]=='}'
mixed = True
else:
#print 'FUNCTION STATEMENT'
#its a function statement.
# todo remove fucking label if present!
hoisted[name] = block, args
if not name or mixed or all_inline: # its a function expression (can be both named and not named)
#print 'FUNCTION EXPRESSION'
INLINE_COUNT += 1
iname = INLINE_NAME % INLINE_COUNT # inline name
res += ' ' + iname
inline['%s@%s' % (
iname, name
)] = block, args #here added real name at the end because it has to be added to the func scope
last = n
else:
n += 1
res += source[last:]
return res, hoisted, inline |
def org_remove_member(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /org-xxxx/removeMember API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FremoveMember
"""
return DXHTTPRequest('/%s/removeMember' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /org-xxxx/removeMember API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FremoveMember | Below is the instruction that describes the task:
### Input:
Invokes the /org-xxxx/removeMember API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FremoveMember
### Response:
def org_remove_member(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /org-xxxx/removeMember API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FremoveMember
"""
return DXHTTPRequest('/%s/removeMember' % object_id, input_params, always_retry=always_retry, **kwargs) |
def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats() | Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine. | Below is the instruction that describes the task:
### Input:
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
### Response:
def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats() |
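Before handing off to `LMOptObj`, the only numerical step visible in `do_levmarq_n_directions` is normalizing each direction to a unit vector. The numpy sketch below isolates that step and its nan check.

```python
import numpy as np

def normalize_directions(directions):
    """Turn each row of `directions` into a unit vector; zero rows raise an error."""
    directions = np.asarray(directions, dtype=float)
    normals = np.array([d / np.sqrt(np.dot(d, d)) for d in directions])
    if np.isnan(normals).any():  # a zero row produces 0/0 = nan
        raise ValueError('`directions` must not be 0s or contain nan')
    return normals

print(normalize_directions([[3.0, 4.0], [0.0, 2.0]]))
# rows are unit vectors: [[0.6 0.8], [0. 1.]]
```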
def group_subscribe(self, topics):
"""Add topics to the current group subscription.
This is used by the group leader to ensure that it receives metadata
updates for all topics that any member of the group is subscribed to.
Arguments:
topics (list of str): topics to add to the group subscription
"""
if self._user_assignment:
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
self._group_subscription.update(topics) | Add topics to the current group subscription.
This is used by the group leader to ensure that it receives metadata
updates for all topics that any member of the group is subscribed to.
Arguments:
topics (list of str): topics to add to the group subscription | Below is the instruction that describes the task:
### Input:
Add topics to the current group subscription.
This is used by the group leader to ensure that it receives metadata
updates for all topics that any member of the group is subscribed to.
Arguments:
topics (list of str): topics to add to the group subscription
### Response:
def group_subscribe(self, topics):
"""Add topics to the current group subscription.
This is used by the group leader to ensure that it receives metadata
updates for all topics that any member of the group is subscribed to.
Arguments:
topics (list of str): topics to add to the group subscription
"""
if self._user_assignment:
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
self._group_subscription.update(topics) |
def get_groups_of_user(self, user_id, **kwargs): # noqa: E501
"""Get groups of the user. # noqa: E501
An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_groups_of_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user whose details are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:return: GroupSummaryList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501
return data | Get groups of the user. # noqa: E501
An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_groups_of_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user whose details are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:return: GroupSummaryList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get groups of the user. # noqa: E501
An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_groups_of_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user whose details are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:return: GroupSummaryList
If the method is called asynchronously,
returns the request thread.
### Response:
def get_groups_of_user(self, user_id, **kwargs): # noqa: E501
"""Get groups of the user. # noqa: E501
An endpoint for retrieving groups of the user. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/users/{user-id}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_groups_of_user(user_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str user_id: The ID of the user whose details are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:return: GroupSummaryList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_groups_of_user_with_http_info(user_id, **kwargs) # noqa: E501
return data |
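
For illustration only: a self-contained sketch of the synchronous/asynchronous calling convention this entry documents. DemoApi and _AsyncResult are stand-ins invented for the sketch and are not part of the mbed Cloud SDK; the real client performs an HTTP request instead of returning placeholder data.

import threading

class _AsyncResult:
    # Minimal stand-in for the thread-like object returned when asynchronous=True.
    def __init__(self, target, *args):
        self._result = None
        self._thread = threading.Thread(target=self._run, args=(target,) + args)
        self._thread.start()

    def _run(self, target, *args):
        self._result = target(*args)

    def get(self):
        self._thread.join()
        return self._result

class DemoApi:
    # Mirrors the dispatch on kwargs.get('asynchronous') shown above.
    def get_groups_of_user(self, user_id, **kwargs):
        if kwargs.get('asynchronous'):
            return _AsyncResult(self._fetch, user_id)
        return self._fetch(user_id)

    def _fetch(self, user_id):
        return ['group-a', 'group-b']  # placeholder data, not a real API call

api = DemoApi()
print(api.get_groups_of_user('user-1'))                           # synchronous call
print(api.get_groups_of_user('user-1', asynchronous=True).get())  # asynchronous call
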
def media_type_str(mediatype):
"""Convert internal API media type to string."""
if mediatype == const.MEDIA_TYPE_UNKNOWN:
return 'Unknown'
if mediatype == const.MEDIA_TYPE_VIDEO:
return 'Video'
if mediatype == const.MEDIA_TYPE_MUSIC:
return 'Music'
if mediatype == const.MEDIA_TYPE_TV:
return 'TV'
    return 'Unsupported' | Convert internal API media type to string. | Below is the instruction that describes the task:
### Input:
Convert internal API media type to string.
### Response:
def media_type_str(mediatype):
"""Convert internal API media type to string."""
if mediatype == const.MEDIA_TYPE_UNKNOWN:
return 'Unknown'
if mediatype == const.MEDIA_TYPE_VIDEO:
return 'Video'
if mediatype == const.MEDIA_TYPE_MUSIC:
return 'Music'
if mediatype == const.MEDIA_TYPE_TV:
return 'TV'
return 'Unsupported' |
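
A side note on the entry above: the if/return chain can be collapsed into a single dictionary lookup. The constant values below are placeholders invented for this sketch, since the real `const` module is not shown in the entry.

from types import SimpleNamespace

const = SimpleNamespace(MEDIA_TYPE_UNKNOWN=0, MEDIA_TYPE_VIDEO=1,
                        MEDIA_TYPE_MUSIC=2, MEDIA_TYPE_TV=3)

def media_type_str(mediatype):
    # Dict-based equivalent of the chained if-statements, with the same fallback.
    return {
        const.MEDIA_TYPE_UNKNOWN: 'Unknown',
        const.MEDIA_TYPE_VIDEO: 'Video',
        const.MEDIA_TYPE_MUSIC: 'Music',
        const.MEDIA_TYPE_TV: 'TV',
    }.get(mediatype, 'Unsupported')

print(media_type_str(const.MEDIA_TYPE_MUSIC))  # Music
print(media_type_str(99))                      # Unsupported
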
def qname(self, stmt):
"""Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping.
"""
if self.gg_level: return stmt.arg
return self.prefix_stack[-1] + ":" + stmt.arg | Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping. | Below is the instruction that describes the task:
### Input:
Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping.
### Response:
def qname(self, stmt):
"""Return (prefixed) node name of `stmt`.
The result is prefixed with the local prefix unless we are
inside a global grouping.
"""
if self.gg_level: return stmt.arg
return self.prefix_stack[-1] + ":" + stmt.arg |
def ready(self, node_id, metadata_priority=True):
"""Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
"""
self.maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority) | Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node | Below is the instruction that describes the task:
### Input:
Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
### Response:
def ready(self, node_id, metadata_priority=True):
"""Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
"""
self.maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority) |
def startPacketCapture(self, pcap_output_file, pcap_data_link_type="DLT_EN10MB"):
"""
:param pcap_output_file: PCAP destination file for the capture
:param pcap_data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
self._capturing = True
self._pcap_output_file = pcap_output_file
self._pcap_data_link_type = pcap_data_link_type | :param pcap_output_file: PCAP destination file for the capture
:param pcap_data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB | Below is the instruction that describes the task:
### Input:
:param pcap_output_file: PCAP destination file for the capture
:param pcap_data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
### Response:
def startPacketCapture(self, pcap_output_file, pcap_data_link_type="DLT_EN10MB"):
"""
:param pcap_output_file: PCAP destination file for the capture
:param pcap_data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
self._capturing = True
self._pcap_output_file = pcap_output_file
self._pcap_data_link_type = pcap_data_link_type |
def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None,
sort=None, sort_reverse=None, limit=None):
"""Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited.
"""
if isinstance(fnames, basestring):
fnames = [fnames]
fnames_expanded = [
os.path.expandvars(os.path.expanduser(fname)) for fname in fnames]
stream = StringIO()
try:
stats = pstats.Stats(fnames[0], stream=stream)
for fname in fnames_expanded[1:]:
stats.add(fname)
except TypeError:
print("No cProfile stats valid.")
return False
except EOFError:
print("Empty file cProfile stats valid.")
return False
except IOError:
print("Error to open file.")
return False
stats.print_stats()
stream.seek(0)
field_list = get_field_list()
line_stats_re = re.compile(
r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+'
r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)'
r'\((?P<%s>.*)\)' % tuple(field_list))
stats_list = []
count = 0
for line in stream:
line = line.strip('\r\n ')
line_stats_match = line_stats_re.match(line) if line else None
fname = line_stats_match.group('file') if line_stats_match else None
if fname and is_fname_match(fname, filter_fnames) and \
not is_exclude(fname, exclude_fnames):
data = dict([(field, line_stats_match.group(field))
for field in field_list])
data['rcalls'], data['calls'] = (
data.get('ncalls', '') + '/' + data.get('ncalls', '')
).split('/')[:2]
data['factor'] = "%.2f" % (
(float(data['rcalls']) - float(data['calls']) + 1) *
float(data['cumtime']))
data['cumulative'] = data['cumtime']
stats_list.append(data)
count += 1
return sorted(stats_list, key=lambda key: float(key[sort or 'factor']),
reverse=not sort_reverse)[:limit] | Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited. | Below is the instruction that describes the task:
### Input:
Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited.
### Response:
def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None,
sort=None, sort_reverse=None, limit=None):
"""Print stats with a filter or exclude filenames, sort index and limit.
:param list fnames: cProfile standard files to process.
:param list filter_fnames: Relative paths to filter and show them.
:param list exclude_fnames: Relative paths to avoid show them.
:param str sort: Standard `pstats` key of value to sort the result.
\n\t\t\t'calls' (call count)
\n\t\t\t'cumulative' (cumulative time)
\n\t\t\t'cumtime' (cumulative time)
\n\t\t\t'file' (file name)
\n\t\t\t'filename' (file name)
\n\t\t\t'module' (file name)
\n\t\t\t'ncalls' (call count)
\n\t\t\t'pcalls' (primitive call count)
\n\t\t\t'line' (line number)
\n\t\t\t'name' (function name)
\n\t\t\t'nfl' (name/file/line)
\n\t\t\t'stdname' (standard name)
\n\t\t\t'time' (internal time)
\n\t\t\t'tottime' (internal time)
:param bool sort_reverse: Reverse sort order.
:param int limit: Limit max result.
:returns: List of dicts with `pstats` print result after filters, sorted
and limited.
"""
if isinstance(fnames, basestring):
fnames = [fnames]
fnames_expanded = [
os.path.expandvars(os.path.expanduser(fname)) for fname in fnames]
stream = StringIO()
try:
stats = pstats.Stats(fnames[0], stream=stream)
for fname in fnames_expanded[1:]:
stats.add(fname)
except TypeError:
print("No cProfile stats valid.")
return False
except EOFError:
print("Empty file cProfile stats valid.")
return False
except IOError:
print("Error to open file.")
return False
stats.print_stats()
stream.seek(0)
field_list = get_field_list()
line_stats_re = re.compile(
r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+'
r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)'
r'\((?P<%s>.*)\)' % tuple(field_list))
stats_list = []
count = 0
for line in stream:
line = line.strip('\r\n ')
line_stats_match = line_stats_re.match(line) if line else None
fname = line_stats_match.group('file') if line_stats_match else None
if fname and is_fname_match(fname, filter_fnames) and \
not is_exclude(fname, exclude_fnames):
data = dict([(field, line_stats_match.group(field))
for field in field_list])
data['rcalls'], data['calls'] = (
data.get('ncalls', '') + '/' + data.get('ncalls', '')
).split('/')[:2]
data['factor'] = "%.2f" % (
(float(data['rcalls']) - float(data['calls']) + 1) *
float(data['cumtime']))
data['cumulative'] = data['cumtime']
stats_list.append(data)
count += 1
return sorted(stats_list, key=lambda key: float(key[sort or 'factor']),
reverse=not sort_reverse)[:limit] |
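
A short usage sketch for the entry above, assuming get_pstats_print2list is importable and that the field names visible in its body ('ncalls', 'cumtime', 'file') are present in each returned dict; the profiled expression is arbitrary.

import cProfile

# Profile something trivial and dump the stats to a file.
cProfile.run("sum(i * i for i in range(10000))", "demo.stats")

rows = get_pstats_print2list(["demo.stats"], sort="cumulative", limit=5)
if rows:  # the function returns False when the stats file cannot be read
    for row in rows:
        print(row["ncalls"], row["cumtime"], row["file"])
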
def validate(self):
"""Validate the configuration against its contract.
:raises DbtProjectError: If the configuration fails validation.
"""
try:
Configuration(**self.serialize())
except ValidationException as e:
raise DbtProjectError(str(e))
if getattr(self.args, 'version_check', False):
self.validate_version() | Validate the configuration against its contract.
:raises DbtProjectError: If the configuration fails validation. | Below is the instruction that describes the task:
### Input:
Validate the configuration against its contract.
:raises DbtProjectError: If the configuration fails validation.
### Response:
def validate(self):
"""Validate the configuration against its contract.
:raises DbtProjectError: If the configuration fails validation.
"""
try:
Configuration(**self.serialize())
except ValidationException as e:
raise DbtProjectError(str(e))
if getattr(self.args, 'version_check', False):
self.validate_version() |
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
                for k, v in self.__dict__.items() if not k.startswith('_')} | Convert to a nested dict. | Below is the instruction that describes the task:
### Input:
Convert to a nested dict.
### Response:
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')} |
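
The snippet below is a minimal, self-contained stand-in for AttrDict written only to exercise the to_dict() body above; the real class presumably carries more behaviour than plain attribute storage.

class AttrDict:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self._private = 'hidden'   # leading underscore, so to_dict() skips it

    def to_dict(self):
        # Same body as the entry above: recurse into nested AttrDict values.
        return {k: v.to_dict() if isinstance(v, AttrDict) else v
                for k, v in self.__dict__.items() if not k.startswith('_')}

cfg = AttrDict(host='localhost', db=AttrDict(name='test', port=5432))
print(cfg.to_dict())  # {'host': 'localhost', 'db': {'name': 'test', 'port': 5432}}
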
def _load_library(library_names, library_file_extensions,
library_search_paths, version_check_callback):
"""
Finds, loads and returns the most recent version of the library.
"""
candidates = _find_library_candidates(library_names,
library_file_extensions,
library_search_paths)
library_versions = []
for filename in candidates:
version = version_check_callback(filename)
if version is not None and version >= (3, 0, 0):
library_versions.append((version, filename))
if not library_versions:
return None
library_versions.sort()
    return ctypes.CDLL(library_versions[-1][1]) | Finds, loads and returns the most recent version of the library. | Below is the instruction that describes the task:
### Input:
Finds, loads and returns the most recent version of the library.
### Response:
def _load_library(library_names, library_file_extensions,
library_search_paths, version_check_callback):
"""
Finds, loads and returns the most recent version of the library.
"""
candidates = _find_library_candidates(library_names,
library_file_extensions,
library_search_paths)
library_versions = []
for filename in candidates:
version = version_check_callback(filename)
if version is not None and version >= (3, 0, 0):
library_versions.append((version, filename))
if not library_versions:
return None
library_versions.sort()
return ctypes.CDLL(library_versions[-1][1]) |
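
The core of the helper above is "keep candidates whose version is at least (3, 0, 0), sort, load the newest". A self-contained illustration of just that selection step, with made-up paths and version tuples standing in for the callback results:

candidates = {
    '/usr/lib/libfoo.so.2.9.9': (2, 9, 9),   # too old, filtered out
    '/usr/lib/libfoo.so.3.1.0': (3, 1, 0),
    '/usr/lib/libfoo.so.3.4.2': (3, 4, 2),
}

library_versions = [(version, path) for path, version in candidates.items()
                    if version >= (3, 0, 0)]
library_versions.sort()
print(library_versions[-1][1])  # /usr/lib/libfoo.so.3.4.2, the newest acceptable build
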
def _determine_uses(self, included_files, forward_declarations):
"""Set up the use type of each symbol."""
file_uses = dict.fromkeys(included_files, UNUSED)
decl_uses = dict.fromkeys(forward_declarations, UNUSED)
symbol_table = self.symbol_table
for name, node in forward_declarations.items():
try:
symbol_table.lookup_symbol(node.name, node.namespace)
decl_uses[name] |= USES_REFERENCE
except symbols.Error:
module = Module(name, None)
symbol_table.add_symbol(node.name, node.namespace, node,
module)
def _do_lookup(name, namespace):
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
file_uses[name] = file_uses.get(name, 0) | USES_DECLARATION
def _add_declaration(name, namespace):
if not name:
# Ignore anonymous struct. It is not standard, but we might as
# well avoid crashing if it is easy.
return
names = [n for n in namespace if n is not None]
if names:
name = '::'.join(names) + '::' + name
if name in decl_uses:
decl_uses[name] |= USES_DECLARATION
def _add_reference(name, namespace):
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
if file_use_node[1].ast_list is None:
decl_uses[name] |= USES_REFERENCE
elif name in file_uses:
# enum and typedef can't be forward declared
if isinstance(file_use_node[0], (ast.Enum, ast.Typedef)):
file_uses[name] |= USES_DECLARATION
else:
file_uses[name] |= USES_REFERENCE
def _add_use(node, namespace, name=''):
if isinstance(node, basestring):
name = node
elif isinstance(node, list):
# name contains a list of tokens.
name = '::'.join([n.name for n in name])
# node is a Type so look for its symbol immediately.
if name:
_do_lookup(name, namespace)
return
# Try to search for the value of the variable declaration for any
# symbols, such as `#define` values or other variable names which
# may be included in other files.
obj = getattr(node, 'initial_value', None)
if obj:
_do_lookup(obj, namespace)
# If node is a VariableDeclaration, check if the variable type is
# a symbol used in other includes.
obj = getattr(node, 'type', None)
if obj and isinstance(obj.name, basestring):
_do_lookup(obj.name, namespace)
if not isinstance(node, basestring):
# Happens when variables are defined with inlined types, e.g.:
# enum {...} variable;
return
def _add_variable(node, namespace, reference=False):
obj = node.type if isinstance(
node, ast.VariableDeclaration) else node
if obj.reference or obj.pointer or reference:
_add_reference(obj.name, namespace)
else:
# Add a use for the variable declaration type as well as the
# variable value.
_add_use(obj.name, namespace)
_add_use(node, namespace)
# This needs to recurse when the node is a templated type.
_add_template_use(obj.name,
obj.templated_types,
namespace,
reference)
def _process_function(function, namespace):
reference = function.body is None
if function.return_type:
return_type = function.return_type
_add_variable(return_type, namespace, reference)
for s in function.specializations:
_add_variable(s, namespace, not function.body)
templated_types = function.templated_types or ()
for p in function.parameters:
node = p.type
if node.name not in templated_types:
if function.body and p.name:
# Assume that if the function has a body and a name
# the parameter type is really used.
# NOTE(nnorwitz): this is over-aggressive. It would be
# better to iterate through the body and determine
# actual uses based on local vars and data members
# used.
_add_use(node.name, namespace)
elif (
p.default and
p.default[0].name != '0' and
p.default[0].name != 'NULL' and
p.default[0].name != 'nullptr'
):
_add_use(node.name, namespace)
elif node.reference or node.pointer or reference:
_add_reference(node.name, namespace)
else:
_add_use(node.name, namespace)
_add_template_use(node.name,
node.templated_types,
namespace,
reference)
def _process_function_body(function, namespace):
previous = None
save = namespace[:]
for t in function.body:
if t.token_type == tokenize.NAME:
previous = t
if not keywords.is_keyword(t.name):
# TODO(nnorwitz): handle static function calls.
# TODO(nnorwitz): handle using statements in file.
# TODO(nnorwitz): handle using statements in function.
# TODO(nnorwitz): handle namespace assignment in file.
_add_use(t.name, namespace)
elif t.name == '::' and previous is not None:
namespace.append(previous.name)
elif t.name in (':', ';'):
namespace = save[:]
def _add_template_use(name, types, namespace, reference=False):
for cls in types or ():
if cls.pointer or cls.reference or reference:
_add_reference(cls.name, namespace)
elif name.endswith('_ptr'):
# Special case templated classes that end w/_ptr.
# These are things like auto_ptr which do
# not require the class definition, only decl.
_add_reference(cls.name, namespace)
elif name.startswith('Q') and name.endswith('Pointer'):
# Special case templated classes from the Qt framework.
_add_reference(cls.name, namespace)
else:
_add_use(cls.name, namespace)
_add_template_use(cls.name, cls.templated_types,
namespace, reference)
def _process_types(nodes, namespace):
for node in nodes:
if isinstance(node, ast.Type):
_add_variable(node, namespace)
# Iterate through the source AST/tokens, marking each symbols use.
ast_seq = [self.ast_list]
namespace_stack = []
while ast_seq:
for node in ast_seq.pop():
if isinstance(node, ast.VariableDeclaration):
namespace = namespace_stack + node.namespace
_add_variable(node, namespace)
elif isinstance(node, ast.Function):
namespace = namespace_stack + node.namespace
_process_function(node, namespace)
if node.body:
_process_function_body(node, namespace)
elif isinstance(node, ast.Typedef):
namespace = namespace_stack + node.namespace
_process_types(node.alias, namespace)
elif isinstance(node, ast.Friend):
expr = node.expr
namespace = namespace_stack + node.namespace
if isinstance(expr, ast.Type):
_add_reference(expr.name, namespace)
elif isinstance(expr, ast.Function):
_process_function(expr, namespace)
elif isinstance(node, ast.Union) and node.body is not None:
ast_seq.append(node.body)
elif isinstance(node, ast.Class) and node.body is not None:
_add_declaration(node.name, node.namespace)
namespace = namespace_stack + node.namespace
_add_template_use('', node.bases, namespace)
ast_seq.append(node.body)
elif isinstance(node, ast.Using):
if node.names[0].name == 'namespace':
namespace_stack.append(node.names[1].name)
        return file_uses, decl_uses | Set up the use type of each symbol. | Below is the instruction that describes the task:
### Input:
Set up the use type of each symbol.
### Response:
def _determine_uses(self, included_files, forward_declarations):
"""Set up the use type of each symbol."""
file_uses = dict.fromkeys(included_files, UNUSED)
decl_uses = dict.fromkeys(forward_declarations, UNUSED)
symbol_table = self.symbol_table
for name, node in forward_declarations.items():
try:
symbol_table.lookup_symbol(node.name, node.namespace)
decl_uses[name] |= USES_REFERENCE
except symbols.Error:
module = Module(name, None)
symbol_table.add_symbol(node.name, node.namespace, node,
module)
def _do_lookup(name, namespace):
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
file_uses[name] = file_uses.get(name, 0) | USES_DECLARATION
def _add_declaration(name, namespace):
if not name:
# Ignore anonymous struct. It is not standard, but we might as
# well avoid crashing if it is easy.
return
names = [n for n in namespace if n is not None]
if names:
name = '::'.join(names) + '::' + name
if name in decl_uses:
decl_uses[name] |= USES_DECLARATION
def _add_reference(name, namespace):
try:
file_use_node = symbol_table.lookup_symbol(name, namespace)
except symbols.Error:
return
name = file_use_node[1].filename
if file_use_node[1].ast_list is None:
decl_uses[name] |= USES_REFERENCE
elif name in file_uses:
# enum and typedef can't be forward declared
if isinstance(file_use_node[0], (ast.Enum, ast.Typedef)):
file_uses[name] |= USES_DECLARATION
else:
file_uses[name] |= USES_REFERENCE
def _add_use(node, namespace, name=''):
if isinstance(node, basestring):
name = node
elif isinstance(node, list):
# name contains a list of tokens.
name = '::'.join([n.name for n in name])
# node is a Type so look for its symbol immediately.
if name:
_do_lookup(name, namespace)
return
# Try to search for the value of the variable declaration for any
# symbols, such as `#define` values or other variable names which
# may be included in other files.
obj = getattr(node, 'initial_value', None)
if obj:
_do_lookup(obj, namespace)
# If node is a VariableDeclaration, check if the variable type is
# a symbol used in other includes.
obj = getattr(node, 'type', None)
if obj and isinstance(obj.name, basestring):
_do_lookup(obj.name, namespace)
if not isinstance(node, basestring):
# Happens when variables are defined with inlined types, e.g.:
# enum {...} variable;
return
def _add_variable(node, namespace, reference=False):
obj = node.type if isinstance(
node, ast.VariableDeclaration) else node
if obj.reference or obj.pointer or reference:
_add_reference(obj.name, namespace)
else:
# Add a use for the variable declaration type as well as the
# variable value.
_add_use(obj.name, namespace)
_add_use(node, namespace)
# This needs to recurse when the node is a templated type.
_add_template_use(obj.name,
obj.templated_types,
namespace,
reference)
def _process_function(function, namespace):
reference = function.body is None
if function.return_type:
return_type = function.return_type
_add_variable(return_type, namespace, reference)
for s in function.specializations:
_add_variable(s, namespace, not function.body)
templated_types = function.templated_types or ()
for p in function.parameters:
node = p.type
if node.name not in templated_types:
if function.body and p.name:
# Assume that if the function has a body and a name
# the parameter type is really used.
# NOTE(nnorwitz): this is over-aggressive. It would be
# better to iterate through the body and determine
# actual uses based on local vars and data members
# used.
_add_use(node.name, namespace)
elif (
p.default and
p.default[0].name != '0' and
p.default[0].name != 'NULL' and
p.default[0].name != 'nullptr'
):
_add_use(node.name, namespace)
elif node.reference or node.pointer or reference:
_add_reference(node.name, namespace)
else:
_add_use(node.name, namespace)
_add_template_use(node.name,
node.templated_types,
namespace,
reference)
def _process_function_body(function, namespace):
previous = None
save = namespace[:]
for t in function.body:
if t.token_type == tokenize.NAME:
previous = t
if not keywords.is_keyword(t.name):
# TODO(nnorwitz): handle static function calls.
# TODO(nnorwitz): handle using statements in file.
# TODO(nnorwitz): handle using statements in function.
# TODO(nnorwitz): handle namespace assignment in file.
_add_use(t.name, namespace)
elif t.name == '::' and previous is not None:
namespace.append(previous.name)
elif t.name in (':', ';'):
namespace = save[:]
def _add_template_use(name, types, namespace, reference=False):
for cls in types or ():
if cls.pointer or cls.reference or reference:
_add_reference(cls.name, namespace)
elif name.endswith('_ptr'):
# Special case templated classes that end w/_ptr.
# These are things like auto_ptr which do
# not require the class definition, only decl.
_add_reference(cls.name, namespace)
elif name.startswith('Q') and name.endswith('Pointer'):
# Special case templated classes from the Qt framework.
_add_reference(cls.name, namespace)
else:
_add_use(cls.name, namespace)
_add_template_use(cls.name, cls.templated_types,
namespace, reference)
def _process_types(nodes, namespace):
for node in nodes:
if isinstance(node, ast.Type):
_add_variable(node, namespace)
# Iterate through the source AST/tokens, marking each symbols use.
ast_seq = [self.ast_list]
namespace_stack = []
while ast_seq:
for node in ast_seq.pop():
if isinstance(node, ast.VariableDeclaration):
namespace = namespace_stack + node.namespace
_add_variable(node, namespace)
elif isinstance(node, ast.Function):
namespace = namespace_stack + node.namespace
_process_function(node, namespace)
if node.body:
_process_function_body(node, namespace)
elif isinstance(node, ast.Typedef):
namespace = namespace_stack + node.namespace
_process_types(node.alias, namespace)
elif isinstance(node, ast.Friend):
expr = node.expr
namespace = namespace_stack + node.namespace
if isinstance(expr, ast.Type):
_add_reference(expr.name, namespace)
elif isinstance(expr, ast.Function):
_process_function(expr, namespace)
elif isinstance(node, ast.Union) and node.body is not None:
ast_seq.append(node.body)
elif isinstance(node, ast.Class) and node.body is not None:
_add_declaration(node.name, node.namespace)
namespace = namespace_stack + node.namespace
_add_template_use('', node.bases, namespace)
ast_seq.append(node.body)
elif isinstance(node, ast.Using):
if node.names[0].name == 'namespace':
namespace_stack.append(node.names[1].name)
return file_uses, decl_uses |
def convertWCS(inwcs,drizwcs):
""" Copy WCSObject WCS into Drizzle compatible array."""
drizwcs[0] = inwcs.crpix[0]
drizwcs[1] = inwcs.crval[0]
drizwcs[2] = inwcs.crpix[1]
drizwcs[3] = inwcs.crval[1]
drizwcs[4] = inwcs.cd[0][0]
drizwcs[5] = inwcs.cd[1][0]
drizwcs[6] = inwcs.cd[0][1]
drizwcs[7] = inwcs.cd[1][1]
    return drizwcs | Copy WCSObject WCS into Drizzle compatible array. | Below is the instruction that describes the task:
### Input:
Copy WCSObject WCS into Drizzle compatible array.
### Response:
def convertWCS(inwcs,drizwcs):
""" Copy WCSObject WCS into Drizzle compatible array."""
drizwcs[0] = inwcs.crpix[0]
drizwcs[1] = inwcs.crval[0]
drizwcs[2] = inwcs.crpix[1]
drizwcs[3] = inwcs.crval[1]
drizwcs[4] = inwcs.cd[0][0]
drizwcs[5] = inwcs.cd[1][0]
drizwcs[6] = inwcs.cd[0][1]
drizwcs[7] = inwcs.cd[1][1]
return drizwcs |
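
A quick check of the index mapping above, using a stand-in object that has only the attributes the function reads; real callers would pass a WCSObject, and the numbers here are arbitrary.

from types import SimpleNamespace

inwcs = SimpleNamespace(crpix=[512.0, 512.0],
                        crval=[150.1163, 2.2085],
                        cd=[[-1.4e-05, 0.0], [0.0, 1.4e-05]])

drizwcs = convertWCS(inwcs, [0.0] * 8)
# Order: crpix1, crval1, crpix2, crval2, cd11, cd21, cd12, cd22
print(drizwcs)
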
def _process_stock_genotype(self, limit):
"""
The genotypes of the stocks.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
raw = '/'.join((self.rawdir, 'stock_genotype'))
LOG.info("processing stock genotype")
line_counter = 0
with open(raw, 'r') as f:
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
f.readline() # read the header row; skip
for line in filereader:
(stock_genotype_id, stock_id, genotype_id) = line
stock_key = stock_id
stock_id = self.idhash['stock'][stock_key]
genotype_key = genotype_id
genotype_id = self.idhash['genotype'][genotype_key]
if self.test_mode \
and int(genotype_key) not in self.test_keys['genotype']:
continue
graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id)
line_counter += 1
if not self.test_mode and limit is not None and line_counter > limit:
break
return | The genotypes of the stocks.
:param limit:
:return: | Below is the instruction that describes the task:
### Input:
The genotypes of the stocks.
:param limit:
:return:
### Response:
def _process_stock_genotype(self, limit):
"""
The genotypes of the stocks.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
raw = '/'.join((self.rawdir, 'stock_genotype'))
LOG.info("processing stock genotype")
line_counter = 0
with open(raw, 'r') as f:
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
f.readline() # read the header row; skip
for line in filereader:
(stock_genotype_id, stock_id, genotype_id) = line
stock_key = stock_id
stock_id = self.idhash['stock'][stock_key]
genotype_key = genotype_id
genotype_id = self.idhash['genotype'][genotype_key]
if self.test_mode \
and int(genotype_key) not in self.test_keys['genotype']:
continue
graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id)
line_counter += 1
if not self.test_mode and limit is not None and line_counter > limit:
break
return |
def reset (self):
"""
Reset all variables to default values.
"""
# self.url is constructed by self.build_url() out of base_url
# and (base_ref or parent) as absolute and normed url.
# This the real url we use when checking so it also referred to
# as 'real url'
self.url = None
# a splitted version of url for convenience
self.urlparts = None
# the scheme, host, port and anchor part of url
self.scheme = self.host = self.port = self.anchor = None
# the result message string and flag
self.result = u""
self.has_result = False
# valid or not
self.valid = True
# list of warnings (without duplicates)
self.warnings = []
# list of infos
self.info = []
# content size
self.size = -1
# last modification time of content in HTTP-date format as specified in RFC2616 chapter 3.3.1
self.modified = None
# download time
self.dltime = -1
# check time
self.checktime = 0
# connection object
self.url_connection = None
# data of url content, (data == None) means no data is available
self.data = None
# cache url is set by build_url() calling set_cache_url()
self.cache_url = None
# extern flags (is_extern, is_strict)
self.extern = None
# flag if the result should be cached
self.caching = True
# title is either the URL or parsed from content
self.title = None
# flag if content should be checked or not
self.do_check_content = True
# MIME content type
self.content_type = u""
# URLs seen through redirections
self.aliases = [] | Reset all variables to default values. | Below is the the instruction that describes the task:
### Input:
Reset all variables to default values.
### Response:
def reset (self):
"""
Reset all variables to default values.
"""
# self.url is constructed by self.build_url() out of base_url
# and (base_ref or parent) as absolute and normed url.
# This the real url we use when checking so it also referred to
# as 'real url'
self.url = None
# a splitted version of url for convenience
self.urlparts = None
# the scheme, host, port and anchor part of url
self.scheme = self.host = self.port = self.anchor = None
# the result message string and flag
self.result = u""
self.has_result = False
# valid or not
self.valid = True
# list of warnings (without duplicates)
self.warnings = []
# list of infos
self.info = []
# content size
self.size = -1
# last modification time of content in HTTP-date format as specified in RFC2616 chapter 3.3.1
self.modified = None
# download time
self.dltime = -1
# check time
self.checktime = 0
# connection object
self.url_connection = None
# data of url content, (data == None) means no data is available
self.data = None
# cache url is set by build_url() calling set_cache_url()
self.cache_url = None
# extern flags (is_extern, is_strict)
self.extern = None
# flag if the result should be cached
self.caching = True
# title is either the URL or parsed from content
self.title = None
# flag if content should be checked or not
self.do_check_content = True
# MIME content type
self.content_type = u""
# URLs seen through redirections
self.aliases = [] |
def ParseArguments(self):
"""Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_parser, names=['storage_file'])
data_location_group = argument_parser.add_argument_group(
'data location arguments')
argument_helper_names = ['artifact_definitions', 'data_location']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
data_location_group, names=argument_helper_names)
extraction_group = argument_parser.add_argument_group(
'extraction arguments')
argument_helper_names = [
'artifact_filters', 'extraction', 'filter_file', 'hashers',
'parsers', 'yara_rules']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
extraction_group, names=argument_helper_names)
self.AddStorageMediaImageOptions(extraction_group)
self.AddTimeZoneOption(extraction_group)
self.AddVSSProcessingOptions(extraction_group)
self.AddCredentialOptions(extraction_group)
info_group = argument_parser.add_argument_group('informational arguments')
self.AddInformationalOptions(info_group)
info_group.add_argument(
'--info', dest='show_info', action='store_true', default=False,
help='Print out information about supported plugins and parsers.')
info_group.add_argument(
'--use_markdown', '--use-markdown', dest='use_markdown',
action='store_true', default=False, help=(
'Output lists in Markdown format use in combination with '
'"--hashers list", "--parsers list" or "--timezone list"'))
info_group.add_argument(
'--no_dependencies_check', '--no-dependencies-check',
dest='dependencies_check', action='store_false', default=True,
help='Disable the dependencies check.')
self.AddLogFileOptions(info_group)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
info_group, names=['status_view'])
output_group = argument_parser.add_argument_group('output arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_group, names=['text_prepend'])
processing_group = argument_parser.add_argument_group(
'processing arguments')
self.AddPerformanceOptions(processing_group)
self.AddProcessingOptions(processing_group)
processing_group.add_argument(
'--sigsegv_handler', '--sigsegv-handler', dest='sigsegv_handler',
action='store_true', default=False, help=(
'Enables the SIGSEGV handler. WARNING this functionality is '
'experimental and will a deadlock worker process if a real '
'segfault is caught, but not signal SIGSEGV. This functionality '
'is therefore primarily intended for debugging purposes'))
profiling_group = argument_parser.add_argument_group('profiling arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
profiling_group, names=['profiling'])
storage_group = argument_parser.add_argument_group('storage arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
storage_group, names=['storage_format'])
argument_parser.add_argument(
self._SOURCE_OPTION, action='store', metavar='SOURCE', nargs='?',
default=None, type=str, help=(
'Path to a source device, file or directory. If the source is '
'a supported storage media device or image file, archive file '
'or a directory, the files within are processed recursively.'))
try:
options = argument_parser.parse_args()
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_help())
return False
# Properly prepare the attributes according to local encoding.
if self.preferred_encoding == 'ascii':
logger.warning(
'The preferred encoding of your system is ASCII, which is not '
'optimal for the typically non-ASCII characters that need to be '
'parsed and processed. The tool will most likely crash and die, '
'perhaps in a way that may not be recoverable. A five second delay '
'is introduced to give you time to cancel the runtime and '
'reconfigure your preferred encoding, otherwise continue at own '
'risk.')
time.sleep(5)
if self._process_archives:
logger.warning(
'Scanning archive files currently can cause deadlock. Continue at '
'your own risk.')
time.sleep(5)
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_usage())
return False
self._command_line_arguments = self.GetCommandLineArguments()
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True | Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed. | Below is the instruction that describes the task:
### Input:
Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
### Response:
def ParseArguments(self):
"""Parses the command line arguments.
Returns:
bool: True if the arguments were successfully parsed.
"""
loggers.ConfigureLogging()
argument_parser = argparse.ArgumentParser(
description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.AddBasicOptions(argument_parser)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_parser, names=['storage_file'])
data_location_group = argument_parser.add_argument_group(
'data location arguments')
argument_helper_names = ['artifact_definitions', 'data_location']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
data_location_group, names=argument_helper_names)
extraction_group = argument_parser.add_argument_group(
'extraction arguments')
argument_helper_names = [
'artifact_filters', 'extraction', 'filter_file', 'hashers',
'parsers', 'yara_rules']
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
extraction_group, names=argument_helper_names)
self.AddStorageMediaImageOptions(extraction_group)
self.AddTimeZoneOption(extraction_group)
self.AddVSSProcessingOptions(extraction_group)
self.AddCredentialOptions(extraction_group)
info_group = argument_parser.add_argument_group('informational arguments')
self.AddInformationalOptions(info_group)
info_group.add_argument(
'--info', dest='show_info', action='store_true', default=False,
help='Print out information about supported plugins and parsers.')
info_group.add_argument(
'--use_markdown', '--use-markdown', dest='use_markdown',
action='store_true', default=False, help=(
'Output lists in Markdown format use in combination with '
'"--hashers list", "--parsers list" or "--timezone list"'))
info_group.add_argument(
'--no_dependencies_check', '--no-dependencies-check',
dest='dependencies_check', action='store_false', default=True,
help='Disable the dependencies check.')
self.AddLogFileOptions(info_group)
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
info_group, names=['status_view'])
output_group = argument_parser.add_argument_group('output arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
output_group, names=['text_prepend'])
processing_group = argument_parser.add_argument_group(
'processing arguments')
self.AddPerformanceOptions(processing_group)
self.AddProcessingOptions(processing_group)
processing_group.add_argument(
'--sigsegv_handler', '--sigsegv-handler', dest='sigsegv_handler',
action='store_true', default=False, help=(
'Enables the SIGSEGV handler. WARNING this functionality is '
'experimental and will a deadlock worker process if a real '
'segfault is caught, but not signal SIGSEGV. This functionality '
'is therefore primarily intended for debugging purposes'))
profiling_group = argument_parser.add_argument_group('profiling arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
profiling_group, names=['profiling'])
storage_group = argument_parser.add_argument_group('storage arguments')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
storage_group, names=['storage_format'])
argument_parser.add_argument(
self._SOURCE_OPTION, action='store', metavar='SOURCE', nargs='?',
default=None, type=str, help=(
'Path to a source device, file or directory. If the source is '
'a supported storage media device or image file, archive file '
'or a directory, the files within are processed recursively.'))
try:
options = argument_parser.parse_args()
except UnicodeEncodeError:
# If we get here we are attempting to print help in a non-Unicode
# terminal.
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_help())
return False
# Properly prepare the attributes according to local encoding.
if self.preferred_encoding == 'ascii':
logger.warning(
'The preferred encoding of your system is ASCII, which is not '
'optimal for the typically non-ASCII characters that need to be '
'parsed and processed. The tool will most likely crash and die, '
'perhaps in a way that may not be recoverable. A five second delay '
'is introduced to give you time to cancel the runtime and '
'reconfigure your preferred encoding, otherwise continue at own '
'risk.')
time.sleep(5)
if self._process_archives:
logger.warning(
'Scanning archive files currently can cause deadlock. Continue at '
'your own risk.')
time.sleep(5)
try:
self.ParseOptions(options)
except errors.BadConfigOption as exception:
self._output_writer.Write('ERROR: {0!s}\n'.format(exception))
self._output_writer.Write('\n')
self._output_writer.Write(argument_parser.format_usage())
return False
self._command_line_arguments = self.GetCommandLineArguments()
loggers.ConfigureLogging(
debug_output=self._debug_mode, filename=self._log_file,
quiet_mode=self._quiet_mode)
return True |
def get_compiler(self, using=None, connection=None):
""" Overrides the Query method get_compiler in order to return
an instance of the above custom compiler.
"""
# Copy the body of this method from Django except the final
# return statement. We will ignore code coverage for this.
if using is None and connection is None: # pragma: no cover
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.annotation_select.items():
connection.ops.check_expression_support(aggregate)
# Instantiate the custom compiler.
return {
CTEUpdateQuery: CTEUpdateQueryCompiler,
CTEInsertQuery: CTEInsertQueryCompiler,
CTEDeleteQuery: CTEDeleteQueryCompiler,
CTEAggregateQuery: CTEAggregateQueryCompiler,
}.get(self.__class__, CTEQueryCompiler)(self, connection, using) | Overrides the Query method get_compiler in order to return
an instance of the above custom compiler. | Below is the instruction that describes the task:
### Input:
Overrides the Query method get_compiler in order to return
an instance of the above custom compiler.
### Response:
def get_compiler(self, using=None, connection=None):
""" Overrides the Query method get_compiler in order to return
an instance of the above custom compiler.
"""
# Copy the body of this method from Django except the final
# return statement. We will ignore code coverage for this.
if using is None and connection is None: # pragma: no cover
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.annotation_select.items():
connection.ops.check_expression_support(aggregate)
# Instantiate the custom compiler.
return {
CTEUpdateQuery: CTEUpdateQueryCompiler,
CTEInsertQuery: CTEInsertQueryCompiler,
CTEDeleteQuery: CTEDeleteQueryCompiler,
CTEAggregateQuery: CTEAggregateQueryCompiler,
}.get(self.__class__, CTEQueryCompiler)(self, connection, using) |
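
The final return above picks a compiler through a class-keyed dict with a .get() fallback. A generic, self-contained illustration of that dispatch pattern (the class names below are invented, not Django's):

class BaseCompiler: pass
class UpdateCompiler(BaseCompiler): pass

class BaseQuery: pass
class UpdateQuery(BaseQuery): pass

def pick_compiler_class(query):
    # Same idea as the entry above: look up the query's class, fall back to the base compiler.
    return {UpdateQuery: UpdateCompiler}.get(type(query), BaseCompiler)

print(pick_compiler_class(UpdateQuery()).__name__)  # UpdateCompiler
print(pick_compiler_class(BaseQuery()).__name__)    # BaseCompiler
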
def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):
'''Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Returns:
HTTP response. Load Balancer JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/loadBalancers/', lb_name,
'?api-version=', NETWORK_API])
return do_put(endpoint, body, access_token) | Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Returns:
HTTP response. Load Balancer JSON body. | Below is the instruction that describes the task:
### Input:
Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Returns:
HTTP response. Load Balancer JSON body.
### Response:
def update_load_balancer(access_token, subscription_id, resource_group, lb_name, body):
'''Updates a load balancer model, i.e. PUT an updated LB body.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
lb_name (str): Name of the new load balancer.
body (str): JSON body of an updated load balancer.
Returns:
HTTP response. Load Balancer JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/loadBalancers/', lb_name,
'?api-version=', NETWORK_API])
return do_put(endpoint, body, access_token) |
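
A hedged call sketch for the entry above; the token, subscription id, resource group, load-balancer name and body are placeholders, and a real request needs an existing resource plus a valid bearer token.

import json

body = json.dumps({
    'location': 'eastus',
    'properties': {'frontendIPConfigurations': []},  # truncated example body
})
response = update_load_balancer(access_token='<bearer-token>',
                                subscription_id='<subscription-guid>',
                                resource_group='my-rg',
                                lb_name='my-lb',
                                body=body)
print(response)  # HTTP response, per the docstring above
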
def process_insert_get_id(self, query, sql, values, sequence=None):
"""
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
"""
query.get_connection().insert(sql, values)
id = query.get_connection().get_cursor().lastrowid
if isinstance(id, int):
return id
if str(id).isdigit():
return int(id)
return id | Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int | Below is the instruction that describes the task:
### Input:
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
### Response:
def process_insert_get_id(self, query, sql, values, sequence=None):
"""
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
"""
query.get_connection().insert(sql, values)
id = query.get_connection().get_cursor().lastrowid
if isinstance(id, int):
return id
if str(id).isdigit():
return int(id)
return id |
def get_supported_permissions(self):
"""
Get permissions which this handler can treat.
Specified with :attr:`includes` and :attr:`excludes` of this instance.
Returns
-------
set
A set instance of `app_label.codename` formatted permission strings
"""
if not hasattr(self, '_perms_cache'):
if (self.includes and
isinstance(self.includes, collections.Callable)):
includes = self.includes(self)
else:
includes = self.includes or []
if (self.excludes and
isinstance(self.excludes, collections.Callable)):
excludes = self.excludes(self)
else:
excludes = self.excludes or []
includes = set(includes)
excludes = set(excludes)
includes = includes.difference(excludes)
self._perms_cache = includes
return self._perms_cache | Get permissions which this handler can treat.
Specified with :attr:`includes` and :attr:`excludes` of this instance.
Returns
-------
set
A set instance of `app_label.codename` formatted permission strings | Below is the instruction that describes the task:
### Input:
Get permissions which this handler can treat.
Specified with :attr:`includes` and :attr:`excludes` of this instance.
Returns
-------
set
A set instance of `app_label.codename` formatted permission strings
### Response:
def get_supported_permissions(self):
"""
Get permissions which this handler can treat.
Specified with :attr:`includes` and :attr:`excludes` of this instance.
Returns
-------
set
A set instance of `app_label.codename` formatted permission strings
"""
if not hasattr(self, '_perms_cache'):
if (self.includes and
isinstance(self.includes, collections.Callable)):
includes = self.includes(self)
else:
includes = self.includes or []
if (self.excludes and
isinstance(self.excludes, collections.Callable)):
excludes = self.excludes(self)
else:
excludes = self.excludes or []
includes = set(includes)
excludes = set(excludes)
includes = includes.difference(excludes)
self._perms_cache = includes
return self._perms_cache |
def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
            self.on_resize(None, None) | Process keystrokes that issue commands (side effects). | Below is the instruction that describes the task:
### Input:
Process keystrokes that issue commands (side effects).
### Response:
def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) |
def version_object_and_next(string, retries=0): # type: (str, int) -> VersionThing
"""
Try three parsing strategies, favoring semver, then pep440, then whatev.
:param string:
:return:
"""
if retries > 2:
raise JiggleVersionException(
"Can't parse, ran out of retries: " + unicode(string)
)
if string == "" or string is None:
raise JiggleVersionException("No version string, can only use default logic.")
if string[0] == "v":
string = string[1:]
try:
version = semantic_version.Version(string)
next_version = version.next_patch()
_ = semantic_version.Version(unicode(string))
return version, next_version, "semantic_version"
except:
logger.debug("Not sem_ver:" + unicode(string))
try:
version = parver.Version.parse(string)
next_version = version.bump_dev()
_ = parver.Version.parse(unicode(next_version))
return version, next_version, "pep440 (parver)"
except:
try:
logger.debug("Not par_ver:" + unicode(string))
# Version.supported_version_schemes = [Pep440VersionScheme, Simple4VersionScheme]
version = versio_version.Version(string, scheme=Simple4VersionScheme)
version.bump()
return (
versio_version.Version(string, scheme=Simple4VersionScheme),
version,
"simple-4 part (versio)",
)
except:
# let above libs try first before we do primitive clean up work
retries += 1
if "a" in string:
return version_object_and_next(string.replace("a", ".a"), retries)
elif "b" in string:
return version_object_and_next(string.replace("b", ".b"), retries)
elif len(string.split(".")) == 1:
# convert 2 part to 3 part.
return version_object_and_next(string + ".0.0", retries)
elif len(string.split(".")) == 2:
# convert 2 part to 3 part, e.g. 1.1 -> 1.1.0
return version_object_and_next(string + ".0", retries)
elif string.isnumeric() and "." not in string:
# e.g. "21" -> "21.0.0"
return version_object_and_next(string + ".0.0", retries)
else:
logger.debug("Not versio:" + unicode(string))
# versio only does pep440 by default
# print(versio.version.Version.supported_version_schemes)
raise | Try three parsing strategies, favoring semver, then pep440, then whatev.
:param string:
:return: | Below is the instruction that describes the task:
### Input:
Try three parsing strategies, favoring semver, then pep440, then whatev.
:param string:
:return:
### Response:
def version_object_and_next(string, retries=0): # type: (str, int) -> VersionThing
"""
Try three parsing strategies, favoring semver, then pep440, then whatev.
:param string:
:return:
"""
if retries > 2:
raise JiggleVersionException(
"Can't parse, ran out of retries: " + unicode(string)
)
if string == "" or string is None:
raise JiggleVersionException("No version string, can only use default logic.")
if string[0] == "v":
string = string[1:]
try:
version = semantic_version.Version(string)
next_version = version.next_patch()
_ = semantic_version.Version(unicode(string))
return version, next_version, "semantic_version"
except:
logger.debug("Not sem_ver:" + unicode(string))
try:
version = parver.Version.parse(string)
next_version = version.bump_dev()
_ = parver.Version.parse(unicode(next_version))
return version, next_version, "pep440 (parver)"
except:
try:
logger.debug("Not par_ver:" + unicode(string))
# Version.supported_version_schemes = [Pep440VersionScheme, Simple4VersionScheme]
version = versio_version.Version(string, scheme=Simple4VersionScheme)
version.bump()
return (
versio_version.Version(string, scheme=Simple4VersionScheme),
version,
"simple-4 part (versio)",
)
except:
# let above libs try first before we do primitive clean up work
retries += 1
if "a" in string:
return version_object_and_next(string.replace("a", ".a"), retries)
elif "b" in string:
return version_object_and_next(string.replace("b", ".b"), retries)
elif len(string.split(".")) == 1:
# convert 2 part to 3 part.
return version_object_and_next(string + ".0.0", retries)
elif len(string.split(".")) == 2:
# convert 2 part to 3 part, e.g. 1.1 -> 1.1.0
return version_object_and_next(string + ".0", retries)
elif string.isnumeric() and "." not in string:
# e.g. "21" -> "21.0.0"
return version_object_and_next(string + ".0.0", retries)
else:
logger.debug("Not versio:" + unicode(string))
# versio only does pep440 by default
# print(versio.version.Version.supported_version_schemes)
raise |
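A quick, hedged sketch of calling the parser above; the version strings are invented, and which branch handles a given string depends on which of the three libraries accepts it.
ver, nxt, scheme = version_object_and_next("1.2.3")
print(scheme)      # "semantic_version" for a clean three-part semver string
print(str(nxt))    # the next patch release, "1.2.4"
ver, nxt, scheme = version_object_and_next("v1.2")   # a leading "v" is stripped before any parser runs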
def find_following_working_day(self, day):
"""Looks for the following working day, if not already a working day.
**WARNING**: this function doesn't take into account the calendar
holidays, only the days of the week and the weekend days parameters.
"""
day = cleaned_date(day)
while day.weekday() in self.get_weekend_days():
day = day + timedelta(days=1)
return day | Looks for the following working day, if not already a working day.
**WARNING**: this function doesn't take into account the calendar
holidays, only the days of the week and the weekend days parameters. | Below is the instruction that describes the task:
### Input:
Looks for the following working day, if not already a working day.
**WARNING**: this function doesn't take into account the calendar
holidays, only the days of the week and the weekend days parameters.
### Response:
def find_following_working_day(self, day):
"""Looks for the following working day, if not already a working day.
**WARNING**: this function doesn't take into account the calendar
holidays, only the days of the week and the weekend days parameters.
"""
day = cleaned_date(day)
while day.weekday() in self.get_weekend_days():
day = day + timedelta(days=1)
return day |
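The rule above only rolls weekend days forward; here is a self-contained illustration of the same loop with an assumed Saturday/Sunday weekend, using dates chosen so the result is easy to verify.
from datetime import date, timedelta

day = date(2018, 1, 6)            # a Saturday
while day.weekday() in (5, 6):    # 5 = Saturday, 6 = Sunday
    day += timedelta(days=1)
print(day)                        # 2018-01-08, the following Monday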
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_multiget_count(keys, column_parent, predicate, consistency_level)
return d | Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level | Below is the instruction that describes the task:
### Input:
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
### Response:
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_multiget_count(keys, column_parent, predicate, consistency_level)
return d |
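Because this client is Twisted-style, the counts arrive through the returned Deferred; a hedged usage sketch in which the client, column_parent, predicate and consistency_level objects are assumed to have been built elsewhere.
def print_counts(key_to_count):
    for key, count in key_to_count.items():
        print(key, count)

d = client.multiget_count([b'row1', b'row2'], column_parent, predicate, consistency_level)
d.addCallback(print_counts)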
def meta_retrieve(self, meta_lookahead = None):
"""
Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata
"""
if not self.__meta_received:
if meta_lookahead or self.meta_lookahead:
self.buffered_remainder = list(self)
else:
raise RuntimeError(
'This property only valid once all rows are received!')
if isinstance(self.raw.value, dict):
return self.raw.value
return {} | Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata | Below is the instruction that describes the task:
### Input:
Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata
### Response:
def meta_retrieve(self, meta_lookahead = None):
"""
Get metadata from the query itself. This is guaranteed to only
return a Python dictionary.
Note that if the query failed, the metadata might not be in JSON
format, in which case there may be additional, non-JSON data
which can be retrieved using the following
::
raw_meta = req.raw.value
:return: A dictionary containing the query metadata
"""
if not self.__meta_received:
if meta_lookahead or self.meta_lookahead:
self.buffered_remainder = list(self)
else:
raise RuntimeError(
'This property only valid once all rows are received!')
if isinstance(self.raw.value, dict):
return self.raw.value
return {} |
def list_containers(self):
"""
List all available docker containers.
Container objects returned from this method will contain a limited
amount of metadata in property `short_metadata`. These are just a subset
of `.inspect()`, but don't require an API call against dockerd.
:return: collection of instances of :class:`conu.DockerContainer`
"""
result = []
for c in self.d.containers(all=True):
name = None
names = c.get("Names", None)
if names:
name = names[0]
i = DockerImage(None, identifier=c["ImageID"])
cont = DockerContainer(i, c["Id"], name=name)
# TODO: docker_client.containers produces different metadata than inspect
inspect_to_container_metadata(cont.metadata, c, i)
result.append(cont)
return result | List all available docker containers.
Container objects returned from this method will contain a limited
amount of metadata in property `short_metadata`. These are just a subset
of `.inspect()`, but don't require an API call against dockerd.
:return: collection of instances of :class:`conu.DockerContainer` | Below is the instruction that describes the task:
### Input:
List all available docker containers.
Container objects returned from this methods will contain a limited
amount of metadata in property `short_metadata`. These are just a subset
of `.inspect()`, but don't require an API call against dockerd.
:return: collection of instances of :class:`conu.DockerContainer`
### Response:
def list_containers(self):
"""
List all available docker containers.
Container objects returned from this method will contain a limited
amount of metadata in property `short_metadata`. These are just a subset
of `.inspect()`, but don't require an API call against dockerd.
:return: collection of instances of :class:`conu.DockerContainer`
"""
result = []
for c in self.d.containers(all=True):
name = None
names = c.get("Names", None)
if names:
name = names[0]
i = DockerImage(None, identifier=c["ImageID"])
cont = DockerContainer(i, c["Id"], name=name)
# TODO: docker_client.containers produces different metadata than inspect
inspect_to_container_metadata(cont.metadata, c, i)
result.append(cont)
return result |
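A minimal, hedged usage sketch, assuming backend is an instance of the class that owns the method above.
for container in backend.list_containers():
    print(container.name)   # populated from the daemon's "Names" entry, as shown above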
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id] | :param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag | Below is the instruction that describes the task:
### Input:
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
### Response:
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id] |
def _set_af_vrf_name(self, v, load=False):
"""
Setter method for af_vrf_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_vrf_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..255']}), is_leaf=True, yang_name="af-vrf-name", rest_name="vrf-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF name', u'cli-full-no': None, u'alt-name': u'vrf-name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..255']}), is_leaf=True, yang_name="af-vrf-name", rest_name="vrf-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF name', u'cli-full-no': None, u'alt-name': u'vrf-name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='string', is_config=True)""",
})
self.__af_vrf_name = t
if hasattr(self, '_set'):
self._set() | Setter method for af_vrf_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_vrf_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf_name() directly. | Below is the instruction that describes the task:
### Input:
Setter method for af_vrf_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_vrf_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf_name() directly.
### Response:
def _set_af_vrf_name(self, v, load=False):
"""
Setter method for af_vrf_name, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_vrf_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..255']}), is_leaf=True, yang_name="af-vrf-name", rest_name="vrf-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF name', u'cli-full-no': None, u'alt-name': u'vrf-name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..255']}), is_leaf=True, yang_name="af-vrf-name", rest_name="vrf-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF name', u'cli-full-no': None, u'alt-name': u'vrf-name'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='string', is_config=True)""",
})
self.__af_vrf_name = t
if hasattr(self, '_set'):
self._set() |
def lookup(self, path_info: str) -> MatchResult:
""" lookup url match for path_info
"""
for name, pattern in self.patterns.items():
match = pattern.match(path_info)
if match is None:
continue
match.name = name
return match
return None | lookup url match for path_info | Below is the instruction that describes the task:
### Input:
lookup url match for path_info
### Response:
def lookup(self, path_info: str) -> MatchResult:
""" lookup url match for path_info
"""
for name, pattern in self.patterns.items():
match = pattern.match(path_info)
if match is None:
continue
match.name = name
return match
return None |
def load(f):
"""Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
"""
if isinstance(f, (os.PathLike, str)):
fileobj = open(f, 'rb')
else:
try:
f.read(0)
except AttributeError:
raise ValueError("Not a valid file-like object.")
except Exception:
raise ValueError("Can't read from file-like object.")
fileobj = f
parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1])
if parser_cls is None:
raise UnsupportedFormat("Supported format signature not found.")
else:
fileobj.seek(0, os.SEEK_SET)
return parser_cls.load(fileobj) | Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable. | Below is the instruction that describes the task:
### Input:
Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
### Response:
def load(f):
"""Load audio metadata from filepath or file-like object.
Parameters:
f (str, os.PathLike, or file-like object):
A filepath, path-like object or file-like object of an audio file.
Returns:
Format: An audio format object.
Raises:
UnsupportedFormat: If file is not of a supported format.
ValueError: If filepath/file-like object is not valid or readable.
"""
if isinstance(f, (os.PathLike, str)):
fileobj = open(f, 'rb')
else:
try:
f.read(0)
except AttributeError:
raise ValueError("Not a valid file-like object.")
except Exception:
raise ValueError("Can't read from file-like object.")
fileobj = f
parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1])
if parser_cls is None:
raise UnsupportedFormat("Supported format signature not found.")
else:
fileobj.seek(0, os.SEEK_SET)
return parser_cls.load(fileobj) |
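Both supported call styles, shown with a made-up file name; which Format subclass comes back depends on the signature detected by determine_format.
meta = load('example.flac')               # from a path

with open('example.flac', 'rb') as f:     # or from an already-open binary file object
    meta = load(f)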
def build(self, package_dir, output_style='nested'):
"""Builds the Sass/SCSS files in the specified :attr:`sass_path`.
It finds :attr:`sass_path` and locates :attr:`css_path`
as relative to the given ``package_dir``.
:param package_dir: the path of package directory
:type package_dir: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default),
``'expanded'``, ``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: the set of compiled CSS filenames
:rtype: :class:`frozenset`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
"""
sass_path = os.path.join(package_dir, self.sass_path)
css_path = os.path.join(package_dir, self.css_path)
css_files = build_directory(
sass_path, css_path,
output_style=output_style,
strip_extension=self.strip_extension,
).values()
return frozenset(
os.path.join(self.css_path, filename)
for filename in css_files
) | Builds the Sass/SCSS files in the specified :attr:`sass_path`.
It finds :attr:`sass_path` and locates :attr:`css_path`
as relative to the given ``package_dir``.
:param package_dir: the path of package directory
:type package_dir: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default),
``'expanded'``, ``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: the set of compiled CSS filenames
:rtype: :class:`frozenset`
.. versionadded:: 0.6.0
The ``output_style`` parameter. | Below is the instruction that describes the task:
### Input:
Builds the Sass/SCSS files in the specified :attr:`sass_path`.
It finds :attr:`sass_path` and locates :attr:`css_path`
as relative to the given ``package_dir``.
:param package_dir: the path of package directory
:type package_dir: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default),
``'expanded'``, ``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: the set of compiled CSS filenames
:rtype: :class:`frozenset`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
### Response:
def build(self, package_dir, output_style='nested'):
"""Builds the Sass/SCSS files in the specified :attr:`sass_path`.
It finds :attr:`sass_path` and locates :attr:`css_path`
as relative to the given ``package_dir``.
:param package_dir: the path of package directory
:type package_dir: :class:`str`, :class:`basestring`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default),
``'expanded'``, ``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:returns: the set of compiled CSS filenames
:rtype: :class:`frozenset`
.. versionadded:: 0.6.0
The ``output_style`` parameter.
"""
sass_path = os.path.join(package_dir, self.sass_path)
css_path = os.path.join(package_dir, self.css_path)
css_files = build_directory(
sass_path, css_path,
output_style=output_style,
strip_extension=self.strip_extension,
).values()
return frozenset(
os.path.join(self.css_path, filename)
for filename in css_files
) |
def request(self, hash_, quickkey, doc_type, page=None,
output=None, size_id=None, metadata=None,
request_conversion_only=None):
"""Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content
"""
if len(hash_) > 4:
hash_ = hash_[:4]
query = QueryParams({
'quickkey': quickkey,
'doc_type': doc_type,
'page': page,
'output': output,
'size_id': size_id,
'metadata': metadata,
'request_conversion_only': request_conversion_only
})
url = API_ENDPOINT + '?' + hash_ + '&' + urlencode(query)
response = self.http.get(url, stream=True)
if response.status_code == 204:
raise ConversionServerError("Unable to fulfill request. "
"The document will not be converted.",
response.status_code)
response.raise_for_status()
if response.headers['content-type'] == 'application/json':
return response.json()
return response | Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content | Below is the instruction that describes the task:
### Input:
Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content
### Response:
def request(self, hash_, quickkey, doc_type, page=None,
output=None, size_id=None, metadata=None,
request_conversion_only=None):
"""Query conversion server
hash_: 4 characters of file hash
quickkey: File quickkey
doc_type: "i" for image, "d" for documents
page: The page to convert. If page is set to 'initial', the first
10 pages of the document will be provided. (document)
output: "pdf", "img", or "swf" (document)
size_id: 0,1,2 (document)
0-9, a-f, z (image)
metadata: Set to 1 to get metadata dict
request_conversion_only: Request conversion w/o content
"""
if len(hash_) > 4:
hash_ = hash_[:4]
query = QueryParams({
'quickkey': quickkey,
'doc_type': doc_type,
'page': page,
'output': output,
'size_id': size_id,
'metadata': metadata,
'request_conversion_only': request_conversion_only
})
url = API_ENDPOINT + '?' + hash_ + '&' + urlencode(query)
response = self.http.get(url, stream=True)
if response.status_code == 204:
raise ConversionServerError("Unable to fulfill request. "
"The document will not be converted.",
response.status_code)
response.raise_for_status()
if response.headers['content-type'] == 'application/json':
return response.json()
return response |
def on_event(self, event):
"""Pygame event processing callback method.
:param event: Event to process.
"""
if self.state > 0:
if event.type == MOUSEBUTTONDOWN:
key = self.layout.get_key_at(pygame.mouse.get_pos())
if key is not None:
self.on_key_down(key)
elif event.type == MOUSEBUTTONUP:
self.on_key_up()
elif event.type == KEYDOWN:
value = pygame.key.name(event.key)
# TODO : Find from layout (consider checking layout key space ?)
elif event.type == KEYUP:
value = pygame.key.name(event.key) | Pygame event processing callback method.
:param event: Event to process. | Below is the instruction that describes the task:
### Input:
Pygame event processing callback method.
:param event: Event to process.
### Response:
def on_event(self, event):
"""Pygame event processing callback method.
:param event: Event to process.
"""
if self.state > 0:
if event.type == MOUSEBUTTONDOWN:
key = self.layout.get_key_at(pygame.mouse.get_pos())
if key is not None:
self.on_key_down(key)
elif event.type == MOUSEBUTTONUP:
self.on_key_up()
elif event.type == KEYDOWN:
value = pygame.key.name(event.key)
# TODO : Find from layout (consider checking layout key space ?)
elif event.type == KEYUP:
value = pygame.key.name(event.key) |
def set_message_last_post(cr, uid, pool, models):
"""
Given a list of models, set their 'message_last_post' fields to an
estimated last post datetime.
To be called in post-migration scripts
:param cr: database cursor
:param uid: user id, assumed to be openerp.SUPERUSER_ID
:param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname)
:param models: a list of model names for which 'message_last_post' needs \
to be filled
:return:
"""
if type(models) is not list:
models = [models]
for model in models:
model_pool = pool[model]
cr.execute(
"UPDATE {table} "
"SET message_last_post=(SELECT max(mm.date) "
"FROM mail_message mm "
"WHERE mm.model=%s "
"AND mm.date IS NOT NULL "
"AND mm.res_id={table}.id)".format(
table=model_pool._table), (model,)
) | Given a list of models, set their 'message_last_post' fields to an
estimated last post datetime.
To be called in post-migration scripts
:param cr: database cursor
:param uid: user id, assumed to be openerp.SUPERUSER_ID
:param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname)
:param models: a list of model names for which 'message_last_post' needs \
to be filled
:return: | Below is the instruction that describes the task:
### Input:
Given a list of models, set their 'message_last_post' fields to an
estimated last post datetime.
To be called in post-migration scripts
:param cr: database cursor
:param uid: user id, assumed to be openerp.SUPERUSER_ID
:param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname)
:param models: a list of model names for which 'message_last_post' needs \
to be filled
:return:
### Response:
def set_message_last_post(cr, uid, pool, models):
"""
Given a list of models, set their 'message_last_post' fields to an
estimated last post datetime.
To be called in post-migration scripts
:param cr: database cursor
:param uid: user id, assumed to be openerp.SUPERUSER_ID
:param pool: orm pool, assumed to be openerp.pooler.get_pool(cr.dbname)
:param models: a list of model names for which 'message_last_post' needs \
to be filled
:return:
"""
if type(models) is not list:
models = [models]
for model in models:
model_pool = pool[model]
cr.execute(
"UPDATE {table} "
"SET message_last_post=(SELECT max(mm.date) "
"FROM mail_message mm "
"WHERE mm.model=%s "
"AND mm.date IS NOT NULL "
"AND mm.res_id={table}.id)".format(
table=model_pool._table), (model,)
) |
def load_yaml(filename):
"""
Loads a YAML-formatted file.
"""
with open(filename) as f:
ydoc = yaml.safe_load(f.read())
return (ydoc, serialize_tojson(ydoc)) | Loads a YAML-formatted file. | Below is the the instruction that describes the task:
### Input:
Loads a YAML-formatted file.
### Response:
def load_yaml(filename):
"""
Loads a YAML-formatted file.
"""
with open(filename) as f:
ydoc = yaml.safe_load(f.read())
return (ydoc, serialize_tojson(ydoc)) |
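An illustrative call with an invented file name; the helper returns both the parsed document and the JSON form produced by serialize_tojson.
ydoc, ydoc_as_json = load_yaml('pipeline.yml')
print(ydoc_as_json)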
def gradient(self, wrt):
"""Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_str_array(wrt)
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle) | Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients. | Below is the instruction that describes the task:
### Input:
Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients.
### Response:
def gradient(self, wrt):
"""Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_str_array(wrt)
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle) |
def bundle_visualization_url(self, bundle_id, channel=None):
'''Generate the path to the visualization for bundles.
@param bundle_id The ID of the bundle.
@param channel Optional channel name.
@return The url to the visualization.
'''
url = '{}/{}/diagram.svg'.format(self.url, _get_path(bundle_id))
return _add_channel(url, channel) | Generate the path to the visualization for bundles.
@param bundle_id The ID of the bundle.
@param channel Optional channel name.
@return The url to the visualization. | Below is the instruction that describes the task:
### Input:
Generate the path to the visualization for bundles.
@param bundle_id The ID of the bundle.
@param channel Optional channel name.
@return The url to the visualization.
### Response:
def bundle_visualization_url(self, bundle_id, channel=None):
'''Generate the path to the visualization for bundles.
@param bundle_id The ID of the bundle.
@param channel Optional channel name.
@return The url to the visualization.
'''
url = '{}/{}/diagram.svg'.format(self.url, _get_path(bundle_id))
return _add_channel(url, channel) |
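Roughly what the produced URL looks like, assuming _get_path passes the id through unchanged and _add_channel appends a channel query parameter.
url = client.bundle_visualization_url('bundle/wiki-simple-4', channel='stable')
# e.g. BASE/bundle/wiki-simple-4/diagram.svg?channel=stable, where BASE is the client's self.url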
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
"""Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
"""
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
try:
extraction_worker.ProcessPathSpec(parser_mediator, path_spec)
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
# We cannot recover from a CacheFullError and abort processing when
# it is raised.
except dfvfs_errors.CacheFullError:
# TODO: signal engine of failure.
self._abort = True
logger.error((
'ABORT: detected cache full error while processing '
'path spec: {0:s}').format(self._current_display_name))
# All exceptions need to be caught here to prevent the worker
# from being killed by an uncaught exception.
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'unable to process path specification with error: '
'{0!s}').format(exception), path_spec=path_spec)
if getattr(self._processing_configuration, 'debug_output', False):
logger.warning(
'Unhandled exception while processing path spec: {0:s}.'.format(
self._current_display_name))
logger.exception(exception)
pdb.post_mortem() | Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification. | Below is the instruction that describes the task:
### Input:
Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
### Response:
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
"""Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
"""
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
try:
extraction_worker.ProcessPathSpec(parser_mediator, path_spec)
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
# We cannot recover from a CacheFullError and abort processing when
# it is raised.
except dfvfs_errors.CacheFullError:
# TODO: signal engine of failure.
self._abort = True
logger.error((
'ABORT: detected cache full error while processing '
'path spec: {0:s}').format(self._current_display_name))
# All exceptions need to be caught here to prevent the worker
# from being killed by an uncaught exception.
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'unable to process path specification with error: '
'{0!s}').format(exception), path_spec=path_spec)
if getattr(self._processing_configuration, 'debug_output', False):
logger.warning(
'Unhandled exception while processing path spec: {0:s}.'.format(
self._current_display_name))
logger.exception(exception)
pdb.post_mortem() |
def set_input_score_end_range(self, score):
"""Sets the input score end range.
arg: score (decimal): the new end range
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``range`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
if self.get_input_score_end_range_metadata().is_read_only():
raise errors.NoAccess()
try:
score = float(score)
except ValueError:
raise errors.InvalidArgument()
if not self._is_valid_decimal(score, self.get_input_score_end_range_metadata()):
raise errors.InvalidArgument()
self._my_map['inputScoreEndRange'] = score | Sets the input score end range.
arg: score (decimal): the new end range
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``range`` cannot be modified
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the input score end range.
arg: score (decimal): the new end range
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``range`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_input_score_end_range(self, score):
"""Sets the input score end range.
arg: score (decimal): the new end range
raise: InvalidArgument - ``score`` is invalid
raise: NoAccess - ``range`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
if self.get_input_score_end_range_metadata().is_read_only():
raise errors.NoAccess()
try:
score = float(score)
except ValueError:
raise errors.InvalidArgument()
if not self._is_valid_decimal(score, self.get_input_score_end_range_metadata()):
raise errors.InvalidArgument()
self._my_map['inputScoreEndRange'] = score |
def verify_eth_transaction(signed_hextx, eth_data_field):
"""
Verify ethDataField field in transaction
:param signed_hextx:
:param eth_data_field:
:return:
"""
logging.info('verifying ethDataField value for transaction')
ethdata_hash = []
for s in signed_hextx.split('80a0'):
ethdata_hash.append(s)
ethdata_hash = ethdata_hash[1][:64]
result = (eth_data_field == ethdata_hash)
if not result:
error_message = 'There was a problem verifying the transaction'
raise UnverifiedTransactionError(error_message)
logging.info('verified ethDataField') | Verify ethDataField field in transaction
:param signed_hextx:
:param eth_data_field:
:return: | Below is the instruction that describes the task:
### Input:
Verify ethDataField field in transaction
:param signed_hextx:
:param eth_data_field:
:return:
### Response:
def verify_eth_transaction(signed_hextx, eth_data_field):
"""
Verify ethDataField field in transaction
:param signed_hextx:
:param eth_data_field:
:return:
"""
logging.info('verifying ethDataField value for transaction')
ethdata_hash = []
for s in signed_hextx.split('80a0'):
ethdata_hash.append(s)
ethdata_hash = ethdata_hash[1][:64]
result = (eth_data_field == ethdata_hash)
if not result:
error_message = 'There was a problem verifying the transaction'
raise UnverifiedTransactionError(error_message)
logging.info('verified ethDataField') |
async def info(self):
"""Return device info."""
"""
{'MasterCapability': 9, 'TransportPort': 3975}
"""
act = self.service.action("X_GetDeviceInfo")
res = await act.async_call()
return res | Return device info. | Below is the instruction that describes the task:
### Input:
Return device info.
### Response:
async def info(self):
"""Return device info."""
"""
{'MasterCapability': 9, 'TransportPort': 3975}
"""
act = self.service.action("X_GetDeviceInfo")
res = await act.async_call()
return res |
def p_stringValueList(p):
"""stringValueList : stringValue
| stringValueList stringValue
"""
if len(p) == 2:
p[0] = _fixStringValue(p[1], p)
else:
p[0] = p[1] + _fixStringValue(p[2], p) | stringValueList : stringValue
| stringValueList stringValue | Below is the instruction that describes the task:
### Input:
stringValueList : stringValue
| stringValueList stringValue
### Response:
def p_stringValueList(p):
"""stringValueList : stringValue
| stringValueList stringValue
"""
if len(p) == 2:
p[0] = _fixStringValue(p[1], p)
else:
p[0] = p[1] + _fixStringValue(p[2], p) |
def write_svg(self):
"""
Returns PUML from the system as a SVG image. Requires plantuml library.
"""
import plantuml
puml = self.write_puml()
server = plantuml.PlantUML(url=self.url)
svg = server.processes(puml)
return svg | Returns PUML from the system as a SVG image. Requires plantuml library. | Below is the instruction that describes the task:
### Input:
Returns PUML from the system as a SVG image. Requires plantuml library.
### Response:
def write_svg(self):
"""
Returns PUML from the system as a SVG image. Requires plantuml library.
"""
import plantuml
puml = self.write_puml()
server = plantuml.PlantUML(url=self.url)
svg = server.processes(puml)
return svg |
def unget_service(self, service):
# type: (Any) -> bool
"""
Releases a service object for the associated service.
:param service: An instance of a service returned by ``get_service()``
:return: True if the bundle usage has been removed
"""
return self.__registry.unget_service(
self.__bundle, self.__reference, service
) | Releases a service object for the associated service.
:param service: An instance of a service returned by ``get_service()``
:return: True if the bundle usage has been removed | Below is the instruction that describes the task:
### Input:
Releases a service object for the associated service.
:param service: An instance of a service returned by ``get_service()``
:return: True if the bundle usage has been removed
### Response:
def unget_service(self, service):
# type: (Any) -> bool
"""
Releases a service object for the associated service.
:param service: An instance of a service returned by ``get_service()``
:return: True if the bundle usage has been removed
"""
return self.__registry.unget_service(
self.__bundle, self.__reference, service
) |
def _write_mflist_ins(ins_filename,df,prefix):
""" write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
observation names
"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
name_len = 11 - (len(prefix)+1)
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix,col[:name_len],dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n") | write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
observation names | Below is the instruction that describes the task:
### Input:
write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
observation names
### Response:
def _write_mflist_ins(ins_filename,df,prefix):
""" write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
observation names
"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
name_len = 11 - (len(prefix)+1)
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix,col[:name_len],dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n") |
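A small worked example of the instruction file this writes, using a made-up two-row DataFrame with a datetime index.
import pandas as pd

df = pd.DataFrame({'storage': [1.0, 2.0]},
                  index=pd.to_datetime(['2020-01-31', '2020-02-29']))
_write_mflist_ins('flux.ins', df, 'fx')
# flux.ins then holds the "pif ~" header followed by lines such as:
#   l1  w !fx_storage_20200131!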
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
opt = "setup"
if opt not in options:
options[opt] = filters.Filter(classname="weka.filters.AllFilter")
if opt not in self.help:
self.help[opt] = "The filter to apply to the dataset (Filter)."
opt = "keep_relationname"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to keep the original relation name (bool)."
return super(Filter, self).fix_config(options) | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | Below is the instruction that describes the task:
### Input:
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
### Response:
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
opt = "setup"
if opt not in options:
options[opt] = filters.Filter(classname="weka.filters.AllFilter")
if opt not in self.help:
self.help[opt] = "The filter to apply to the dataset (Filter)."
opt = "keep_relationname"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to keep the original relation name (bool)."
return super(Filter, self).fix_config(options) |
def create_from_snapshot(self, *args, **kwargs):
"""
Creates a Block Storage volume
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name: string - a name for the volume
snapshot_id: string - unique identifier for the volume snapshot
size_gigabytes: int - size of the Block Storage volume in GiB
filesystem_type: string, optional - name of the filesystem type the
volume will be formatted with ('ext4' or 'xfs')
filesystem_label: string, optional - the label to be applied to the
filesystem, only used in conjunction with filesystem_type
Optional Args:
description: string - text field to describe a volume
"""
data = self.get_data('volumes/',
type=POST,
params={'name': self.name,
'snapshot_id': self.snapshot_id,
'region': self.region,
'size_gigabytes': self.size_gigabytes,
'description': self.description,
'filesystem_type': self.filesystem_type,
'filesystem_label': self.filesystem_label
})
if data:
self.id = data['volume']['id']
self.created_at = data['volume']['created_at']
return self | Creates a Block Storage volume
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name: string - a name for the volume
snapshot_id: string - unique identifier for the volume snapshot
size_gigabytes: int - size of the Block Storage volume in GiB
filesystem_type: string, optional - name of the filesystem type the
volume will be formatted with ('ext4' or 'xfs')
filesystem_label: string, optional - the label to be applied to the
filesystem, only used in conjunction with filesystem_type
Optional Args:
description: string - text field to describe a volume | Below is the instruction that describes the task:
### Input:
Creates a Block Storage volume
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name: string - a name for the volume
snapshot_id: string - unique identifier for the volume snapshot
size_gigabytes: int - size of the Block Storage volume in GiB
filesystem_type: string, optional - name of the filesystem type the
volume will be formatted with ('ext4' or 'xfs')
filesystem_label: string, optional - the label to be applied to the
filesystem, only used in conjunction with filesystem_type
Optional Args:
description: string - text field to describe a volume
### Response:
def create_from_snapshot(self, *args, **kwargs):
"""
Creates a Block Storage volume
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
name: string - a name for the volume
snapshot_id: string - unique identifier for the volume snapshot
size_gigabytes: int - size of the Block Storage volume in GiB
filesystem_type: string, optional - name of the filesystem type the
volume will be formatted with ('ext4' or 'xfs')
filesystem_label: string, optional - the label to be applied to the
filesystem, only used in conjunction with filesystem_type
Optional Args:
description: string - text field to describe a volume
"""
data = self.get_data('volumes/',
type=POST,
params={'name': self.name,
'snapshot_id': self.snapshot_id,
'region': self.region,
'size_gigabytes': self.size_gigabytes,
'description': self.description,
'filesystem_type': self.filesystem_type,
'filesystem_label': self.filesystem_label
})
if data:
self.id = data['volume']['id']
self.created_at = data['volume']['created_at']
return self |
def fit_linear(xdata, ydata):
"""
Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths).
"""
x = _n.array(xdata)
y = _n.array(ydata)
ax = _n.average(x)
ay = _n.average(y)
axx = _n.average(x*x)
ayx = _n.average(y*x)
slope = (ayx - ay*ax) / (axx - ax*ax)
intercept = ay - slope*ax
return slope, intercept | Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths). | Below is the instruction that describes the task:
### Input:
Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths).
### Response:
def fit_linear(xdata, ydata):
"""
Returns slope and intercept of line of best fit:
y = a*x + b
through the supplied data.
Parameters
----------
xdata, ydata:
Arrays of x data and y data (having matching lengths).
"""
x = _n.array(xdata)
y = _n.array(ydata)
ax = _n.average(x)
ay = _n.average(y)
axx = _n.average(x*x)
ayx = _n.average(y*x)
slope = (ayx - ay*ax) / (axx - ax*ax)
intercept = ay - slope*ax
return slope, intercept |
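A quick sanity check of the averaged closed form above on noiseless data, chosen so the answer is known exactly.
xs = [0.0, 1.0, 2.0, 3.0]
ys = [1.0, 3.0, 5.0, 7.0]           # exactly y = 2*x + 1
slope, intercept = fit_linear(xs, ys)
print(slope, intercept)             # 2.0 and 1.0, up to floating point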
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request) | Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object. | Below is the instruction that describes the task:
### Input:
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
### Response:
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request) |
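A hedged boto-style sketch; the only assumption is that ScalingPolicy accepts, as keyword arguments, the attributes the method above actually reads (name, as_name, adjustment_type, scaling_adjustment, cooldown).
policy = ScalingPolicy(name='scale-up',
                       as_name='my-asg',
                       adjustment_type='ChangeInCapacity',
                       scaling_adjustment=2,
                       cooldown=300)
conn.create_scaling_policy(policy)   # conn being the autoscale connection that owns the method above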
def draw_cross(self, position, color=(255, 0, 0), radius=4):
"""Draw a cross on the canvas.
:param position: (row, col) tuple
:param color: RGB tuple
:param radius: radius of the cross (int)
"""
y, x = position
for xmod in np.arange(-radius, radius+1, 1):
xpos = x + xmod
if xpos < 0:
continue # Negative indices will draw on the opposite side.
if xpos >= self.shape[1]:
continue # Out of bounds.
self[int(y), int(xpos)] = color
for ymod in np.arange(-radius, radius+1, 1):
ypos = y + ymod
if ypos < 0:
continue # Negative indices will draw on the opposite side.
if ypos >= self.shape[0]:
continue # Out of bounds.
self[int(ypos), int(x)] = color | Draw a cross on the canvas.
:param position: (row, col) tuple
:param color: RGB tuple
:param radius: radius of the cross (int) | Below is the instruction that describes the task:
### Input:
Draw a cross on the canvas.
:param position: (row, col) tuple
:param color: RGB tuple
:param radius: radius of the cross (int)
### Response:
def draw_cross(self, position, color=(255, 0, 0), radius=4):
"""Draw a cross on the canvas.
:param position: (row, col) tuple
:param color: RGB tuple
:param radius: radius of the cross (int)
"""
y, x = position
for xmod in np.arange(-radius, radius+1, 1):
xpos = x + xmod
if xpos < 0:
continue # Negative indices will draw on the opposite side.
if xpos >= self.shape[1]:
continue # Out of bounds.
self[int(y), int(xpos)] = color
for ymod in np.arange(-radius, radius+1, 1):
ypos = y + ymod
if ypos < 0:
continue # Negative indices will draw on the opposite side.
if ypos >= self.shape[0]:
continue # Out of bounds.
self[int(ypos), int(x)] = color |
def image_read(filename, dimension=None, pixeltype='float', reorient=False):
"""
Read an ANTsImage from file
ANTsR function: `antsImageRead`
Arguments
---------
filename : string
Name of the file to read the image from.
dimension : int
Number of dimensions of the image read. This need not be the same as
the dimensions of the image in the file. Allowed values: 2, 3, 4.
If not provided, the dimension is obtained from the image file
pixeltype : string
C++ datatype to be used to represent the pixels read. This datatype
need not be the same as the datatype used in the file.
Options: unsigned char, unsigned int, float, double
reorient : boolean | string
if True, the image will be reoriented to RPI if it is 3D
if False, nothing will happen
if string, this should be the 3-letter orientation to which the
input image will reoriented if 3D.
if the image is 2D, this argument is ignored
Returns
-------
ANTsImage
"""
if filename.endswith('.npy'):
filename = os.path.expanduser(filename)
img_array = np.load(filename)
if os.path.exists(filename.replace('.npy', '.json')):
with open(filename.replace('.npy', '.json')) as json_data:
img_header = json.load(json_data)
ants_image = from_numpy(img_array,
origin=img_header.get('origin', None),
spacing=img_header.get('spacing', None),
direction=np.asarray(img_header.get('direction',None)),
has_components=img_header.get('components',1)>1)
else:
img_header = {}
ants_image = from_numpy(img_array)
else:
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
raise ValueError('File %s does not exist!' % filename)
hinfo = image_header_info(filename)
ptype = hinfo['pixeltype']
pclass = hinfo['pixelclass']
ndim = hinfo['nDimensions']
ncomp = hinfo['nComponents']
is_rgb = True if pclass == 'rgb' else False
if dimension is not None:
ndim = dimension
# error handling on pixelclass
if pclass not in _supported_pclasses:
raise ValueError('Pixel class %s not supported!' % pclass)
# error handling on pixeltype
if ptype in _unsupported_ptypes:
ptype = _unsupported_ptype_map.get(ptype, 'unsupported')
if ptype == 'unsupported':
raise ValueError('Pixeltype %s not supported' % ptype)
# error handling on dimension
if (ndim < 2) or (ndim > 4):
raise ValueError('Found %i-dimensional image - not supported!' % ndim)
libfn = utils.get_lib_fn(_image_read_dict[pclass][ptype][ndim])
itk_pointer = libfn(filename)
ants_image = iio.ANTsImage(pixeltype=ptype, dimension=ndim, components=ncomp,
pointer=itk_pointer, is_rgb=is_rgb)
if pixeltype is not None:
ants_image = ants_image.clone(pixeltype)
if (reorient != False) and (ants_image.dimension == 3):
if reorient == True:
ants_image = ants_image.reorient_image2('RPI')
elif isinstance(reorient, str):
ants_image = ants_image.reorient_image2(reorient)
return ants_image | Read an ANTsImage from file
ANTsR function: `antsImageRead`
Arguments
---------
filename : string
Name of the file to read the image from.
dimension : int
Number of dimensions of the image read. This need not be the same as
the dimensions of the image in the file. Allowed values: 2, 3, 4.
If not provided, the dimension is obtained from the image file
pixeltype : string
C++ datatype to be used to represent the pixels read. This datatype
need not be the same as the datatype used in the file.
Options: unsigned char, unsigned int, float, double
reorient : boolean | string
if True, the image will be reoriented to RPI if it is 3D
if False, nothing will happen
if string, this should be the 3-letter orientation to which the
input image will reoriented if 3D.
if the image is 2D, this argument is ignored
Returns
-------
ANTsImage | Below is the instruction that describes the task:
### Input:
Read an ANTsImage from file
ANTsR function: `antsImageRead`
Arguments
---------
filename : string
Name of the file to read the image from.
dimension : int
Number of dimensions of the image read. This need not be the same as
the dimensions of the image in the file. Allowed values: 2, 3, 4.
If not provided, the dimension is obtained from the image file
pixeltype : string
C++ datatype to be used to represent the pixels read. This datatype
need not be the same as the datatype used in the file.
Options: unsigned char, unsigned int, float, double
reorient : boolean | string
if True, the image will be reoriented to RPI if it is 3D
if False, nothing will happen
if string, this should be the 3-letter orientation to which the
input image will reoriented if 3D.
if the image is 2D, this argument is ignored
Returns
-------
ANTsImage
### Response:
def image_read(filename, dimension=None, pixeltype='float', reorient=False):
"""
Read an ANTsImage from file
ANTsR function: `antsImageRead`
Arguments
---------
filename : string
Name of the file to read the image from.
dimension : int
Number of dimensions of the image read. This need not be the same as
the dimensions of the image in the file. Allowed values: 2, 3, 4.
If not provided, the dimension is obtained from the image file
pixeltype : string
C++ datatype to be used to represent the pixels read. This datatype
need not be the same as the datatype used in the file.
Options: unsigned char, unsigned int, float, double
reorient : boolean | string
if True, the image will be reoriented to RPI if it is 3D
if False, nothing will happen
if string, this should be the 3-letter orientation to which the
input image will reoriented if 3D.
if the image is 2D, this argument is ignored
Returns
-------
ANTsImage
"""
if filename.endswith('.npy'):
filename = os.path.expanduser(filename)
img_array = np.load(filename)
if os.path.exists(filename.replace('.npy', '.json')):
with open(filename.replace('.npy', '.json')) as json_data:
img_header = json.load(json_data)
ants_image = from_numpy(img_array,
origin=img_header.get('origin', None),
spacing=img_header.get('spacing', None),
direction=np.asarray(img_header.get('direction',None)),
has_components=img_header.get('components',1)>1)
else:
img_header = {}
ants_image = from_numpy(img_array)
else:
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
raise ValueError('File %s does not exist!' % filename)
hinfo = image_header_info(filename)
ptype = hinfo['pixeltype']
pclass = hinfo['pixelclass']
ndim = hinfo['nDimensions']
ncomp = hinfo['nComponents']
is_rgb = True if pclass == 'rgb' else False
if dimension is not None:
ndim = dimension
# error handling on pixelclass
if pclass not in _supported_pclasses:
raise ValueError('Pixel class %s not supported!' % pclass)
# error handling on pixeltype
if ptype in _unsupported_ptypes:
ptype = _unsupported_ptype_map.get(ptype, 'unsupported')
if ptype == 'unsupported':
raise ValueError('Pixeltype %s not supported' % ptype)
# error handling on dimension
if (ndim < 2) or (ndim > 4):
raise ValueError('Found %i-dimensional image - not supported!' % ndim)
libfn = utils.get_lib_fn(_image_read_dict[pclass][ptype][ndim])
itk_pointer = libfn(filename)
ants_image = iio.ANTsImage(pixeltype=ptype, dimension=ndim, components=ncomp,
pointer=itk_pointer, is_rgb=is_rgb)
if pixeltype is not None:
ants_image = ants_image.clone(pixeltype)
if (reorient != False) and (ants_image.dimension == 3):
if reorient == True:
ants_image = ants_image.reorient_image2('RPI')
elif isinstance(reorient, str):
ants_image = ants_image.reorient_image2(reorient)
return ants_image |
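A quick usage sketch for the function above, assuming it is exposed as ants.image_read from the ANTsPy package; the filename below is a placeholder, not part of the record.

import ants  # assumption: ANTsPy is installed and exposes image_read

# read a 3D volume as float pixels and reorient it to RPI
img = ants.image_read('scan.nii.gz', pixeltype='float', reorient='RPI')  # placeholder path
print(img.dimension, img.pixeltype)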
def get_peptable_headerfields(headertypes, lookup=False, poolnames=False):
"""Called by driver to generate headerfields object"""
field_defs = {'isoquant': get_isoquant_fields,
'precursorquant': get_precursorquant_fields,
'peptidefdr': get_peptidefdr_fields,
'peptidepep': get_peptidepep_fields,
'proteindata': get_proteininfo_fields,
}
return generate_headerfields(headertypes, field_defs, poolnames, lookup) | Called by driver to generate headerfields object | Below is the the instruction that describes the task:
### Input:
Called by driver to generate headerfields object
### Response:
def get_peptable_headerfields(headertypes, lookup=False, poolnames=False):
"""Called by driver to generate headerfields object"""
field_defs = {'isoquant': get_isoquant_fields,
'precursorquant': get_precursorquant_fields,
'peptidefdr': get_peptidefdr_fields,
'peptidepep': get_peptidepep_fields,
'proteindata': get_proteininfo_fields,
}
return generate_headerfields(headertypes, field_defs, poolnames, lookup) |
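A minimal sketch of how a driver might call this helper; the header types, pool names, and the None lookup below are placeholders rather than values taken from the record.

headertypes = ['precursorquant', 'peptidefdr', 'proteindata']  # hypothetical selection of header types
headerfields = get_peptable_headerfields(headertypes, lookup=None, poolnames=['pool_A', 'pool_B'])
# headerfields is whatever generate_headerfields builds from the selected field definitions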
def _qualifiers_tomof(qualifiers, indent, maxline=MAX_MOF_LINE):
"""
Return a MOF string with the qualifier values, including the surrounding
square brackets. The qualifiers are ordered by their name.
Return empty string if no qualifiers.
Normally multiline output and may fold qualifiers into multiple lines.
The order of qualifiers is preserved.
Parameters:
qualifiers (NocaseDict): Qualifiers to format.
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted to the opening bracket in the first line.
Returns:
:term:`unicode string`: MOF string.
"""
if not qualifiers:
return u''
mof = []
mof.append(_indent_str(indent))
mof.append(u'[')
line_pos = indent + 1
mof_quals = []
for q in qualifiers.itervalues():
mof_quals.append(q.tomof(indent + 1 + MOF_INDENT, maxline, line_pos))
delim = ',\n' + _indent_str(indent + 1)
mof.append(delim.join(mof_quals))
mof.append(u']\n')
return u''.join(mof) | Return a MOF string with the qualifier values, including the surrounding
square brackets. The qualifiers are ordered by their name.
Return empty string if no qualifiers.
Normally multiline output and may fold qualifiers into multiple lines.
The order of qualifiers is preserved.
Parameters:
qualifiers (NocaseDict): Qualifiers to format.
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted to the opening bracket in the first line.
Returns:
:term:`unicode string`: MOF string. | Below is the the instruction that describes the task:
### Input:
Return a MOF string with the qualifier values, including the surrounding
square brackets. The qualifiers are ordered by their name.
Return empty string if no qualifiers.
Normally multiline output and may fold qualifiers into multiple lines.
The order of qualifiers is preserved.
Parameters:
qualifiers (NocaseDict): Qualifiers to format.
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted to the opening bracket in the first line.
Returns:
:term:`unicode string`: MOF string.
### Response:
def _qualifiers_tomof(qualifiers, indent, maxline=MAX_MOF_LINE):
"""
Return a MOF string with the qualifier values, including the surrounding
square brackets. The qualifiers are ordered by their name.
Return empty string if no qualifiers.
Normally multiline output and may fold qualifiers into multiple lines.
The order of qualifiers is preserved.
Parameters:
qualifiers (NocaseDict): Qualifiers to format.
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted to the opening bracket in the first line.
Returns:
:term:`unicode string`: MOF string.
"""
if not qualifiers:
return u''
mof = []
mof.append(_indent_str(indent))
mof.append(u'[')
line_pos = indent + 1
mof_quals = []
for q in qualifiers.itervalues():
mof_quals.append(q.tomof(indent + 1 + MOF_INDENT, maxline, line_pos))
delim = ',\n' + _indent_str(indent + 1)
mof.append(delim.join(mof_quals))
mof.append(u']\n')
return u''.join(mof) |
def extract(self, item, list_article_candidate):
"""Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date
"""
list_publish_date = []
for article_candidate in list_article_candidate:
if article_candidate.publish_date != None:
list_publish_date.append((article_candidate.publish_date, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_publish_date) == 0:
return None
        # If there is more than one option, return the result from date_extractor.
list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"]
if len(list_date_extractor) == 0:
# If there is no date extracted by date_extractor, return the first result of list_publish_date.
return list_publish_date[0][0]
else:
return list_date_extractor[0][0] | Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date | Below is the the instruction that describes the task:
### Input:
Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date
### Response:
def extract(self, item, list_article_candidate):
"""Compares the extracted publish dates.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely publish date
"""
list_publish_date = []
for article_candidate in list_article_candidate:
if article_candidate.publish_date != None:
list_publish_date.append((article_candidate.publish_date, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_publish_date) == 0:
return None
        # If there is more than one option, return the result from date_extractor.
list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"]
if len(list_date_extractor) == 0:
# If there is no date extracted by date_extractor, return the first result of list_publish_date.
return list_publish_date[0][0]
else:
return list_date_extractor[0][0] |
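The selection logic above only reads the publish_date and extractor attributes of each candidate, so it can be sketched with stand-in objects; the comparer class name below is hypothetical.

from types import SimpleNamespace

candidates = [
    SimpleNamespace(publish_date='2016-01-01', extractor='newspaper'),
    SimpleNamespace(publish_date='2016-01-02', extractor='date_extractor'),
]
# comparer = PublishDateComparer()  # hypothetical name for the class that owns extract()
# comparer.extract(item=None, list_article_candidate=candidates)
# -> '2016-01-02', because a date_extractor result is preferred over the first candidate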
def sim(self, src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below
which the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove
before declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
"""
if tar == src:
return 1.0
if not src or not tar:
return 0.0
mismatches = 0
ham = Hamming().dist_abs(src, tar, diff_lens=True)
max_length = max(len(src), len(tar))
while src and tar and mismatches <= max_mismatches:
if (
max_length < 1
or (1 - (max_length - ham) / max_length) <= threshold
):
return 1.0
else:
mismatches += 1
ham -= 1
max_length -= 1
if max_length < 1:
return 1.0
return 0.0 | Return the MLIPNS similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below
which the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove
before declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0 | Below is the the instruction that describes the task:
### Input:
Return the MLIPNS similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below
which the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove
before declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
### Response:
def sim(self, src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below
which the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove
before declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
"""
if tar == src:
return 1.0
if not src or not tar:
return 0.0
mismatches = 0
ham = Hamming().dist_abs(src, tar, diff_lens=True)
max_length = max(len(src), len(tar))
while src and tar and mismatches <= max_mismatches:
if (
max_length < 1
or (1 - (max_length - ham) / max_length) <= threshold
):
return 1.0
else:
mismatches += 1
ham -= 1
max_length -= 1
if max_length < 1:
return 1.0
return 0.0 |
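A short usage sketch, assuming the method above belongs to the MLIPNS class in abydos (the import path is an assumption).

from abydos.distance import MLIPNS  # assumption: abydos is installed

cmp = MLIPNS()
print(cmp.sim('cat', 'hat'))        # 1.0, matching the doctest above
print(cmp.sim('Niall', 'Neil'))     # 0.0
print(cmp.sim('cat', 'hat', max_mismatches=0))  # stricter: fewer mismatches tolerated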
def get_vertical_orientation_property(value, is_bytes=False):
"""Get `VO` property."""
obj = unidata.ascii_vertical_orientation if is_bytes else unidata.unicode_vertical_orientation
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['verticalorientation'].get(negated, negated)
else:
value = unidata.unicode_alias['verticalorientation'].get(value, value)
return obj[value] | Get `VO` property. | Below is the the instruction that describes the task:
### Input:
Get `VO` property.
### Response:
def get_vertical_orientation_property(value, is_bytes=False):
"""Get `VO` property."""
obj = unidata.ascii_vertical_orientation if is_bytes else unidata.unicode_vertical_orientation
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['verticalorientation'].get(negated, negated)
else:
value = unidata.unicode_alias['verticalorientation'].get(value, value)
return obj[value] |
def list_gen(self, keyword=None, arg=None):
"""Generator for LIST command.
See list() for more information.
Yields:
An element in the list returned by list().
"""
if keyword:
keyword = keyword.upper()
if keyword is None or keyword == "ACTIVE":
return self.list_active_gen(arg)
if keyword == "ACTIVE.TIMES":
return self.list_active_times_gen()
if keyword == "DISTRIB.PATS":
return self.list_distrib_pats_gen()
if keyword == "HEADERS":
return self.list_headers_gen(arg)
if keyword == "NEWSGROUPS":
return self.list_newsgroups_gen(arg)
if keyword == "OVERVIEW.FMT":
return self.list_overview_fmt_gen()
if keyword == "EXTENSIONS":
return self.list_extensions_gen()
raise NotImplementedError() | Generator for LIST command.
See list() for more information.
Yields:
An element in the list returned by list(). | Below is the the instruction that describes the task:
### Input:
Generator for LIST command.
See list() for more information.
Yields:
An element in the list returned by list().
### Response:
def list_gen(self, keyword=None, arg=None):
"""Generator for LIST command.
See list() for more information.
Yields:
An element in the list returned by list().
"""
if keyword:
keyword = keyword.upper()
if keyword is None or keyword == "ACTIVE":
return self.list_active_gen(arg)
if keyword == "ACTIVE.TIMES":
return self.list_active_times_gen()
if keyword == "DISTRIB.PATS":
return self.list_distrib_pats_gen()
if keyword == "HEADERS":
return self.list_headers_gen(arg)
if keyword == "NEWSGROUPS":
return self.list_newsgroups_gen(arg)
if keyword == "OVERVIEW.FMT":
return self.list_overview_fmt_gen()
if keyword == "EXTENSIONS":
return self.list_extensions_gen()
raise NotImplementedError() |
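A hedged sketch of consuming this generator; it assumes the method lives on an already-connected NNTP client object (the client class name and hostname below are assumptions).

# client = NNTPClient('news.example.com')           # hypothetical construction
# for group in client.list_gen():                   # defaults to LIST ACTIVE
#     print(group)
# for entry in client.list_gen('NEWSGROUPS', 'comp.lang.*'):
#     print(entry)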
def to_coo(self, fp=None, vartype_header=False):
"""Serialize the binary quadratic model to a COOrdinate_ format encoding.
.. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)
Args:
fp (file, optional):
`.write()`-supporting `file object`_ to save the linear and quadratic biases
of a binary quadratic model to. The model is stored as a list of 3-tuples,
(i, j, bias), where :math:`i=j` for linear biases. If not provided,
returns a string.
vartype_header (bool, optional, default=False):
                If true, the binary quadratic model's variable type is prepended to the
string or file as a header.
.. _file object: https://docs.python.org/3/glossary.html#term-file-object
        .. note:: Variables must use index labels (numeric labels). Binary quadratic
models saved to COOrdinate format encoding do not preserve offsets.
Examples:
This is an example of a binary quadratic model encoded in COOrdinate format.
.. code-block:: none
0 0 0.50000
0 1 0.50000
1 1 -1.50000
The Coordinate format with a header
.. code-block:: none
# vartype=SPIN
0 0 0.50000
0 1 0.50000
1 1 -1.50000
This is an example of writing a binary quadratic model to a COOrdinate-format
file.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> with open('tmp.ising', 'w') as file: # doctest: +SKIP
... bqm.to_coo(file)
This is an example of writing a binary quadratic model to a COOrdinate-format string.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> bqm.to_coo() # doctest: +SKIP
0 0 -1.000000
0 1 -1.000000
1 1 1.000000
"""
import dimod.serialization.coo as coo
if fp is None:
return coo.dumps(self, vartype_header)
else:
coo.dump(self, fp, vartype_header) | Serialize the binary quadratic model to a COOrdinate_ format encoding.
.. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)
Args:
fp (file, optional):
`.write()`-supporting `file object`_ to save the linear and quadratic biases
of a binary quadratic model to. The model is stored as a list of 3-tuples,
(i, j, bias), where :math:`i=j` for linear biases. If not provided,
returns a string.
vartype_header (bool, optional, default=False):
        If true, the binary quadratic model's variable type is prepended to the
string or file as a header.
.. _file object: https://docs.python.org/3/glossary.html#term-file-object
.. note:: Variables must use index labels (numeric labels). Binary quadratic
models saved to COOrdinate format encoding do not preserve offsets.
Examples:
This is an example of a binary quadratic model encoded in COOrdinate format.
.. code-block:: none
0 0 0.50000
0 1 0.50000
1 1 -1.50000
The Coordinate format with a header
.. code-block:: none
# vartype=SPIN
0 0 0.50000
0 1 0.50000
1 1 -1.50000
This is an example of writing a binary quadratic model to a COOrdinate-format
file.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> with open('tmp.ising', 'w') as file: # doctest: +SKIP
... bqm.to_coo(file)
This is an example of writing a binary quadratic model to a COOrdinate-format string.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> bqm.to_coo() # doctest: +SKIP
0 0 -1.000000
0 1 -1.000000
1 1 1.000000 | Below is the the instruction that describes the task:
### Input:
Serialize the binary quadratic model to a COOrdinate_ format encoding.
.. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)
Args:
fp (file, optional):
`.write()`-supporting `file object`_ to save the linear and quadratic biases
of a binary quadratic model to. The model is stored as a list of 3-tuples,
(i, j, bias), where :math:`i=j` for linear biases. If not provided,
returns a string.
vartype_header (bool, optional, default=False):
        If true, the binary quadratic model's variable type is prepended to the
string or file as a header.
.. _file object: https://docs.python.org/3/glossary.html#term-file-object
.. note:: Variables must use index labels (numeric labels). Binary quadratic
models saved to COOrdinate format encoding do not preserve offsets.
Examples:
This is an example of a binary quadratic model encoded in COOrdinate format.
.. code-block:: none
0 0 0.50000
0 1 0.50000
1 1 -1.50000
The Coordinate format with a header
.. code-block:: none
# vartype=SPIN
0 0 0.50000
0 1 0.50000
1 1 -1.50000
This is an example of writing a binary quadratic model to a COOrdinate-format
file.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> with open('tmp.ising', 'w') as file: # doctest: +SKIP
... bqm.to_coo(file)
This is an example of writing a binary quadratic model to a COOrdinate-format string.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> bqm.to_coo() # doctest: +SKIP
0 0 -1.000000
0 1 -1.000000
1 1 1.000000
### Response:
def to_coo(self, fp=None, vartype_header=False):
"""Serialize the binary quadratic model to a COOrdinate_ format encoding.
.. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)
Args:
fp (file, optional):
`.write()`-supporting `file object`_ to save the linear and quadratic biases
of a binary quadratic model to. The model is stored as a list of 3-tuples,
(i, j, bias), where :math:`i=j` for linear biases. If not provided,
returns a string.
vartype_header (bool, optional, default=False):
                If true, the binary quadratic model's variable type is prepended to the
string or file as a header.
.. _file object: https://docs.python.org/3/glossary.html#term-file-object
        .. note:: Variables must use index labels (numeric labels). Binary quadratic
models saved to COOrdinate format encoding do not preserve offsets.
Examples:
This is an example of a binary quadratic model encoded in COOrdinate format.
.. code-block:: none
0 0 0.50000
0 1 0.50000
1 1 -1.50000
The Coordinate format with a header
.. code-block:: none
# vartype=SPIN
0 0 0.50000
0 1 0.50000
1 1 -1.50000
This is an example of writing a binary quadratic model to a COOrdinate-format
file.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> with open('tmp.ising', 'w') as file: # doctest: +SKIP
... bqm.to_coo(file)
This is an example of writing a binary quadratic model to a COOrdinate-format string.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> bqm.to_coo() # doctest: +SKIP
0 0 -1.000000
0 1 -1.000000
1 1 1.000000
"""
import dimod.serialization.coo as coo
if fp is None:
return coo.dumps(self, vartype_header)
else:
coo.dump(self, fp, vartype_header) |
def _read_stream_as_string(stream, encoding):
"""Read stream as string
Originally in azure-batch-samples.Python.Batch.common.helpers
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes') | Read stream as string
Originally in azure-batch-samples.Python.Batch.common.helpers
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Read stream as string
Originally in azure-batch-samples.Python.Batch.common.helpers
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
### Response:
def _read_stream_as_string(stream, encoding):
"""Read stream as string
Originally in azure-batch-samples.Python.Batch.common.helpers
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes') |
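Because the helper only iterates the stream and decodes the accumulated bytes, it can be exercised with any iterable of byte chunks; the chunks below are made up.

chunks = iter([b'hello ', b'world'])                   # stand-in for a batch file download stream
text = _read_stream_as_string(chunks, encoding=None)   # None falls back to utf-8
assert text == 'hello world'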
def read(self, timeout=20.0):
"""
read data on the IN endpoint associated to the HID interface
"""
start = time()
while len(self.rcv_data) == 0:
sleep(0)
if time() - start > timeout:
# Read operations should typically take ~1-2ms.
# If this exception occurs, then it could indicate
# a problem in one of the following areas:
# 1. Bad usb driver causing either a dropped read or write
                # 2. CMSIS-DAP firmware problem causing a dropped read or write
# 3. CMSIS-DAP is performing a long operation or is being
# halted in a debugger
raise DAPAccessIntf.DeviceError("Read timed out")
return self.rcv_data.popleft() | read data on the IN endpoint associated to the HID interface | Below is the the instruction that describes the task:
### Input:
read data on the IN endpoint associated to the HID interface
### Response:
def read(self, timeout=20.0):
"""
read data on the IN endpoint associated to the HID interface
"""
start = time()
while len(self.rcv_data) == 0:
sleep(0)
if time() - start > timeout:
# Read operations should typically take ~1-2ms.
# If this exception occurs, then it could indicate
# a problem in one of the following areas:
# 1. Bad usb driver causing either a dropped read or write
                # 2. CMSIS-DAP firmware problem causing a dropped read or write
# 3. CMSIS-DAP is performing a long operation or is being
# halted in a debugger
raise DAPAccessIntf.DeviceError("Read timed out")
return self.rcv_data.popleft() |
def copy(self):
"""Copy text to clipboard"""
if not self.selectedIndexes():
return
(row_min, row_max,
col_min, col_max) = get_idx_rect(self.selectedIndexes())
index = header = False
df = self.model().df
obj = df.iloc[slice(row_min, row_max + 1),
slice(col_min, col_max + 1)]
output = io.StringIO()
obj.to_csv(output, sep='\t', index=index, header=header)
if not PY2:
contents = output.getvalue()
else:
contents = output.getvalue().decode('utf-8')
output.close()
clipboard = QApplication.clipboard()
clipboard.setText(contents) | Copy text to clipboard | Below is the the instruction that describes the task:
### Input:
Copy text to clipboard
### Response:
def copy(self):
"""Copy text to clipboard"""
if not self.selectedIndexes():
return
(row_min, row_max,
col_min, col_max) = get_idx_rect(self.selectedIndexes())
index = header = False
df = self.model().df
obj = df.iloc[slice(row_min, row_max + 1),
slice(col_min, col_max + 1)]
output = io.StringIO()
obj.to_csv(output, sep='\t', index=index, header=header)
if not PY2:
contents = output.getvalue()
else:
contents = output.getvalue().decode('utf-8')
output.close()
clipboard = QApplication.clipboard()
clipboard.setText(contents) |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionContext for this FunctionInstance
:rtype: twilio.rest.serverless.v1.service.function.FunctionContext
"""
if self._context is None:
self._context = FunctionContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionContext for this FunctionInstance
:rtype: twilio.rest.serverless.v1.service.function.FunctionContext | Below is the the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionContext for this FunctionInstance
:rtype: twilio.rest.serverless.v1.service.function.FunctionContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FunctionContext for this FunctionInstance
:rtype: twilio.rest.serverless.v1.service.function.FunctionContext
"""
if self._context is None:
self._context = FunctionContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context |
def add_case(self, case_obj):
"""Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case)
"""
for ind_obj in case_obj.individuals:
self._add_individual(ind_obj)
logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
self.case_objs.append(case_obj)
if case_obj.tabix_index:
logger.debug("Setting filters.can_filter_range to True")
self.filters.can_filter_range = True | Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case) | Below is the the instruction that describes the task:
### Input:
Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case)
### Response:
def add_case(self, case_obj):
"""Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case)
"""
for ind_obj in case_obj.individuals:
self._add_individual(ind_obj)
logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
self.case_objs.append(case_obj)
if case_obj.tabix_index:
logger.debug("Setting filters.can_filter_range to True")
self.filters.can_filter_range = True |
def _setup_xauth(self):
'''
Set up the Xauthority file and the XAUTHORITY environment variable.
'''
handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
suffix='.Xauthority')
self._xauth_filename = filename
os.close(handle)
# Save old environment
self._old_xauth = {}
self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY')
os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename
cookie = xauth.generate_mcookie()
xauth.call('add', self.new_display_var, '.', cookie) | Set up the Xauthority file and the XAUTHORITY environment variable. | Below is the the instruction that describes the task:
### Input:
Set up the Xauthority file and the XAUTHORITY environment variable.
### Response:
def _setup_xauth(self):
'''
Set up the Xauthority file and the XAUTHORITY environment variable.
'''
handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
suffix='.Xauthority')
self._xauth_filename = filename
os.close(handle)
# Save old environment
self._old_xauth = {}
self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY')
os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename
cookie = xauth.generate_mcookie()
xauth.call('add', self.new_display_var, '.', cookie) |
def load(self, config):
"""load the configuration"""
self.config = config
if 'start' not in self.config:
raise ParseError('missing start entry')
if 'states' not in self.config:
raise ParseError('missing states entry')
if 'transitions' not in self.config:
raise ParseError('missing transitions entry')
for state, val in self.config['states'].iteritems():
state = State(state)
state.entering = Action.load(val.get('entering'))
state.leaving = Action.load(val.get('leaving'))
self.states.add(state)
self.start = self.states[self.config['start']]
for transition, val in self.config['transitions'].iteritems():
if '->' in transition:
# from->to
lft, rgt = transition.split('->')
if lft == '*':
sfroms = self.states.keys()
else:
sfroms = lft.split(',')
if rgt == '*':
stos = self.states.keys()
else:
stos = rgt.split(',')
pairs = ((f, t) for f in sfroms for t in stos)
else:
# self transition 'from1,from2' = from1->from1, from2->from2
if transition == '*':
ss = self.states.keys()
else:
ss = transition.split(',')
pairs = ((x, x) for x in ss)
for sfrom, sto in pairs:
if sfrom not in self.states:
raise ParseError("Could find state %r" % sfrom)
if sto not in self.states:
raise ParseError("Could find state %r" % sto)
s_from = self.states[sfrom]
s_to = self.states[sto]
if not isinstance(val, list):
val = [val]
for v in val:
when = v['when']
actions = Action.load(v.get('actions'))
transition = Transition(s_from, s_to, Condition(when), actions)
s_from.transitions.append(transition) | load the configuration | Below is the the instruction that describes the task:
### Input:
load the configuration
### Response:
def load(self, config):
"""load the configuration"""
self.config = config
if 'start' not in self.config:
raise ParseError('missing start entry')
if 'states' not in self.config:
raise ParseError('missing states entry')
if 'transitions' not in self.config:
raise ParseError('missing transitions entry')
for state, val in self.config['states'].iteritems():
state = State(state)
state.entering = Action.load(val.get('entering'))
state.leaving = Action.load(val.get('leaving'))
self.states.add(state)
self.start = self.states[self.config['start']]
for transition, val in self.config['transitions'].iteritems():
if '->' in transition:
# from->to
lft, rgt = transition.split('->')
if lft == '*':
sfroms = self.states.keys()
else:
sfroms = lft.split(',')
if rgt == '*':
stos = self.states.keys()
else:
stos = rgt.split(',')
pairs = ((f, t) for f in sfroms for t in stos)
else:
# self transition 'from1,from2' = from1->from1, from2->from2
if transition == '*':
ss = self.states.keys()
else:
ss = transition.split(',')
pairs = ((x, x) for x in ss)
for sfrom, sto in pairs:
if sfrom not in self.states:
raise ParseError("Could find state %r" % sfrom)
if sto not in self.states:
raise ParseError("Could find state %r" % sto)
s_from = self.states[sfrom]
s_to = self.states[sto]
if not isinstance(val, list):
val = [val]
for v in val:
when = v['when']
actions = Action.load(v.get('actions'))
transition = Transition(s_from, s_to, Condition(when), actions)
s_from.transitions.append(transition) |
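A sketch of the configuration shape this loader expects, inferred only from the keys read above ('start', 'states', 'transitions', the 'a->b' / 'a,b' / '*' key syntax, and the per-transition 'when'/'actions' fields); the concrete state names, conditions, and action names are made up.

example_config = {
    'start': 'idle',
    'states': {
        'idle':    {},                                                  # no entering/leaving actions
        'running': {'entering': 'start_pump', 'leaving': 'stop_pump'},  # hypothetical actions
    },
    'transitions': {
        'idle->running': {'when': 'button_pressed', 'actions': 'log_start'},
        'running->idle': [{'when': 'timeout'}, {'when': 'button_pressed'}],
        '*': {'when': 'heartbeat'},                                     # self-transition on every state
    },
}
# machine.load(example_config)  # assuming an instance of the class that owns load()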
def cli(ctx, hpo_term, check_terms, output, p_value_limit, verbose, username,
password, to_json):
"Give hpo terms either on the form 'HP:0001623', or '0001623'"
loglevel = LEVELS.get(min(verbose, 3))
configure_stream(level=loglevel)
if not hpo_term:
logger.info("Please specify at least one hpo term with '-t/--hpo_term'.")
ctx.abort()
if not (username and password):
logger.info("Please specify username with -u and password with -p.")
logger.info("Contact [email protected].")
ctx.abort()
hpo_list = []
for term in hpo_term:
if len(term.split(':')) < 2:
term = ':'.join(['HP', term])
hpo_list.append(term)
logger.info("HPO terms used: {0}".format(','.join(hpo_list)))
if check_terms:
for term in hpo_list:
try:
if not validate_term(username, password, term):
logger.info("HPO term : {0} does not exist".format(term))
else:
logger.info("HPO term : {0} does exist!".format(term))
except RuntimeError as err:
click.echo(err)
ctx.abort()
ctx.abort()
else:
try:
for result in query(username, password, *hpo_list):
if to_json:
click.echo(json.dumps(result))
else:
print_string = "{0}\t{1}:{2}\t{3}\t{4}".format(
result['p_value'],
result['disease_source'],
result['disease_nr'],
result['description'],
','.join(result['gene_symbols'])
)
p_value = result['p_value']
if p_value <= p_value_limit:
click.echo(print_string)
except RuntimeError as e:
click.echo(e)
            ctx.abort() | Give hpo terms either in the form 'HP:0001623', or '0001623' | Below is the the instruction that describes the task:
### Input:
Give hpo terms either in the form 'HP:0001623', or '0001623'
### Response:
def cli(ctx, hpo_term, check_terms, output, p_value_limit, verbose, username,
password, to_json):
"Give hpo terms either on the form 'HP:0001623', or '0001623'"
loglevel = LEVELS.get(min(verbose, 3))
configure_stream(level=loglevel)
if not hpo_term:
logger.info("Please specify at least one hpo term with '-t/--hpo_term'.")
ctx.abort()
if not (username and password):
logger.info("Please specify username with -u and password with -p.")
logger.info("Contact [email protected].")
ctx.abort()
hpo_list = []
for term in hpo_term:
if len(term.split(':')) < 2:
term = ':'.join(['HP', term])
hpo_list.append(term)
logger.info("HPO terms used: {0}".format(','.join(hpo_list)))
if check_terms:
for term in hpo_list:
try:
if not validate_term(username, password, term):
logger.info("HPO term : {0} does not exist".format(term))
else:
logger.info("HPO term : {0} does exist!".format(term))
except RuntimeError as err:
click.echo(err)
ctx.abort()
ctx.abort()
else:
try:
for result in query(username, password, *hpo_list):
if to_json:
click.echo(json.dumps(result))
else:
print_string = "{0}\t{1}:{2}\t{3}\t{4}".format(
result['p_value'],
result['disease_source'],
result['disease_nr'],
result['description'],
','.join(result['gene_symbols'])
)
p_value = result['p_value']
if p_value <= p_value_limit:
click.echo(print_string)
except RuntimeError as e:
click.echo(e)
ctx.abort() |
def differential_pressure_meter_solver(D, rho, mu, k, D2=None, P1=None, P2=None,
m=None, meter_type=ISO_5167_ORIFICE,
taps=None):
r'''Calculates either the mass flow rate, the upstream pressure, the second
pressure value, or the orifice diameter for a differential
pressure flow meter based on the geometry of the meter, measured pressures
of the meter, and the density, viscosity, and isentropic exponent of the
fluid. This solves an equation iteratively to obtain the correct flow rate.
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
k : float
Isentropic exponent of fluid, [-]
D2 : float, optional
Diameter of orifice, or venturi meter orifice, or flow tube orifice,
or cone meter end diameter, or wedge meter fluid flow height, [m]
P1 : float, optional
Static pressure of fluid upstream of differential pressure meter at the
cross-section of the pressure tap, [Pa]
P2 : float, optional
Static pressure of fluid downstream of differential pressure meter or
at the prescribed location (varies by type of meter) [Pa]
m : float, optional
Mass flow rate of fluid through the flow meter, [kg/s]
meter_type : str, optional
One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
'venuri nozzle', 'as cast convergent venturi tube',
'machined convergent venturi tube',
'rough welded convergent venturi tube', 'cone meter',
'wedge meter'), [-]
taps : str, optional
The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
applies for orifice meters only, [-]
Returns
-------
ans : float
One of `m`, the mass flow rate of the fluid; `P1`, the pressure
upstream of the flow meter; `P2`, the second pressure
tap's value; and `D2`, the diameter of the measuring device; units
of respectively, kg/s, Pa, Pa, or m
Notes
-----
See the appropriate functions for the documentation for the formulas and
references used in each method.
The solvers make some assumptions about the range of values answers may be
in.
Note that the solver for the upstream pressure uses the provided values of
density, viscosity and isentropic exponent; whereas these values all
depend on pressure (albeit to a small extent). An outer loop should be
added with pressure-dependent values calculated in it for maximum accuracy.
It would be possible to solve for the upstream pipe diameter, but there is
no use for that functionality.
Examples
--------
>>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
7.702338035732167
>>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
0.04999999990831885
'''
if m is None and None not in (D, D2, P1, P2):
def to_solve(m_D):
m = m_D*D
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
err = m - m_calc
return err
# Diameter to mass flow ratio
m_D_guess = 40
if rho < 100.0:
m_D_guess *= 1e-2
return secant(to_solve, m_D_guess)*D
elif D2 is None and None not in (D, m, P1, P2):
def to_solve(D2):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, D*(1-1E-9), D*5E-3)
elif P2 is None and None not in (D, D2, m, P1):
def to_solve(P2):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, P1*(1-1E-9), P1*0.5)
elif P1 is None and None not in (D, D2, m, P2):
def to_solve(P1):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, P2*(1+1E-9), P2*1.4)
else:
        raise ValueError('Solver is capable of solving for one of P1, P2, D2, or m only.')
pressure value, or the orifice diameter for a differential
pressure flow meter based on the geometry of the meter, measured pressures
of the meter, and the density, viscosity, and isentropic exponent of the
fluid. This solves an equation iteratively to obtain the correct flow rate.
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
k : float
Isentropic exponent of fluid, [-]
D2 : float, optional
Diameter of orifice, or venturi meter orifice, or flow tube orifice,
or cone meter end diameter, or wedge meter fluid flow height, [m]
P1 : float, optional
Static pressure of fluid upstream of differential pressure meter at the
cross-section of the pressure tap, [Pa]
P2 : float, optional
Static pressure of fluid downstream of differential pressure meter or
at the prescribed location (varies by type of meter) [Pa]
m : float, optional
Mass flow rate of fluid through the flow meter, [kg/s]
meter_type : str, optional
One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
'venuri nozzle', 'as cast convergent venturi tube',
'machined convergent venturi tube',
'rough welded convergent venturi tube', 'cone meter',
'wedge meter'), [-]
taps : str, optional
The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
applies for orifice meters only, [-]
Returns
-------
ans : float
One of `m`, the mass flow rate of the fluid; `P1`, the pressure
upstream of the flow meter; `P2`, the second pressure
tap's value; and `D2`, the diameter of the measuring device; units
of respectively, kg/s, Pa, Pa, or m
Notes
-----
See the appropriate functions for the documentation for the formulas and
references used in each method.
The solvers make some assumptions about the range of values answers may be
in.
Note that the solver for the upstream pressure uses the provided values of
density, viscosity and isentropic exponent; whereas these values all
depend on pressure (albeit to a small extent). An outer loop should be
added with pressure-dependent values calculated in it for maximum accuracy.
It would be possible to solve for the upstream pipe diameter, but there is
no use for that functionality.
Examples
--------
>>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
7.702338035732167
>>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
0.04999999990831885 | Below is the the instruction that describes the task:
### Input:
r'''Calculates either the mass flow rate, the upstream pressure, the second
pressure value, or the orifice diameter for a differential
pressure flow meter based on the geometry of the meter, measured pressures
of the meter, and the density, viscosity, and isentropic exponent of the
fluid. This solves an equation iteratively to obtain the correct flow rate.
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
k : float
Isentropic exponent of fluid, [-]
D2 : float, optional
Diameter of orifice, or venturi meter orifice, or flow tube orifice,
or cone meter end diameter, or wedge meter fluid flow height, [m]
P1 : float, optional
Static pressure of fluid upstream of differential pressure meter at the
cross-section of the pressure tap, [Pa]
P2 : float, optional
Static pressure of fluid downstream of differential pressure meter or
at the prescribed location (varies by type of meter) [Pa]
m : float, optional
Mass flow rate of fluid through the flow meter, [kg/s]
meter_type : str, optional
One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
'venuri nozzle', 'as cast convergent venturi tube',
'machined convergent venturi tube',
'rough welded convergent venturi tube', 'cone meter',
'wedge meter'), [-]
taps : str, optional
The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
applies for orifice meters only, [-]
Returns
-------
ans : float
One of `m`, the mass flow rate of the fluid; `P1`, the pressure
upstream of the flow meter; `P2`, the second pressure
tap's value; and `D2`, the diameter of the measuring device; units
of respectively, kg/s, Pa, Pa, or m
Notes
-----
See the appropriate functions for the documentation for the formulas and
references used in each method.
The solvers make some assumptions about the range of values answers may be
in.
Note that the solver for the upstream pressure uses the provided values of
density, viscosity and isentropic exponent; whereas these values all
depend on pressure (albeit to a small extent). An outer loop should be
added with pressure-dependent values calculated in it for maximum accuracy.
It would be possible to solve for the upstream pipe diameter, but there is
no use for that functionality.
Examples
--------
>>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
7.702338035732167
>>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
0.04999999990831885
### Response:
def differential_pressure_meter_solver(D, rho, mu, k, D2=None, P1=None, P2=None,
m=None, meter_type=ISO_5167_ORIFICE,
taps=None):
r'''Calculates either the mass flow rate, the upstream pressure, the second
pressure value, or the orifice diameter for a differential
pressure flow meter based on the geometry of the meter, measured pressures
of the meter, and the density, viscosity, and isentropic exponent of the
fluid. This solves an equation iteratively to obtain the correct flow rate.
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
k : float
Isentropic exponent of fluid, [-]
D2 : float, optional
Diameter of orifice, or venturi meter orifice, or flow tube orifice,
or cone meter end diameter, or wedge meter fluid flow height, [m]
P1 : float, optional
Static pressure of fluid upstream of differential pressure meter at the
cross-section of the pressure tap, [Pa]
P2 : float, optional
Static pressure of fluid downstream of differential pressure meter or
at the prescribed location (varies by type of meter) [Pa]
m : float, optional
Mass flow rate of fluid through the flow meter, [kg/s]
meter_type : str, optional
One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
'venuri nozzle', 'as cast convergent venturi tube',
'machined convergent venturi tube',
'rough welded convergent venturi tube', 'cone meter',
'wedge meter'), [-]
taps : str, optional
The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
applies for orifice meters only, [-]
Returns
-------
ans : float
One of `m`, the mass flow rate of the fluid; `P1`, the pressure
upstream of the flow meter; `P2`, the second pressure
tap's value; and `D2`, the diameter of the measuring device; units
of respectively, kg/s, Pa, Pa, or m
Notes
-----
See the appropriate functions for the documentation for the formulas and
references used in each method.
The solvers make some assumptions about the range of values answers may be
in.
Note that the solver for the upstream pressure uses the provided values of
density, viscosity and isentropic exponent; whereas these values all
depend on pressure (albeit to a small extent). An outer loop should be
added with pressure-dependent values calculated in it for maximum accuracy.
It would be possible to solve for the upstream pipe diameter, but there is
no use for that functionality.
Examples
--------
>>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
7.702338035732167
>>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
... meter_type='ISO 5167 orifice', taps='D')
0.04999999990831885
'''
if m is None and None not in (D, D2, P1, P2):
def to_solve(m_D):
m = m_D*D
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
err = m - m_calc
return err
# Diameter to mass flow ratio
m_D_guess = 40
if rho < 100.0:
m_D_guess *= 1e-2
return secant(to_solve, m_D_guess)*D
elif D2 is None and None not in (D, m, P1, P2):
def to_solve(D2):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, D*(1-1E-9), D*5E-3)
elif P2 is None and None not in (D, D2, m, P1):
def to_solve(P2):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, P1*(1-1E-9), P1*0.5)
elif P1 is None and None not in (D, D2, m, P2):
def to_solve(P1):
epsilon, C = differential_pressure_meter_C_epsilon(D, D2, m, P1, P2, rho,
mu, k, meter_type,
taps=taps)
m_calc = flow_meter_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
C=C, expansibility=epsilon)
return m - m_calc
return brenth(to_solve, P2*(1+1E-9), P2*1.4)
else:
        raise ValueError('Solver is capable of solving for one of P1, P2, D2, or m only.')
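To round out the doctests in the docstring, the same inputs can also be used to solve for the downstream pressure; the exact float depends on the solver tolerance, so only the approximate value is noted.

P2 = differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0, m=7.702338,
                                        rho=999.1, mu=0.0011, k=1.33,
                                        meter_type='ISO 5167 orifice', taps='D')
# P2 should come back close to 183000.0 Pa, consistent with the round trip shown above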
def make_str_node(rawtext, app, prefixed_name, obj, parent, modname, options):
"""Render a Python object to text using the repr() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
    :param modname: The name of the module containing obj.
:param options: Options dictionary passed to role func.
"""
text = str(obj)
node = nodes.Text(text, rawsource=rawtext)
    return node | Render a Python object to text using the str() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
:param modname: The name of the module containing obj.
:param options: Options dictionary passed to role func. | Below is the the instruction that describes the task:
### Input:
Render a Python object to text using the str() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
:param modname: The name of the module containing obj.
:param options: Options dictionary passed to role func.
### Response:
def make_str_node(rawtext, app, prefixed_name, obj, parent, modname, options):
"""Render a Python object to text using the repr() function.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param prefixed_name: The dotted Python name for obj.
:param obj: The Python object to be rendered to text.
:param parent: The parent Python object of obj.
    :param modname: The name of the module containing obj.
:param options: Options dictionary passed to role func.
"""
text = str(obj)
node = nodes.Text(text, rawsource=rawtext)
return node |
def ffill(self, dim, limit=None):
        '''Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import ffill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
        return new | Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset | Below is the the instruction that describes the task:
### Input:
Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
### Response:
def ffill(self, dim, limit=None):
        '''Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : str
Specifies the dimension along which to propagate values when
filling.
limit : int, default None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
'''
from .missing import ffill, _apply_over_vars_with_dim
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
return new |
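A small sketch with xarray, assuming the method above is the Dataset.ffill exposed by xarray (and that bottleneck is installed, as the docstring requires).

import numpy as np
import xarray as xr

ds = xr.Dataset({'t': ('x', [1.0, np.nan, np.nan, 4.0])})
filled = ds.ffill(dim='x', limit=1)
# filled['t'] is [1.0, 1.0, nan, 4.0]: only one consecutive NaN is filled because limit=1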
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval) | Display energy stats of all actors | Below is the the instruction that describes the task:
### Input:
Display energy stats of all actors
### Response:
def graphite(context, server, port, interval, prefix):
"""Display energy stats of all actors"""
fritz = context.obj
fritz.login()
sid_ttl = time.time() + 600
# Find actors and create carbon keys
click.echo(" * Requesting actors list")
simple_chars = re.compile('[^A-Za-z0-9]+')
actors = fritz.get_actors()
keys = {}
for actor in actors:
keys[actor.name] = "{}.{}".format(
prefix,
simple_chars.sub('_', actor.name)
)
# Connect to carbon
click.echo(" * Trying to connect to carbon")
timeout = 2
sock = socket.socket()
sock.settimeout(timeout)
try:
sock.connect((server, port))
except socket.timeout:
raise Exception("Took over {} second(s) to connect to {}".format(
timeout, server
))
except Exception as error:
raise Exception("unknown exception while connecting to {} - {}".format(
server, error
))
def send(key, value):
"""Send a key-value-pair to carbon"""
now = int(time.time())
payload = "{} {} {}\n".format(key, value, now)
sock.sendall(payload)
while True:
if time.time() > sid_ttl:
click.echo(" * Requesting new SID")
fritz.login()
sid_ttl = time.time() + 600
click.echo(" * Requesting statistics")
for actor in actors:
power = actor.get_power()
total = actor.get_energy()
click.echo(" -> {}: {:.2f} Watt current, {:.3f} wH total".format(
actor.name, power / 1000, total / 100
))
send(keys[actor.name] + '.current', power)
send(keys[actor.name] + '.total', total)
time.sleep(interval) |
def line(name, content=None, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True, create=False, user=None,
group=None, file_mode=None):
'''
Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line. Allowed to be empty if mode=delete.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
:param mode:
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
:param location:
Defines where to place content in the line. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
Do not raise any exceptions. E.g. ignore the fact that the file that is
tried to be edited does not exist and nothing really happened.
:param indent:
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
:param create:
Create an empty file if it doesn't exist.
.. versionadded:: 2016.11.0
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: 2016.11.0
:param group:
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored.
.. versionadded:: 2016.11.0
:param file_mode:
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows.
.. versionadded:: 2016.11.0
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*?
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.line')
managed(
name,
create=create,
user=user,
group=group,
mode=file_mode,
replace=False)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# We've set the content to be empty in the function params but we want to make sure
# it gets passed when needed. Feature #37092
mode = mode and mode.lower() or mode
if mode is None:
return _error(ret, 'Mode was not defined. How to process the file?')
modeswithemptycontent = ['delete']
if mode not in modeswithemptycontent and content is None:
return _error(ret, 'Content can only be empty if mode is {0}'.format(modeswithemptycontent))
del modeswithemptycontent
changes = __salt__['file.line'](
name, content, match=match, mode=mode, location=location,
before=before, after=after, show_changes=show_changes,
backup=backup, quiet=quiet, indent=indent)
if changes:
ret['changes']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret | Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line. Allowed to be empty if mode=delete.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
:param mode:
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
:param location:
Defines where to place content in the line. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
Do not raise any exceptions. E.g. ignore the fact that the file that is
tried to be edited does not exist and nothing really happened.
:param indent:
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
:param create:
Create an empty file if it doesn't exist.
.. versionadded:: 2016.11.0
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: 2016.11.0
:param group:
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored.
.. versionadded:: 2016.11.0
:param file_mode:
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows.
.. versionadded:: 2016.11.0
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*? | Below is the the instruction that describes the task:
### Input:
Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line. Allowed to be empty if mode=delete.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
:param mode:
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
:param location:
Defines where to place content in the line. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
Do not raise any exceptions. E.g. ignore the fact that the file that is
tried to be edited does not exist and nothing really happened.
:param indent:
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
:param create:
Create an empty file if it doesn't exist.
.. versionadded:: 2016.11.0
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: 2016.11.0
:param group:
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored.
.. versionadded:: 2016.11.0
:param file_mode:
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows.
.. versionadded:: 2016.11.0
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*?
### Response:
def line(name, content=None, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True, create=False, user=None,
group=None, file_mode=None):
'''
Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line. Allowed to be empty if mode=delete.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
:param mode:
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
:param location:
Defines where to place content in the line. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
Do not raise any exceptions. E.g. ignore the fact that the file that is
tried to be edited does not exist and nothing really happened.
:param indent:
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
:param create:
Create an empty file if it doesn't exist.
.. versionadded:: 2016.11.0
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: 2016.11.0
:param group:
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored.
.. versionadded:: 2016.11.0
:param file_mode:
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows.
.. versionadded:: 2016.11.0
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*?
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.line')
managed(
name,
create=create,
user=user,
group=group,
mode=file_mode,
replace=False)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# We've set the content to be empty in the function params but we want to make sure
# it gets passed when needed. Feature #37092
mode = mode and mode.lower() or mode
if mode is None:
return _error(ret, 'Mode was not defined. How to process the file?')
modeswithemptycontent = ['delete']
if mode not in modeswithemptycontent and content is None:
return _error(ret, 'Content can only be empty if mode is {0}'.format(modeswithemptycontent))
del modeswithemptycontent
changes = __salt__['file.line'](
name, content, match=match, mode=mode, location=location,
before=before, after=after, show_changes=show_changes,
backup=backup, quiet=quiet, indent=indent)
if changes:
ret['changes']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret |
def attach_kernel_driver(self, interface):
r"""Re-attach an interface's kernel driver, which was previously
detached using detach_kernel_driver().
The interface parameter is the device interface number to attach the
driver to.
"""
self._ctx.managed_open()
self._ctx.backend.attach_kernel_driver(
self._ctx.handle,
interface) | r"""Re-attach an interface's kernel driver, which was previously
detached using detach_kernel_driver().
The interface parameter is the device interface number to attach the
driver to. | Below is the the instruction that describes the task:
### Input:
r"""Re-attach an interface's kernel driver, which was previously
detached using detach_kernel_driver().
The interface parameter is the device interface number to attach the
driver to.
### Response:
def attach_kernel_driver(self, interface):
r"""Re-attach an interface's kernel driver, which was previously
detached using detach_kernel_driver().
The interface parameter is the device interface number to attach the
driver to.
"""
self._ctx.managed_open()
self._ctx.backend.attach_kernel_driver(
self._ctx.handle,
interface) |
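A usage sketch for the detach/attach pair in PyUSB, assuming a Linux host and hypothetical vendor/product IDs; the interface must be released by the kernel driver before this process can claim it, and handed back afterwards:

import usb.core

dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)  # hypothetical IDs
if dev is None:
    raise ValueError("device not found")

interface = 0
if dev.is_kernel_driver_active(interface):
    dev.detach_kernel_driver(interface)
try:
    # ... claim the interface and perform transfers here ...
    pass
finally:
    # Hand the interface back to the kernel driver (e.g. usbhid, cdc_acm).
    dev.attach_kernel_driver(interface)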
def ed25519_private_key_to_string(key):
"""Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to convert to a string.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.private_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PrivateFormat.Raw,
encryption_algorithm=serialization.NoEncryption()
), None).decode('utf-8') | Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to convert to a string.
Returns:
str: the key representation as a str | Below is the the instruction that describes the task:
### Input:
Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to convert to a string.
Returns:
str: the key representation as a str
### Response:
def ed25519_private_key_to_string(key):
"""Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to convert to a string.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.private_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PrivateFormat.Raw,
encryption_algorithm=serialization.NoEncryption()
), None).decode('utf-8') |
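A round-trip sketch using the cryptography package (version 2.6+ provides Ed25519), generating a fresh key and encoding it the same way; the 32 raw private-key bytes always serialize to a 44-character base64 string:

import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

key = Ed25519PrivateKey.generate()
raw = key.private_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PrivateFormat.Raw,
    encryption_algorithm=serialization.NoEncryption(),
)
encoded = base64.b64encode(raw).decode('utf-8')
print(len(raw), len(encoded))  # 32 44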
def cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask | Returns an IPv4 netmask | Below is the the instruction that describes the task:
### Input:
Returns an IPv4 netmask
### Response:
def cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask |
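A few worked values for the helper above, assuming it is imported from the surrounding module:

print(cidr_to_ipv4_netmask(24))    # 255.255.255.0
print(cidr_to_ipv4_netmask(19))    # 255.255.224.0  (255, 255, 256 - 2**(8-3), 0)
print(cidr_to_ipv4_netmask('16'))  # 255.255.0.0    (string input is accepted)
print(cidr_to_ipv4_netmask(33))    # ''             (out of range)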
def disconnect(self, node):
"""
Disconnect node
:param node:
:return:
"""
if super(OneOrMore, self).__len__() < 2:
raise AttemptedCardinalityViolation("One or more expected")
return super(OneOrMore, self).disconnect(node) | Disconnect node
:param node:
:return: | Below is the the instruction that describes the task:
### Input:
Disconnect node
:param node:
:return:
### Response:
def disconnect(self, node):
"""
Disconnect node
:param node:
:return:
"""
if super(OneOrMore, self).__len__() < 2:
raise AttemptedCardinalityViolation("One or more expected")
return super(OneOrMore, self).disconnect(node) |
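A sketch of how this cardinality guard surfaces in a neomodel model, assuming neomodel's documented OneOrMore API and a reachable Neo4j instance:

from neomodel import (StructuredNode, StringProperty, RelationshipTo,
                      OneOrMore, AttemptedCardinalityViolation)

class Country(StructuredNode):
    code = StringProperty(unique_index=True)

class Person(StructuredNode):
    name = StringProperty()
    # At least one connected Country must remain at all times.
    country = RelationshipTo(Country, 'IS_FROM', cardinality=OneOrMore)

person = Person(name='Ada').save()
de = Country(code='DE').save()
person.country.connect(de)
try:
    person.country.disconnect(de)  # would leave zero -> raises
except AttemptedCardinalityViolation:
    pass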
def do_filesize(self, line):
"""filesize FILE
Prints the size of the file, in bytes. This function is primarily
for testing.
"""
if len(line) == 0:
print_err("Must provide a filename")
return
filename = resolve_path(line)
self.print(auto(get_filesize, filename)) | filesize FILE
Prints the size of the file, in bytes. This function is primarily
for testing. | Below is the the instruction that describes the task:
### Input:
filesize FILE
Prints the size of the file, in bytes. This function is primarily
for testing.
### Response:
def do_filesize(self, line):
"""filesize FILE
Prints the size of the file, in bytes. This function is primarily
for testing.
"""
if len(line) == 0:
print_err("Must provide a filename")
return
filename = resolve_path(line)
self.print(auto(get_filesize, filename)) |
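Here auto() dispatches get_filesize either locally or to the attached MicroPython board; a plausible shape of that helper (illustrative only, not necessarily rshell's exact source) is:

import os

def get_filesize(filename):
    # os.stat()[6] is st_size and works on both CPython and MicroPython.
    try:
        return os.stat(filename)[6]
    except OSError:
        return -1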
def export_icon(self, icon, size, color='black', scale='auto',
filename=None, export_dir='exported'):
"""
Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory
"""
org_size = size
size = max(150, size)
image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(image)
if scale == 'auto':
scale_factor = 1
else:
scale_factor = float(scale)
font = ImageFont.truetype(self.ttf_file, int(size * scale_factor))
width, height = draw.textsize(self.css_icons[icon], font=font)
# If auto-scaling is enabled, we need to make sure the resulting
# graphic fits inside the boundary. The values are rounded and may be
# off by a pixel or two, so we may need to do a few iterations.
# The use of a decrementing multiplication factor protects us from
# getting into an infinite loop.
if scale == 'auto':
iteration = 0
factor = 1
while True:
width, height = draw.textsize(self.css_icons[icon], font=font)
# Check if the image fits
dim = max(width, height)
if dim > size:
font = ImageFont.truetype(self.ttf_file,
int(size * size/dim * factor))
else:
break
# Adjust the factor every two iterations
iteration += 1
if iteration % 2 == 0:
factor *= 0.99
draw.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=color)
# Get bounding box
bbox = image.getbbox()
# Create an alpha mask
image_mask = Image.new("L", (size, size), 0)
draw_mask = ImageDraw.Draw(image_mask)
# Draw the icon on the mask
draw_mask.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=255)
# Create a solid color image and apply the mask
icon_image = Image.new("RGBA", (size, size), color)
icon_image.putalpha(image_mask)
if bbox:
icon_image = icon_image.crop(bbox)
border_w = int((size - (bbox[2] - bbox[0])) / 2)
border_h = int((size - (bbox[3] - bbox[1])) / 2)
# Create output image
out_image = Image.new("RGBA", (size, size), (0, 0, 0, 0))
out_image.paste(icon_image, (border_w, border_h))
# If necessary, scale the image to the target size
if org_size != size:
out_image = out_image.resize((org_size, org_size), Image.ANTIALIAS)
# Make sure export directory exists
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# Default filename
if not filename:
filename = icon + '.png'
# Save file
out_image.save(os.path.join(export_dir, filename)) | Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory | Below is the the instruction that describes the task:
### Input:
Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory
### Response:
def export_icon(self, icon, size, color='black', scale='auto',
filename=None, export_dir='exported'):
"""
Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory
"""
org_size = size
size = max(150, size)
image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(image)
if scale == 'auto':
scale_factor = 1
else:
scale_factor = float(scale)
font = ImageFont.truetype(self.ttf_file, int(size * scale_factor))
width, height = draw.textsize(self.css_icons[icon], font=font)
# If auto-scaling is enabled, we need to make sure the resulting
# graphic fits inside the boundary. The values are rounded and may be
# off by a pixel or two, so we may need to do a few iterations.
# The use of a decrementing multiplication factor protects us from
# getting into an infinite loop.
if scale == 'auto':
iteration = 0
factor = 1
while True:
width, height = draw.textsize(self.css_icons[icon], font=font)
# Check if the image fits
dim = max(width, height)
if dim > size:
font = ImageFont.truetype(self.ttf_file,
int(size * size/dim * factor))
else:
break
# Adjust the factor every two iterations
iteration += 1
if iteration % 2 == 0:
factor *= 0.99
draw.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=color)
# Get bounding box
bbox = image.getbbox()
# Create an alpha mask
image_mask = Image.new("L", (size, size), 0)
draw_mask = ImageDraw.Draw(image_mask)
# Draw the icon on the mask
draw_mask.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=255)
# Create a solid color image and apply the mask
icon_image = Image.new("RGBA", (size, size), color)
icon_image.putalpha(image_mask)
if bbox:
icon_image = icon_image.crop(bbox)
border_w = int((size - (bbox[2] - bbox[0])) / 2)
border_h = int((size - (bbox[3] - bbox[1])) / 2)
# Create output image
out_image = Image.new("RGBA", (size, size), (0, 0, 0, 0))
out_image.paste(icon_image, (border_w, border_h))
# If necessary, scale the image to the target size
if org_size != size:
out_image = out_image.resize((org_size, org_size), Image.ANTIALIAS)
# Make sure export directory exists
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# Default filename
if not filename:
filename = icon + '.png'
# Save file
out_image.save(os.path.join(export_dir, filename)) |
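The core trick above is drawing the glyph into a greyscale mask and using that mask as the alpha channel of a solid-colour image, which preserves the anti-aliased edges. A condensed sketch with a hypothetical font file and codepoint (exact codepoints vary between icon-font versions); draw.textbbox() requires Pillow 8+:

from PIL import Image, ImageDraw, ImageFont

size = 150
font = ImageFont.truetype('fontawesome-webfont.ttf', size)  # hypothetical path
glyph = '\uf015'  # e.g. the 'home' icon in Font Awesome 4

mask = Image.new('L', (size, size), 0)
draw = ImageDraw.Draw(mask)
left, top, right, bottom = draw.textbbox((0, 0), glyph, font=font)
draw.text(((size - (right - left)) / 2 - left,
           (size - (bottom - top)) / 2 - top),
          glyph, font=font, fill=255)

icon = Image.new('RGBA', (size, size), 'black')
icon.putalpha(mask)   # mask becomes the alpha channel
icon.save('home.png')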