code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def label_to_node(self, selection='leaves'):
'''Return a dictionary mapping labels (strings) to ``Node`` objects
* If ``selection`` is ``"all"``, the dictionary will contain all nodes
* If ``selection`` is ``"leaves"``, the dictionary will only contain leaves
* If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes
* If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection``
* If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained
Args:
``selection`` (``str`` or ``set``): The selection of nodes to get
* ``"all"`` to select all nodes
* ``"leaves"`` to select leaves
* ``"internal"`` to select internal nodes
* A ``set`` of labels to specify nodes to select
Returns:
``dict``: Dictionary mapping labels to the corresponding nodes
'''
if not isinstance(selection,set) and not isinstance(selection,list) and (not isinstance(selection,str) or selection not in ('all', 'leaves', 'internal')):
raise RuntimeError('"selection" must be one of the strings "all", "leaves", or "internal", or it must be a set containing Node labels')
if isinstance(selection, str):
selection = selection[0]
elif isinstance(selection,list):
selection = set(selection)
label_to_node = dict()
for node in self.traverse_preorder():
if selection == 'a' or (selection == 'i' and not node.is_leaf()) or (selection == 'l' and node.is_leaf()) or str(node) in selection:
label_to_node[str(node)] = node
if not isinstance(selection,str) and len(label_to_node) != len(selection):
warn("Not all given labels exist in the tree")
return label_to_node | Return a dictionary mapping labels (strings) to ``Node`` objects
* If ``selection`` is ``"all"``, the dictionary will contain all nodes
* If ``selection`` is ``"leaves"``, the dictionary will only contain leaves
* If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes
* If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection``
* If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained
Args:
``selection`` (``str`` or ``set``): The selection of nodes to get
* ``"all"`` to select all nodes
* ``"leaves"`` to select leaves
* ``"internal"`` to select internal nodes
* A ``set`` of labels to specify nodes to select
Returns:
``dict``: Dictionary mapping labels to the corresponding nodes | Below is the instruction that describes the task:
### Input:
Return a dictionary mapping labels (strings) to ``Node`` objects
* If ``selection`` is ``"all"``, the dictionary will contain all nodes
* If ``selection`` is ``"leaves"``, the dictionary will only contain leaves
* If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes
* If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection``
* If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained
Args:
``selection`` (``str`` or ``set``): The selection of nodes to get
* ``"all"`` to select all nodes
* ``"leaves"`` to select leaves
* ``"internal"`` to select internal nodes
* A ``set`` of labels to specify nodes to select
Returns:
``dict``: Dictionary mapping labels to the corresponding nodes
### Response:
def label_to_node(self, selection='leaves'):
'''Return a dictionary mapping labels (strings) to ``Node`` objects
* If ``selection`` is ``"all"``, the dictionary will contain all nodes
* If ``selection`` is ``"leaves"``, the dictionary will only contain leaves
* If ``selection`` is ``"internal"``, the dictionary will only contain internal nodes
* If ``selection`` is a ``set``, the dictionary will contain all nodes labeled by a label in ``selection``
* If multiple nodes are labeled by a given label, only the last (preorder traversal) will be obtained
Args:
``selection`` (``str`` or ``set``): The selection of nodes to get
* ``"all"`` to select all nodes
* ``"leaves"`` to select leaves
* ``"internal"`` to select internal nodes
* A ``set`` of labels to specify nodes to select
Returns:
``dict``: Dictionary mapping labels to the corresponding nodes
'''
if not isinstance(selection,set) and not isinstance(selection,list) and (not isinstance(selection,str) or selection not in ('all', 'leaves', 'internal')):
raise RuntimeError('"selection" must be one of the strings "all", "leaves", or "internal", or it must be a set containing Node labels')
if isinstance(selection, str):
selection = selection[0]
elif isinstance(selection,list):
selection = set(selection)
label_to_node = dict()
for node in self.traverse_preorder():
if selection == 'a' or (selection == 'i' and not node.is_leaf()) or (selection == 'l' and node.is_leaf()) or str(node) in selection:
label_to_node[str(node)] = node
if not isinstance(selection,str) and len(label_to_node) != len(selection):
warn("Not all given labels exist in the tree")
return label_to_node |
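A brief usage sketch for the label_to_node method above. It assumes this is the treeswift Tree API (read_tree_newick builds a Tree and node labels behave as the code suggests); the Newick string and labels are purely illustrative.

from treeswift import read_tree_newick  # assumed entry point for building a Tree

tree = read_tree_newick('((A:1,B:1)X:1,C:2)R;')
leaves = tree.label_to_node()                        # default 'leaves' -> keys 'A', 'B', 'C'
internal = tree.label_to_node(selection='internal')  # 'internal' -> keys 'X', 'R'
subset = tree.label_to_node({'A', 'C'})              # explicit label set -> keys 'A', 'C'
print(sorted(leaves), sorted(internal), sorted(subset))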
def remove_from_ptr_size(self, ptr_size):
# type: (int) -> bool
'''
Remove the space for a path table record from the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being removed from this Volume Descriptor.
Returns:
True if extents need to be removed from the Volume Descriptor, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
# Next remove from the Path Table Record size.
self.path_tbl_size -= ptr_size
new_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2
need_remove_extents = False
if new_extents > self.path_table_num_extents:
# This should never happen.
raise pycdlibexception.PyCdlibInvalidInput('This should never happen')
elif new_extents < self.path_table_num_extents:
self.path_table_num_extents -= 2
need_remove_extents = True
return need_remove_extents | Remove the space for a path table record from the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being removed from this Volume Descriptor.
Returns:
True if extents need to be removed from the Volume Descriptor, False otherwise. | Below is the instruction that describes the task:
### Input:
Remove the space for a path table record from the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being removed from this Volume Descriptor.
Returns:
True if extents need to be removed from the Volume Descriptor, False otherwise.
### Response:
def remove_from_ptr_size(self, ptr_size):
# type: (int) -> bool
'''
Remove the space for a path table record from the volume descriptor.
Parameters:
ptr_size - The length of the Path Table Record being removed from this Volume Descriptor.
Returns:
True if extents need to be removed from the Volume Descriptor, False otherwise.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
# Next remove from the Path Table Record size.
self.path_tbl_size -= ptr_size
new_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2
need_remove_extents = False
if new_extents > self.path_table_num_extents:
# This should never happen.
raise pycdlibexception.PyCdlibInvalidInput('This should never happen')
elif new_extents < self.path_table_num_extents:
self.path_table_num_extents -= 2
need_remove_extents = True
return need_remove_extents |
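A small sketch of the extent arithmetic used above, with a local stand-in for utils.ceiling_div (the real helper lives in pycdlib's utils module); the byte counts are made up for illustration.

def ceiling_div(numerator, denominator):
    # Stand-in for pycdlib's utils.ceiling_div: integer ceiling division.
    return -(-numerator // denominator)

path_tbl_size = 10000                      # hypothetical current path table size in bytes
for removed in (100, 2000):
    new_size = path_tbl_size - removed
    new_extents = ceiling_div(new_size, 4096) * 2
    print(removed, new_size, new_extents)  # 6 extents at 9900 bytes, 4 extents once it drops to 8000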
def validateBusName(n):
"""
Verifies that the supplied name is a valid DBus Bus name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus bus name
"""
try:
if '.' not in n:
raise Exception('At least two components required')
if '..' in n:
raise Exception('".." not allowed in bus names')
if len(n) > 255:
raise Exception('Name exceeds maximum length of 255')
if n[0] == '.':
raise Exception('Names may not begin with a "."')
if n[0].isdigit():
raise Exception('Names may not begin with a digit')
if bus_re.search(n):
raise Exception(
'Name contains a character outside the set [A-Za-z0-9_.\\-:]')
if not n[0] == ':' and dot_digit_re.search(n):
raise Exception(
'No components of a bus name may begin with a digit')
except Exception as e:
raise MarshallingError('Invalid bus name "%s": %s' % (n, str(e))) | Verifies that the supplied name is a valid DBus Bus name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus bus name | Below is the instruction that describes the task:
### Input:
Verifies that the supplied name is a valid DBus Bus name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus bus name
### Response:
def validateBusName(n):
"""
Verifies that the supplied name is a valid DBus Bus name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus bus name
"""
try:
if '.' not in n:
raise Exception('At least two components required')
if '..' in n:
raise Exception('".." not allowed in bus names')
if len(n) > 255:
raise Exception('Name exceeds maximum length of 255')
if n[0] == '.':
raise Exception('Names may not begin with a "."')
if n[0].isdigit():
raise Exception('Names may not begin with a digit')
if bus_re.search(n):
raise Exception(
'Name contains a character outside the set [A-Za-z0-9_.\\-:]')
if not n[0] == ':' and dot_digit_re.search(n):
raise Exception(
'No components of a bus name may begin with a digit')
except Exception as e:
raise MarshallingError('Invalid bus name "%s": %s' % (n, str(e))) |
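validateBusName depends on module-level bus_re and dot_digit_re patterns (and MarshallingError) that this row does not show. The definitions below are plausible stand-ins for illustration only, not necessarily txdbus's exact patterns; pasted above the function in one file, they make the usage runnable.

import re

class MarshallingError(Exception):   # stand-in for txdbus's error type
    pass

bus_re = re.compile(r'[^A-Za-z0-9_.\-:]')   # any character outside the allowed set
dot_digit_re = re.compile(r'\.\d')          # a name component starting with a digit

validateBusName('org.example.Service')      # passes silently
try:
    validateBusName('org.example.1bad')
except MarshallingError as e:
    print(e)   # Invalid bus name "org.example.1bad": No components of a bus name may begin with a digit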
def get_release_data(self, package_name: str, version: str) -> Tuple[str, str, str]:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_id = self.registry._get_release_id(package_name, version)
return self.get_release_id_data(release_id) | Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version. | Below is the instruction that describes the task:
### Input:
Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
### Response:
def get_release_data(self, package_name: str, version: str) -> Tuple[str, str, str]:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_id = self.registry._get_release_id(package_name, version)
return self.get_release_id_data(release_id) |
def home_dir():
'''
Returns:
str : Path to home directory (or ``Documents`` directory on Windows).
'''
if os.name == 'nt':
from win32com.shell import shell, shellcon
return shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
else:
return os.path.expanduser('~') | Returns:
str : Path to home directory (or ``Documents`` directory on Windows). | Below is the instruction that describes the task:
### Input:
Returns:
str : Path to home directory (or ``Documents`` directory on Windows).
### Response:
def home_dir():
'''
Returns:
str : Path to home directory (or ``Documents`` directory on Windows).
'''
if os.name == 'nt':
from win32com.shell import shell, shellcon
return shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
else:
return os.path.expanduser('~') |
def count_tokens(tokens, to_lower=False, counter=None):
r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tokens.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter : Counter or None, default None
The Counter instance to be updated with the counts of `tokens`. If
None, return a new Counter instance counting tokens from `tokens`.
Returns
-------
The `counter` Counter instance after being updated with the token
counts of `source_str`. If `counter` is None, return a new Counter
instance counting tokens from `source_str`.
Examples
--------
>>> import re
>>> source_str = ' Life is great ! \n life is good . \n'
>>> source_str_tokens = filter(None, re.split(' |\n', source_str))
>>> gluonnlp.data.count_tokens(source_str_tokens)
Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
"""
if to_lower:
tokens = [t.lower() for t in tokens]
if counter is None:
return Counter(tokens)
else:
counter.update(tokens)
return counter | r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tokens.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter : Counter or None, default None
The Counter instance to be updated with the counts of `tokens`. If
None, return a new Counter instance counting tokens from `tokens`.
Returns
-------
The `counter` Counter instance after being updated with the token
counts of `source_str`. If `counter` is None, return a new Counter
instance counting tokens from `source_str`.
Examples
--------
>>> import re
>>> source_str = ' Life is great ! \n life is good . \n'
>>> source_str_tokens = filter(None, re.split(' |\n', source_str))
>>> gluonnlp.data.count_tokens(source_str_tokens)
Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1}) | Below is the instruction that describes the task:
### Input:
r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tokens.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter : Counter or None, default None
The Counter instance to be updated with the counts of `tokens`. If
None, return a new Counter instance counting tokens from `tokens`.
Returns
-------
The `counter` Counter instance after being updated with the token
counts of `source_str`. If `counter` is None, return a new Counter
instance counting tokens from `source_str`.
Examples
--------
>>> import re
>>> source_str = ' Life is great ! \n life is good . \n'
>>> source_str_tokens = filter(None, re.split(' |\n', source_str))
>>> gluonnlp.data.count_tokens(source_str_tokens)
Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
### Response:
def count_tokens(tokens, to_lower=False, counter=None):
r"""Counts tokens in the specified string.
For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
look like::
(td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)
Parameters
----------
tokens : list of str
A source list of tokens.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter : Counter or None, default None
The Counter instance to be updated with the counts of `tokens`. If
None, return a new Counter instance counting tokens from `tokens`.
Returns
-------
The `counter` Counter instance after being updated with the token
counts of `source_str`. If `counter` is None, return a new Counter
instance counting tokens from `source_str`.
Examples
--------
>>> import re
>>> source_str = ' Life is great ! \n life is good . \n'
>>> source_str_tokens = filter(None, re.split(' |\n', source_str))
>>> gluonnlp.data.count_tokens(source_str_tokens)
Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
"""
if to_lower:
tokens = [t.lower() for t in tokens]
if counter is None:
return Counter(tokens)
else:
counter.update(tokens)
return counter |
def get_assessments_offered_by_query(self, assessment_offered_query):
"""Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in assessment_offered_query._query_terms:
if '$in' in assessment_offered_query._query_terms[term] and '$nin' in assessment_offered_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_offered_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_offered_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_offered_query._query_terms[term]})
for term in assessment_offered_query._keyword_terms:
or_list.append({term: assessment_offered_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentOfferedList(result, runtime=self._runtime, proxy=self._proxy) | Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessments_offered_by_query(self, assessment_offered_query):
"""Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
arg: assessment_offered_query
(osid.assessment.AssessmentOfferedQuery): the assessment
offered query
return: (osid.assessment.AssessmentOfferedList) - the returned
``AssessmentOfferedList``
raise: NullArgument - ``assessment_offered_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_query`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in assessment_offered_query._query_terms:
if '$in' in assessment_offered_query._query_terms[term] and '$nin' in assessment_offered_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_offered_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_offered_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_offered_query._query_terms[term]})
for term in assessment_offered_query._keyword_terms:
or_list.append({term: assessment_offered_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentOfferedList(result, runtime=self._runtime, proxy=self._proxy) |
def handle_endtag(self, tag):
"""
Called by HTMLParser.feed when an end tag is found.
"""
if tag in PARENT_ELEMENTS:
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = ''
if tag == 'li':
self.parsing_li = True
if tag != 'br':
self.cleaned_html += '</{}>'.format(tag) | Called by HTMLParser.feed when an end tag is found. | Below is the the instruction that describes the task:
### Input:
Called by HTMLParser.feed when an end tag is found.
### Response:
def handle_endtag(self, tag):
"""
Called by HTMLParser.feed when an end tag is found.
"""
if tag in PARENT_ELEMENTS:
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = ''
if tag == 'li':
self.parsing_li = True
if tag != 'br':
self.cleaned_html += '</{}>'.format(tag) |
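handle_endtag is a callback on an html.parser.HTMLParser subclass. The stripped-down class below only mirrors the end-tag bookkeeping above to show how feed() drives the callback; PARENT_ELEMENTS, current_parent_element and cleaned_html are assumptions about the surrounding class.

from html.parser import HTMLParser

PARENT_ELEMENTS = {'p', 'ul', 'ol', 'pre'}   # assumed set of block-level parent tags

class MiniCleaner(HTMLParser):
    def __init__(self):
        super().__init__()
        self.current_parent_element = {'tag': '', 'attrs': ''}
        self.cleaned_html = ''

    def handle_starttag(self, tag, attrs):
        if tag != 'br':
            self.cleaned_html += '<{}>'.format(tag)

    def handle_endtag(self, tag):
        if tag in PARENT_ELEMENTS:
            self.current_parent_element['tag'] = ''
            self.current_parent_element['attrs'] = ''
        if tag != 'br':
            self.cleaned_html += '</{}>'.format(tag)

parser = MiniCleaner()
parser.feed('<p>hello<br></p>')
print(parser.cleaned_html)   # -> '<p></p>' (text nodes are ignored in this minimal sketch)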
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br | trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...] | Below is the instruction that describes the task:
### Input:
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
### Response:
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br |
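A small NumPy sketch of the axis conventions catvl relies on: altitude varies along axis -2 and reactions along axis -1, so column brightness is a trapezoidal integral over axis -2. The shapes and wavelengths are illustrative, and it assumes a NumPy version that still provides np.trapz, as the function above does.

import numpy as np

z = np.linspace(100, 500, 50)           # altitude grid, hypothetical
ver = np.random.rand(50, 3)             # volume emission rate: (altitude, reaction)
vnew = np.random.rand(50, 2)            # two more reactions on the same grid
lamb = np.array([557.7, 630.0, 777.4])
lambnew = np.array([427.8, 844.6])

br = np.trapz(ver, z, axis=-2)          # column brightness per reaction, shape (3,)
ver_all, lamb_all, br_all = catvl(z, ver, vnew, lamb, lambnew, br)
print(ver_all.shape, lamb_all.shape, br_all.shape)   # (50, 5) (5,) (5,)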
def stop_process(self, pids, status='success'):
'''
stop_process(self, pids, status='success')
Stops a running process
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated
'''
if status not in process_result_statuses:
raise OperetoClientError('Invalid process result [%s]'%status)
pids = self._get_pids(pids)
for pid in pids:
self._call_rest_api('post', '/processes/'+pid+'/terminate/'+status, error='Failed to stop process') | stop_process(self, pids, status='success')
Stops a running process
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated | Below is the instruction that describes the task:
### Input:
stop_process(self, pids, status='success')
Stops a running process
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated
### Response:
def stop_process(self, pids, status='success'):
'''
stop_process(self, pids, status='success')
Stops a running process
:Parameters:
* *pid* (`string`) -- Identifier of an existing process
* *result* (`string`) -- the value the process will be terminated with. Any of the following possible values: success , failure , error , warning , terminated
'''
if status not in process_result_statuses:
raise OperetoClientError('Invalid process result [%s]'%status)
pids = self._get_pids(pids)
for pid in pids:
self._call_rest_api('post', '/processes/'+pid+'/terminate/'+status, error='Failed to stop process') |
def detach(gandi, resource, background, force):
""" Detach disks from currectly attached vm.
Resource can be a disk name, or ID
"""
resource = sorted(tuple(set(resource)))
if not force:
proceed = click.confirm('Are you sure you want to detach %s?' %
', '.join(resource))
if not proceed:
return
result = gandi.disk.detach(resource, background)
if background:
gandi.pretty_echo(result)
return result | Detach disks from currently attached vm.
Resource can be a disk name, or ID | Below is the instruction that describes the task:
### Input:
Detach disks from currently attached vm.
Resource can be a disk name, or ID
### Response:
def detach(gandi, resource, background, force):
""" Detach disks from currectly attached vm.
Resource can be a disk name, or ID
"""
resource = sorted(tuple(set(resource)))
if not force:
proceed = click.confirm('Are you sure you want to detach %s?' %
', '.join(resource))
if not proceed:
return
result = gandi.disk.detach(resource, background)
if background:
gandi.pretty_echo(result)
return result |
def repeat_func_eof(func: Callable[[], Union[T, Awaitable[T]]], eof: Any, *, interval: float=0, use_is: bool=False) -> AsyncIterator[T]:
"""
Repeats the result of a 0-ary function until an `eof` item is reached.
The `eof` item itself is not part of the resulting stream; by setting `use_is` to true,
eof is checked by identity rather than equality.
`times` and `interval` behave exactly like with `aiostream.create.repeat`.
"""
pred = (lambda item: item != eof) if not use_is else (lambda item: (item is not eof))
base = repeat_func.raw(func, interval=interval)
return cast(AsyncIterator[T], stream.takewhile.raw(base, pred)) | Repeats the result of a 0-ary function until an `eof` item is reached.
The `eof` item itself is not part of the resulting stream; by setting `use_is` to true,
eof is checked by identity rather than equality.
`times` and `interval` behave exactly like with `aiostream.create.repeat`. | Below is the instruction that describes the task:
### Input:
Repeats the result of a 0-ary function until an `eof` item is reached.
The `eof` item itself is not part of the resulting stream; by setting `use_is` to true,
eof is checked by identity rather than equality.
`times` and `interval` behave exactly like with `aiostream.create.repeat`.
### Response:
def repeat_func_eof(func: Callable[[], Union[T, Awaitable[T]]], eof: Any, *, interval: float=0, use_is: bool=False) -> AsyncIterator[T]:
"""
Repeats the result of a 0-ary function until an `eof` item is reached.
The `eof` item itself is not part of the resulting stream; by setting `use_is` to true,
eof is checked by identity rather than equality.
`times` and `interval` behave exactly like with `aiostream.create.repeat`.
"""
pred = (lambda item: item != eof) if not use_is else (lambda item: (item is not eof))
base = repeat_func.raw(func, interval=interval)
return cast(AsyncIterator[T], stream.takewhile.raw(base, pred)) |
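A hedged usage sketch for repeat_func_eof: a plain list pop stands in for a real 0-ary source, and the sentinel is checked by identity via use_is=True. It assumes the function above and its aiostream helpers are importable, and that the returned raw async iterator can be consumed directly with async for.

import asyncio

EOF = object()                    # sentinel compared by identity
items = ['a', 'b', 'c', EOF]

def getter():
    return items.pop(0)           # hypothetical 0-ary source consumed by repeat_func_eof

async def main():
    async for item in repeat_func_eof(getter, EOF, use_is=True):
        print(item)               # prints 'a', 'b', 'c', then stops at the sentinel

asyncio.run(main())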
def aes_decrypt(key, stdin, chunk_size=65536):
"""
Generator that decrypts a content stream using AES 256 in CBC
mode.
:param key: Any string to use as the decryption key.
:param stdin: Where to read the encrypted data from.
:param chunk_size: Largest amount to read at once.
"""
if not AES256CBC_Support:
raise Exception(
'AES256CBC not supported; likely pycrypto is not installed')
# Always use 256-bit key
key = hashlib.sha256(key).digest()
# At least 16 and a multiple of 16
chunk_size = max(16, chunk_size >> 4 << 4)
iv = stdin.read(16)
while len(iv) < 16:
chunk = stdin.read(16 - len(iv))
if not chunk:
raise IOError('EOF reading IV')
decryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
data = ''
while True:
chunk = stdin.read(chunk_size)
if not chunk:
if len(data) != 16:
raise IOError('EOF reading encrypted stream')
data = decryptor.decrypt(data)
trailing = ord(data[-1])
if trailing > 15:
raise IOError(
'EOF reading encrypted stream or trailing value corrupted '
'%s' % trailing)
yield data[:trailing]
break
data += chunk
if len(data) > 16:
# Always leave at least one byte pending
trailing = (len(data) % 16) or 16
yield decryptor.decrypt(data[:-trailing])
data = data[-trailing:] | Generator that decrypts a content stream using AES 256 in CBC
mode.
:param key: Any string to use as the decryption key.
:param stdin: Where to read the encrypted data from.
:param chunk_size: Largest amount to read at once. | Below is the instruction that describes the task:
### Input:
Generator that decrypts a content stream using AES 256 in CBC
mode.
:param key: Any string to use as the decryption key.
:param stdin: Where to read the encrypted data from.
:param chunk_size: Largest amount to read at once.
### Response:
def aes_decrypt(key, stdin, chunk_size=65536):
"""
Generator that decrypts a content stream using AES 256 in CBC
mode.
:param key: Any string to use as the decryption key.
:param stdin: Where to read the encrypted data from.
:param chunk_size: Largest amount to read at once.
"""
if not AES256CBC_Support:
raise Exception(
'AES256CBC not supported; likely pycrypto is not installed')
# Always use 256-bit key
key = hashlib.sha256(key).digest()
# At least 16 and a multiple of 16
chunk_size = max(16, chunk_size >> 4 << 4)
iv = stdin.read(16)
while len(iv) < 16:
chunk = stdin.read(16 - len(iv))
if not chunk:
raise IOError('EOF reading IV')
decryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
data = ''
while True:
chunk = stdin.read(chunk_size)
if not chunk:
if len(data) != 16:
raise IOError('EOF reading encrypted stream')
data = decryptor.decrypt(data)
trailing = ord(data[-1])
if trailing > 15:
raise IOError(
'EOF reading encrypted stream or trailing value corrupted '
'%s' % trailing)
yield data[:trailing]
break
data += chunk
if len(data) > 16:
# Always leave at least one byte pending
trailing = (len(data) % 16) or 16
yield decryptor.decrypt(data[:-trailing])
data = data[-trailing:] |
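A sketch of the stream framing implied by the decryptor above: a 16-byte IV, CBC ciphertext, and a final plaintext block whose last byte says how many of its leading bytes are payload (at most 15). The generator above is written for Python 2 byte strings, so this hypothetical Python 3 counterpart is shown only to make that framing concrete; it is not the library's own encryptor.

import hashlib
import os
import Crypto.Cipher.AES

def aes_encrypt_bytes(key, payload):
    # Hypothetical counterpart: emits IV + CBC ciphertext in the framing the decryptor expects.
    key = hashlib.sha256(key).digest()
    iv = os.urandom(16)
    encryptor = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, iv)
    trailing = len(payload) % 16
    head = payload[:len(payload) - trailing]
    # Final block: up to 15 payload bytes, zero padding, then a count byte <= 15.
    last_block = payload[len(payload) - trailing:] + b'\x00' * (15 - trailing) + bytes([trailing])
    return iv + encryptor.encrypt(head) + encryptor.encrypt(last_block)

blob = aes_encrypt_bytes(b'passphrase', b'hello world')
print(len(blob))   # 16-byte IV + one 16-byte block carrying the 11 payload bytes -> 32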
def fetch_local_package(self, config):
"""Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary
"""
# Update sys.path & merge in remote configs (if necessary)
self.update_paths_and_config(config=config,
pkg_dir_name=config['source'],
pkg_cache_dir=os.getcwd()) | Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary | Below is the instruction that describes the task:
### Input:
Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary
### Response:
def fetch_local_package(self, config):
"""Make a local path available to current stacker config.
Args:
config (dict): 'local' path config dictionary
"""
# Update sys.path & merge in remote configs (if necessary)
self.update_paths_and_config(config=config,
pkg_dir_name=config['source'],
pkg_cache_dir=os.getcwd()) |
def _peg_pose_in_hole_frame(self):
"""
A helper function that takes in a named data field and returns the pose of that
object in the base frame.
"""
# World frame
peg_pos_in_world = self.sim.data.get_body_xpos("cylinder")
peg_rot_in_world = self.sim.data.get_body_xmat("cylinder").reshape((3, 3))
peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)
# World frame
hole_pos_in_world = self.sim.data.get_body_xpos("hole")
hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3))
hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)
world_pose_in_hole = T.pose_inv(hole_pose_in_world)
peg_pose_in_hole = T.pose_in_A_to_pose_in_B(
peg_pose_in_world, world_pose_in_hole
)
return peg_pose_in_hole | A helper function that takes in a named data field and returns the pose of that
object in the base frame. | Below is the instruction that describes the task:
### Input:
A helper function that takes in a named data field and returns the pose of that
object in the base frame.
### Response:
def _peg_pose_in_hole_frame(self):
"""
A helper function that takes in a named data field and returns the pose of that
object in the base frame.
"""
# World frame
peg_pos_in_world = self.sim.data.get_body_xpos("cylinder")
peg_rot_in_world = self.sim.data.get_body_xmat("cylinder").reshape((3, 3))
peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)
# World frame
hole_pos_in_world = self.sim.data.get_body_xpos("hole")
hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3))
hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)
world_pose_in_hole = T.pose_inv(hole_pose_in_world)
peg_pose_in_hole = T.pose_in_A_to_pose_in_B(
peg_pose_in_world, world_pose_in_hole
)
return peg_pose_in_hole |
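A plain-NumPy sketch of the frame algebra above, assuming T.make_pose builds a 4x4 homogeneous matrix, T.pose_inv inverts it, and T.pose_in_A_to_pose_in_B composes them; the stand-ins below only illustrate that peg_in_hole = inv(hole_in_world) @ peg_in_world, with made-up positions.

import numpy as np

def make_pose(pos, rot):
    # 4x4 homogeneous transform from a position vector and a 3x3 rotation matrix.
    pose = np.eye(4)
    pose[:3, :3] = rot
    pose[:3, 3] = pos
    return pose

peg_in_world = make_pose(np.array([0.1, 0.0, 0.9]), np.eye(3))
hole_in_world = make_pose(np.array([0.1, 0.0, 0.8]), np.eye(3))
peg_in_hole = np.linalg.inv(hole_in_world) @ peg_in_world
print(peg_in_hole[:3, 3])   # -> [0.  0.  0.1]: the peg sits 0.1 above the hole origin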
def create_question_pdfs(nb, pages_per_q, folder, zoom) -> list:
"""
Converts each cell in the notebook to a PDF named something like
'q04c.pdf'. Places PDFs in the specified folder and returns the list of
created PDF locations.
"""
html_cells = nb_to_html_cells(nb)
q_nums = nb_to_q_nums(nb)
os.makedirs(folder, exist_ok=True)
pdf_options = PDF_OPTS.copy()
pdf_options['zoom'] = ZOOM_FACTOR * zoom
pdf_names = []
for question, cell in zip(q_nums, html_cells):
# Create question PDFs
pdf_name = os.path.join(folder, '{}.pdf'.format(question))
pdfkit.from_string(cell.prettify(), pdf_name, options=pdf_options)
pad_pdf_pages(pdf_name, pages_per_q)
print('Created ' + pdf_name)
pdf_names.append(pdf_name)
return pdf_names | Converts each cell in the notebook to a PDF named something like
'q04c.pdf'. Places PDFs in the specified folder and returns the list of
created PDF locations. | Below is the instruction that describes the task:
### Input:
Converts each cell in the notebook to a PDF named something like
'q04c.pdf'. Places PDFs in the specified folder and returns the list of
created PDF locations.
### Response:
def create_question_pdfs(nb, pages_per_q, folder, zoom) -> list:
"""
Converts each cell in the notebook to a PDF named something like
'q04c.pdf'. Places PDFs in the specified folder and returns the list of
created PDF locations.
"""
html_cells = nb_to_html_cells(nb)
q_nums = nb_to_q_nums(nb)
os.makedirs(folder, exist_ok=True)
pdf_options = PDF_OPTS.copy()
pdf_options['zoom'] = ZOOM_FACTOR * zoom
pdf_names = []
for question, cell in zip(q_nums, html_cells):
# Create question PDFs
pdf_name = os.path.join(folder, '{}.pdf'.format(question))
pdfkit.from_string(cell.prettify(), pdf_name, options=pdf_options)
pad_pdf_pages(pdf_name, pages_per_q)
print('Created ' + pdf_name)
pdf_names.append(pdf_name)
return pdf_names |
def reads_supporting_variants(variants, samfile, **kwargs):
"""
Given a SAM/BAM file and a collection of variants, generates a sequence
of variants paired with reads which support each variant.
"""
for variant, allele_reads in reads_overlapping_variants(
variants=variants,
samfile=samfile,
**kwargs):
yield variant, filter_non_alt_reads_for_variant(variant, allele_reads) | Given a SAM/BAM file and a collection of variants, generates a sequence
of variants paired with reads which support each variant. | Below is the instruction that describes the task:
### Input:
Given a SAM/BAM file and a collection of variants, generates a sequence
of variants paired with reads which support each variant.
### Response:
def reads_supporting_variants(variants, samfile, **kwargs):
"""
Given a SAM/BAM file and a collection of variants, generates a sequence
of variants paired with reads which support each variant.
"""
for variant, allele_reads in reads_overlapping_variants(
variants=variants,
samfile=samfile,
**kwargs):
yield variant, filter_non_alt_reads_for_variant(variant, allele_reads) |
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
nsims = kwargs.get('nsims', 200)
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
full_X = self.X.copy()
full_X = np.append(full_X,X_oos,axis=0)
Z = full_X
date_index = self.shift_dates(h)
# Retrieve data, dates and (transformed) latent variables
if self.latent_variables.estimation_method in ['M-H']:
lower_final = 0
upper_final = 0
plot_values_final = 0
plot_index = date_index[-h-past_values:]
for i in range(nsims):
t_params = self.draw_latent_variables(nsims=1).T[0]
a, P = self._forecast_model(t_params, Z, h)
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T)
plot_values = smoothed_series[-h-past_values:]
lower = smoothed_series[-h:] - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper = smoothed_series[-h:] + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_final += np.append(plot_values[-h-1], lower)
upper_final += np.append(plot_values[-h-1], upper)
plot_values_final += plot_values
plot_values_final = plot_values_final / nsims
lower_final = lower_final / nsims
upper_final = upper_final / nsims
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower_final, upper_final, alpha=0.2)
plt.plot(plot_index, plot_values_final)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
else:
a, P = self._forecast_model(self.latent_variables.get_z_values(), h)
plot_values = a[0][-h-past_values:]
forecasted_values = a[0][-h:]
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T)
lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
lower = np.append(plot_values[-h-1],lower)
upper = np.append(plot_values[-h-1],upper)
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast | Below is the instruction that describes the task:
### Input:
Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
### Response:
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
nsims = kwargs.get('nsims', 200)
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
full_X = self.X.copy()
full_X = np.append(full_X,X_oos,axis=0)
Z = full_X
date_index = self.shift_dates(h)
# Retrieve data, dates and (transformed) latent variables
if self.latent_variables.estimation_method in ['M-H']:
lower_final = 0
upper_final = 0
plot_values_final = 0
plot_index = date_index[-h-past_values:]
for i in range(nsims):
t_params = self.draw_latent_variables(nsims=1).T[0]
a, P = self._forecast_model(t_params, Z, h)
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T)
plot_values = smoothed_series[-h-past_values:]
lower = smoothed_series[-h:] - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper = smoothed_series[-h:] + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_final += np.append(plot_values[-h-1], lower)
upper_final += np.append(plot_values[-h-1], upper)
plot_values_final += plot_values
plot_values_final = plot_values_final / nsims
lower_final = lower_final / nsims
upper_final = upper_final / nsims
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower_final, upper_final, alpha=0.2)
plt.plot(plot_index, plot_values_final)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
else:
a, P = self._forecast_model(self.latent_variables.get_z_values(), h)
plot_values = a[0][-h-past_values:]
forecasted_values = a[0][-h:]
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T)
lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
lower = np.append(plot_values[-h-1],lower)
upper = np.append(plot_values[-h-1],upper)
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() |
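A tiny NumPy sketch of the interval construction repeated throughout plot_predict: the 95% band is the smoothed forecast plus or minus 1.96 times the square root of the state variance plus the transformed observation-noise latent variable. The numbers are made up.

import numpy as np

forecast_mean = np.array([1.0, 1.1, 1.3])
state_var = np.array([0.04, 0.05, 0.07])   # plays the role of P[0][0][-h:] above
obs_noise = 0.01                           # plays the role of the transformed first latent variable
half_width = 1.96 * np.sqrt(state_var + obs_noise)
lower, upper = forecast_mean - half_width, forecast_mean + half_width
print(np.round(lower, 3), np.round(upper, 3))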
def _parse_rd(self, config):
""" _parse_rd scans the provided configuration block and extracts
the vrf rd. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vrf configuration block from the nodes running
configuration
Returns:
dict: resource dict attribute
"""
match = RD_RE.search(config)
if match:
value = match.group('value')
else:
value = match
return dict(rd=value) | _parse_rd scans the provided configuration block and extracts
the vrf rd. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vrf configuration block from the nodes running
configuration
Returns:
dict: resource dict attribute | Below is the instruction that describes the task:
### Input:
_parse_rd scans the provided configuration block and extracts
the vrf rd. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vrf configuration block from the nodes running
configuration
Returns:
dict: resource dict attribute
### Response:
def _parse_rd(self, config):
""" _parse_rd scans the provided configuration block and extracts
the vrf rd. The return dict is intended to be merged into the response
dict.
Args:
config (str): The vrf configuration block from the nodes running
configuration
Returns:
dict: resource dict attribute
"""
match = RD_RE.search(config)
if match:
value = match.group('value')
else:
value = match
return dict(rd=value) |
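RD_RE is a module-level pattern that this row does not show; the regex below is a plausible reconstruction used only to illustrate what _parse_rd pulls out of a 'vrf definition' block, and the real pyeapi pattern may differ.

import re

RD_RE = re.compile(r'(?:\s|^)rd\s+(?P<value>.+?)\s*$', re.M)   # hypothetical stand-in

config = """vrf definition blue
   rd 10:10
   description test vrf
"""
match = RD_RE.search(config)
print(dict(rd=match.group('value') if match else None))   # -> {'rd': '10:10'}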
def make_query(catalog):
"""A function to prepare a query
"""
query = {}
request = api.get_request()
index = get_search_index_for(catalog)
limit = request.form.get("limit")
q = request.form.get("q")
if len(q) > 0:
query[index] = q + "*"
else:
return None
portal_type = request.form.get("portal_type")
if portal_type:
if not isinstance(portal_type, list):
portal_type = [portal_type]
query["portal_type"] = portal_type
if limit and limit.isdigit():
query["sort_limit"] = int(limit)
return query | A function to prepare a query | Below is the the instruction that describes the task:
### Input:
A function to prepare a query
### Response:
def make_query(catalog):
"""A function to prepare a query
"""
query = {}
request = api.get_request()
index = get_search_index_for(catalog)
limit = request.form.get("limit")
q = request.form.get("q")
if len(q) > 0:
query[index] = q + "*"
else:
return None
portal_type = request.form.get("portal_type")
if portal_type:
if not isinstance(portal_type, list):
portal_type = [portal_type]
query["portal_type"] = portal_type
if limit and limit.isdigit():
query["sort_limit"] = int(limit)
return query |
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if it does not exist.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory | Generate local directory where track will be saved.
Create it if it does not exist. | Below is the instruction that describes the task:
### Input:
Generate local directory where track will be saved.
Create it if it does not exist.
### Response:
def gen_localdir(self, localdir):
"""
Generate local directory where track will be saved.
Create it if it does not exist.
"""
directory = "{0}/{1}/".format(localdir, self.get("username"))
if not os.path.exists(directory):
os.makedirs(directory)
return directory |
def _check_require_version(namespace, stacklevel):
"""A context manager which tries to give helpful warnings
about missing gi.require_version() which could potentially
break code if only an older version than expected is installed
or a new version gets introduced.
::
with _check_require_version("Gtk", stacklevel):
load_namespace_and_overrides()
"""
repository = GIRepository()
was_loaded = repository.is_registered(namespace)
yield
if was_loaded:
# it was loaded before by another import which depended on this
# namespace or by C code like libpeas
return
if namespace in ("GLib", "GObject", "Gio"):
# part of glib (we have bigger problems if versions change there)
return
if get_required_version(namespace) is not None:
# the version was forced using require_version()
return
version = repository.get_version(namespace)
warnings.warn(
"%(namespace)s was imported without specifying a version first. "
"Use gi.require_version('%(namespace)s', '%(version)s') before "
"import to ensure that the right version gets loaded."
% {"namespace": namespace, "version": version},
PyGIWarning, stacklevel=stacklevel) | A context manager which tries to give helpful warnings
about missing gi.require_version() which could potentially
break code if only an older version than expected is installed
or a new version gets introduced.
::
with _check_require_version("Gtk", stacklevel):
load_namespace_and_overrides() | Below is the instruction that describes the task:
### Input:
A context manager which tries to give helpful warnings
about missing gi.require_version() which could potentially
break code if only an older version than expected is installed
or a new version gets introduced.
::
with _check_require_version("Gtk", stacklevel):
load_namespace_and_overrides()
### Response:
def _check_require_version(namespace, stacklevel):
"""A context manager which tries to give helpful warnings
about missing gi.require_version() which could potentially
break code if only an older version than expected is installed
or a new version gets introduced.
::
with _check_require_version("Gtk", stacklevel):
load_namespace_and_overrides()
"""
repository = GIRepository()
was_loaded = repository.is_registered(namespace)
yield
if was_loaded:
# it was loaded before by another import which depended on this
# namespace or by C code like libpeas
return
if namespace in ("GLib", "GObject", "Gio"):
# part of glib (we have bigger problems if versions change there)
return
if get_required_version(namespace) is not None:
# the version was forced using require_version()
return
version = repository.get_version(namespace)
warnings.warn(
"%(namespace)s was imported without specifying a version first. "
"Use gi.require_version('%(namespace)s', '%(version)s') before "
"import to ensure that the right version gets loaded."
% {"namespace": namespace, "version": version},
PyGIWarning, stacklevel=stacklevel) |
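The generator above is consumed as a context manager (its docstring shows a with-statement); the sketch below wraps it with contextlib.contextmanager explicitly, since no decorator is visible in this row, and it assumes PyGObject (gi) is installed.

import contextlib

_checked = contextlib.contextmanager(_check_require_version)   # decorator assumed, not shown above

with _checked('Gtk', 2):
    from gi.repository import Gtk   # a PyGIWarning is emitted on exit if require_version() was never called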
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
list(G.edges_iter('multitest', keys=True, data=True))
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft() | Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004. | Below is the instruction that describes the task:
### Input:
Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
### Response:
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
list(G.edges_iter('multitest', keys=True, data=True))
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft() |
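A usage sketch for bfs_multi_edges on a small multigraph. It assumes networkx 1.x, since the G.edges_iter calls above were removed in networkx 2.x; the graph itself is illustrative.

import networkx as nx   # assumes networkx 1.x, which still provides edges_iter

G = nx.MultiDiGraph()
G.add_edge('a', 'b', key=0)
G.add_edge('a', 'b', key=1)   # parallel edge
G.add_edge('b', 'c', key=0)

for edge in bfs_multi_edges(G, 'a', keys=True):
    print(edge)               # ('a', 'b', 0), ('a', 'b', 1), ('b', 'c', 0)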
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected | Returns each of the templates with env vars injected. | Below is the the instruction that describes the task:
### Input:
Returns each of the templates with env vars injected.
### Response:
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected |
def get_statements(self):
"""Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements.
"""
edges = _get_dict_from_list('edges', self.cx)
for edge in edges:
edge_type = edge.get('i')
if not edge_type:
continue
stmt_type = _stmt_map.get(edge_type)
if stmt_type:
id = edge['@id']
source_agent = self._node_agents.get(edge['s'])
target_agent = self._node_agents.get(edge['t'])
if not source_agent or not target_agent:
logger.info("Skipping edge %s->%s: %s" %
(self._node_names[edge['s']],
self._node_names[edge['t']], edge))
continue
ev = self._create_evidence(id)
if stmt_type == Complex:
stmt = stmt_type([source_agent, target_agent], evidence=ev)
else:
stmt = stmt_type(source_agent, target_agent, evidence=ev)
self.statements.append(stmt)
return self.statements | Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements. | Below is the instruction that describes the task:
### Input:
Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements.
### Response:
def get_statements(self):
"""Convert network edges into Statements.
Returns
-------
list of Statements
Converted INDRA Statements.
"""
edges = _get_dict_from_list('edges', self.cx)
for edge in edges:
edge_type = edge.get('i')
if not edge_type:
continue
stmt_type = _stmt_map.get(edge_type)
if stmt_type:
id = edge['@id']
source_agent = self._node_agents.get(edge['s'])
target_agent = self._node_agents.get(edge['t'])
if not source_agent or not target_agent:
logger.info("Skipping edge %s->%s: %s" %
(self._node_names[edge['s']],
self._node_names[edge['t']], edge))
continue
ev = self._create_evidence(id)
if stmt_type == Complex:
stmt = stmt_type([source_agent, target_agent], evidence=ev)
else:
stmt = stmt_type(source_agent, target_agent, evidence=ev)
self.statements.append(stmt)
return self.statements |
def destroy(ads):
"""Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
"""
for ad in ads:
try:
ad.services.stop_all()
except:
ad.log.exception('Failed to clean up properly.') | Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects. | Below is the instruction that describes the task:
### Input:
Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
### Response:
def destroy(ads):
"""Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
"""
for ad in ads:
try:
ad.services.stop_all()
except:
ad.log.exception('Failed to clean up properly.') |
def apply_to_model(self, model):
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict") | Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of. | Below is the the instruction that describes the task:
### Input:
Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
### Response:
def apply_to_model(self, model):
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict") |
def check_dashboard_cookie(self):
"""
Check if the dashboard cookie should exist through bikasetup
configuration.
If it should exist but doesn't exist yet, the function creates it
with all values as default.
If it should exist and already exists, it returns the value.
Otherwise, the function returns None.
:return: a dictionary of strings
"""
# Getting cookie
cookie_raw = self.request.get(DASHBOARD_FILTER_COOKIE, None)
# If it doesn't exist, create it with default values
if cookie_raw is None:
cookie_raw = self._create_raw_data()
self.request.response.setCookie(
DASHBOARD_FILTER_COOKIE,
json.dumps(cookie_raw),
quoted=False,
path='/')
return cookie_raw
return get_strings(json.loads(cookie_raw)) | Check if the dashboard cookie should exist through bikasetup
configuration.
If it should exist but doesn't exist yet, the function creates it
with all values as default.
If it should exist and already exists, it returns the value.
Otherwise, the function returns None.
:return: a dictionary of strings | Below is the the instruction that describes the task:
### Input:
Check if the dashboard cookie should exist through bikasetup
configuration.
If it should exist but doesn't exist yet, the function creates it
with all values as default.
If it should exist and already exists, it returns the value.
Otherwise, the function returns None.
:return: a dictionary of strings
### Response:
def check_dashboard_cookie(self):
"""
Check if the dashboard cookie should exist through bikasetup
configuration.
If it should exist but doesn't exist yet, the function creates it
with all values as default.
If it should exist and already exists, it returns the value.
Otherwise, the function returns None.
:return: a dictionary of strings
"""
# Getting cookie
cookie_raw = self.request.get(DASHBOARD_FILTER_COOKIE, None)
# If it doesn't exist, create it with default values
if cookie_raw is None:
cookie_raw = self._create_raw_data()
self.request.response.setCookie(
DASHBOARD_FILTER_COOKIE,
json.dumps(cookie_raw),
quoted=False,
path='/')
return cookie_raw
return get_strings(json.loads(cookie_raw)) |
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded | Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded. | Below is the the instruction that describes the task:
### Input:
Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
### Response:
def _parse_stop_words_file(self, path):
"""Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
"""
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if not language in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded |
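As a rough, self-contained sketch of the same idea (the file names are hypothetical; the language key is taken from the suffix after the last '-' in the file name):

# Illustrative sketch: load one stop-word file per language.
import codecs
import os
import tempfile
from collections import defaultdict

def load_stop_words(paths):
    stop_words = defaultdict(set)
    for path in paths:
        if not os.path.isfile(path):
            continue                      # missing files are silently skipped
        language = path.split('-')[-1]    # e.g. 'stop-words-en' -> 'en'
        with codecs.open(path, 'r', 'UTF-8') as handle:
            for word in handle:
                stop_words[language].add(word.strip())
    return stop_words

# quick demo with a temporary '-en' file
tmp = tempfile.NamedTemporaryFile('w', suffix='-en', delete=False, encoding='utf-8')
tmp.write('the\nand\n'); tmp.close()
print(load_stop_words([tmp.name]))        # defaultdict(..., {'en': {'and', 'the'}})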
def split_input(img):
"""
img: an RGB image of shape (s, 2s, 3).
:return: [input, output]
"""
# split the image into left + right pairs
s = img.shape[0]
assert img.shape[1] == 2 * s
input, output = img[:, :s, :], img[:, s:, :]
if args.mode == 'BtoA':
input, output = output, input
if IN_CH == 1:
input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
if OUT_CH == 1:
output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
return [input, output] | img: an RGB image of shape (s, 2s, 3).
:return: [input, output] | Below is the the instruction that describes the task:
### Input:
img: an RGB image of shape (s, 2s, 3).
:return: [input, output]
### Response:
def split_input(img):
"""
img: an RGB image of shape (s, 2s, 3).
:return: [input, output]
"""
# split the image into left + right pairs
s = img.shape[0]
assert img.shape[1] == 2 * s
input, output = img[:, :s, :], img[:, s:, :]
if args.mode == 'BtoA':
input, output = output, input
if IN_CH == 1:
input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
if OUT_CH == 1:
output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
return [input, output] |
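A minimal numpy-only sketch of the same split (the grayscale conversion is omitted and the dummy image size is arbitrary):

# Illustrative sketch: split a side-by-side (s, 2s, 3) pair image into halves.
import numpy as np

img = np.zeros((256, 512, 3), dtype=np.uint8)    # assumed dummy pair image
s = img.shape[0]
assert img.shape[1] == 2 * s
left, right = img[:, :s, :], img[:, s:, :]       # input half / output half
print(left.shape, right.shape)                   # (256, 256, 3) (256, 256, 3)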
def _CompositeMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
"""Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
context_state = getattr(context, 'state', {})
attribute_index = context_state.get('attribute_index', 0)
mapped_values = context_state.get('mapped_values', None)
subcontext = context_state.get('context', None)
if not mapped_values:
mapped_values = self._structure_values_class()
if not subcontext:
subcontext = DataTypeMapContext(values={
type(mapped_values).__name__: mapped_values})
members_data_size = 0
for attribute_index in range(attribute_index, self._number_of_attributes):
attribute_name = self._attribute_names[attribute_index]
data_type_map = self._data_type_maps[attribute_index]
member_definition = self._data_type_definition.members[attribute_index]
condition = getattr(member_definition, 'condition', None)
if condition:
namespace = dict(subcontext.values)
# Make sure __builtins__ contains an empty dictionary.
namespace['__builtins__'] = {}
try:
condition_result = eval(condition, namespace) # pylint: disable=eval-used
except Exception as exception:
raise errors.MappingError(
'Unable to evaluate condition with error: {0!s}'.format(
exception))
if not isinstance(condition_result, bool):
raise errors.MappingError(
'Condition does not result in a boolean value')
if not condition_result:
continue
if isinstance(member_definition, data_types.PaddingDefinition):
_, byte_size = divmod(
members_data_size, member_definition.alignment_size)
if byte_size > 0:
byte_size = member_definition.alignment_size - byte_size
data_type_map.byte_size = byte_size
try:
value = data_type_map.MapByteStream(
byte_stream, byte_offset=byte_offset, context=subcontext)
setattr(mapped_values, attribute_name, value)
except errors.ByteStreamTooSmallError as exception:
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
raise errors.ByteStreamTooSmallError(exception)
except Exception as exception:
raise errors.MappingError(exception)
supported_values = getattr(member_definition, 'values', None)
if supported_values and value not in supported_values:
raise errors.MappingError(
'Value: {0!s} not in supported values: {1:s}'.format(
value, ', '.join([
'{0!s}'.format(value) for value in supported_values])))
byte_offset += subcontext.byte_size
members_data_size += subcontext.byte_size
if attribute_index != (self._number_of_attributes - 1):
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: missing attribute: {2:d}').format(
self._data_type_definition.name, byte_offset, attribute_index)
raise errors.ByteStreamTooSmallError(error_string)
if context:
context.byte_size = members_data_size
context.state = {}
return mapped_values | Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream. | Below is the the instruction that describes the task:
### Input:
Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
### Response:
def _CompositeMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
"""Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
context_state = getattr(context, 'state', {})
attribute_index = context_state.get('attribute_index', 0)
mapped_values = context_state.get('mapped_values', None)
subcontext = context_state.get('context', None)
if not mapped_values:
mapped_values = self._structure_values_class()
if not subcontext:
subcontext = DataTypeMapContext(values={
type(mapped_values).__name__: mapped_values})
members_data_size = 0
for attribute_index in range(attribute_index, self._number_of_attributes):
attribute_name = self._attribute_names[attribute_index]
data_type_map = self._data_type_maps[attribute_index]
member_definition = self._data_type_definition.members[attribute_index]
condition = getattr(member_definition, 'condition', None)
if condition:
namespace = dict(subcontext.values)
# Make sure __builtins__ contains an empty dictionary.
namespace['__builtins__'] = {}
try:
condition_result = eval(condition, namespace) # pylint: disable=eval-used
except Exception as exception:
raise errors.MappingError(
'Unable to evaluate condition with error: {0!s}'.format(
exception))
if not isinstance(condition_result, bool):
raise errors.MappingError(
'Condition does not result in a boolean value')
if not condition_result:
continue
if isinstance(member_definition, data_types.PaddingDefinition):
_, byte_size = divmod(
members_data_size, member_definition.alignment_size)
if byte_size > 0:
byte_size = member_definition.alignment_size - byte_size
data_type_map.byte_size = byte_size
try:
value = data_type_map.MapByteStream(
byte_stream, byte_offset=byte_offset, context=subcontext)
setattr(mapped_values, attribute_name, value)
except errors.ByteStreamTooSmallError as exception:
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
raise errors.ByteStreamTooSmallError(exception)
except Exception as exception:
raise errors.MappingError(exception)
supported_values = getattr(member_definition, 'values', None)
if supported_values and value not in supported_values:
raise errors.MappingError(
'Value: {0!s} not in supported values: {1:s}'.format(
value, ', '.join([
'{0!s}'.format(value) for value in supported_values])))
byte_offset += subcontext.byte_size
members_data_size += subcontext.byte_size
if attribute_index != (self._number_of_attributes - 1):
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: missing attribute: {2:d}').format(
self._data_type_definition.name, byte_offset, attribute_index)
raise errors.ByteStreamTooSmallError(error_string)
if context:
context.byte_size = members_data_size
context.state = {}
return mapped_values |
def get_default_config(self):
"""
Returns default configuration options.
"""
config = super(SmartCollector, self).get_default_config()
config.update({
'path': 'smart',
'bin': 'smartctl',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$',
})
return config | Returns default configuration options. | Below is the the instruction that describes the task:
### Input:
Returns default configuration options.
### Response:
def get_default_config(self):
"""
Returns default configuration options.
"""
config = super(SmartCollector, self).get_default_config()
config.update({
'path': 'smart',
'bin': 'smartctl',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$',
})
return config |
def all(self, customer_id, data={}, **kwargs):
""""
Get all tokens for given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
Returns:
Token dicts for given customer Id

"""
url = "{}/{}/tokens".format(self.base_url, customer_id)
return self.get_url(url, data, **kwargs) | Get all tokens for given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
Returns:
Token dicts for given customer Id | Below is the the instruction that describes the task:
### Input:
Get all tokens for given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
Returns:
Token dicts for given customer Id
### Response:
def all(self, customer_id, data={}, **kwargs):
""""
Get all tokens for given customer Id
Args:
customer_id : Customer Id for which tokens have to be fetched
Returns:
Token dicts for given customer Id
"""
url = "{}/{}/tokens".format(self.base_url, customer_id)
return self.get_url(url, data, **kwargs) |
def delete(fun):
'''
Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces'
'''
if __opts__['file_client'] == 'local':
data = __salt__['data.get']('mine_cache')
if isinstance(data, dict) and fun in data:
del data[fun]
return __salt__['data.update']('mine_cache', data)
load = {
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
}
return _mine_send(load, __opts__) | Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces' | Below is the the instruction that describes the task:
### Input:
Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces'
### Response:
def delete(fun):
'''
Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces'
'''
if __opts__['file_client'] == 'local':
data = __salt__['data.get']('mine_cache')
if isinstance(data, dict) and fun in data:
del data[fun]
return __salt__['data.update']('mine_cache', data)
load = {
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
}
return _mine_send(load, __opts__) |
def optional(e, default=Ignore):
"""
Create a PEG function to optionally match an expression.
"""
def match_optional(s, grm=None, pos=0):
try:
return e(s, grm, pos)
except PegreError:
return PegreResult(s, default, (pos, pos))
return match_optional | Create a PEG function to optionally match an expression. | Below is the the instruction that describes the task:
### Input:
Create a PEG function to optionally match an expression.
### Response:
def optional(e, default=Ignore):
"""
Create a PEG function to optionally match an expression.
"""
def match_optional(s, grm=None, pos=0):
try:
return e(s, grm, pos)
except PegreError:
return PegreResult(s, default, (pos, pos))
return match_optional |
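A self-contained sketch of the same combinator idea, with plain stand-ins for PegreError/PegreResult/Ignore (those names belong to the surrounding module and are not reproduced here):

# Illustrative sketch of an optional-match PEG combinator.
class MatchError(Exception):
    pass

def literal(text):
    def match(s, pos=0):
        if s.startswith(text, pos):
            return (text, pos + len(text))
        raise MatchError(pos)
    return match

def optional(expr, default=None):
    def match_optional(s, pos=0):
        try:
            return expr(s, pos)
        except MatchError:
            return (default, pos)        # succeed without consuming input
    return match_optional

maybe_minus = optional(literal('-'))
print(maybe_minus('-42'))   # ('-', 1)
print(maybe_minus('42'))    # (None, 0)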
def removeAll(self):
"""Remove all objects
Returns:
len(int): affected rows
"""
before_len = len(self.model.db)
self.model.db = []
if not self._batch.enable.is_set():
self.model.save_db()
return before_len - len(self.model.db) | Remove all objects
Returns:
len(int): affected rows | Below is the the instruction that describes the task:
### Input:
Remove all objects
Returns:
len(int): affected rows
### Response:
def removeAll(self):
"""Remove all objects
Returns:
len(int): affected rows
"""
before_len = len(self.model.db)
self.model.db = []
if not self._batch.enable.is_set():
self.model.save_db()
return before_len - len(self.model.db) |
def PorodGuinier(q, a, alpha, Rg):
"""Empirical Porod-Guinier scattering
Inputs:
-------
``q``: independent variable
``a``: factor of the power-law branch
``alpha``: power-law exponent
``Rg``: radius of gyration
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``G`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
return PorodGuinierMulti(q, a, alpha, Rg) | Empirical Porod-Guinier scattering
Inputs:
-------
``q``: independent variable
``a``: factor of the power-law branch
``alpha``: power-law exponent
``Rg``: radius of gyration
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``G`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719. | Below is the the instruction that describes the task:
### Input:
Empirical Porod-Guinier scattering
Inputs:
-------
``q``: independent variable
``a``: factor of the power-law branch
``alpha``: power-law exponent
``Rg``: radius of gyration
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``G`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
### Response:
def PorodGuinier(q, a, alpha, Rg):
"""Empirical Porod-Guinier scattering
Inputs:
-------
``q``: independent variable
``a``: factor of the power-law branch
``alpha``: power-law exponent
``Rg``: radius of gyration
Formula:
--------
``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise.
``q_sep`` and ``G`` are determined from conditions of smoothness at
the cross-over.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
return PorodGuinierMulti(q, a, alpha, Rg) |
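Purely as an illustration of the smoothness conditions mentioned above (matching the value and the log-derivative of the two branches at the cross-over, assuming alpha < 0, and following the branch assignment as stated in the docstring); this is independent algebra, not the sastool implementation:

# Illustrative sketch: derive q_sep and G from the cross-over conditions.
import numpy as np

def porod_guinier_sketch(q, a, alpha, Rg):
    q_sep = np.sqrt(-1.5 * alpha) / Rg            # from d(ln I)/dq continuity
    G = a * q_sep**alpha * np.exp(-alpha / 2.0)   # from I(q_sep) continuity
    return np.where(q > q_sep,
                    G * np.exp(-q**2 * Rg**2 / 3.0),
                    a * q**alpha)

q = np.linspace(1e-3, 0.5, 500)
I = porod_guinier_sketch(q, a=1.0, alpha=-4.0, Rg=20.0)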
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs) | :param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have. | Below is the the instruction that describes the task:
### Input:
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
### Response:
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs) |
def _prevNonCommentBlock(self, block):
"""Return the closest non-empty line, ignoring comments
(result <= line). Return -1 if the document
"""
block = self._prevNonEmptyBlock(block)
while block.isValid() and self._isCommentBlock(block):
block = self._prevNonEmptyBlock(block)
return block | Return the closest non-empty line, ignoring comments
(result <= line). Return -1 if the document | Below is the the instruction that describes the task:
### Input:
Return the closest non-empty line, ignoring comments
(result <= line). Return -1 if the document
### Response:
def _prevNonCommentBlock(self, block):
"""Return the closest non-empty line, ignoring comments
(result <= line). Return -1 if the document
"""
block = self._prevNonEmptyBlock(block)
while block.isValid() and self._isCommentBlock(block):
block = self._prevNonEmptyBlock(block)
return block |
def retrieve_loadbalancer_status(self, loadbalancer, **_params):
"""Retrieves status for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_status % (loadbalancer),
params=_params) | Retrieves status for a certain load balancer. | Below is the the instruction that describes the task:
### Input:
Retrieves status for a certain load balancer.
### Response:
def retrieve_loadbalancer_status(self, loadbalancer, **_params):
"""Retrieves status for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_status % (loadbalancer),
params=_params) |
def _execute(self, line):
"""
Evaluate the line and print the result.
"""
output = self.app.output
# WORKAROUND: Due to a bug in Jedi, the current directory is removed
# from sys.path. See: https://github.com/davidhalter/jedi/issues/1148
if '' not in sys.path:
sys.path.insert(0, '')
def compile_with_flags(code, mode):
" Compile code with the right compiler flags. "
return compile(code, '<stdin>', mode,
flags=self.get_compiler_flags(),
dont_inherit=True)
if line.lstrip().startswith('\x1a'):
# When the input starts with Ctrl-Z, quit the REPL.
self.app.exit()
elif line.lstrip().startswith('!'):
# Run as shell command
os.system(line[1:])
else:
# Try eval first
try:
code = compile_with_flags(line, 'eval')
result = eval(code, self.get_globals(), self.get_locals())
locals = self.get_locals()
locals['_'] = locals['_%i' % self.current_statement_index] = result
if result is not None:
out_prompt = self.get_output_prompt()
try:
result_str = '%r\n' % (result, )
except UnicodeDecodeError:
# In Python 2: `__repr__` should return a bytestring,
# so to put it in a unicode context could raise an
# exception that the 'ascii' codec can't decode certain
# characters. Decode as utf-8 in that case.
result_str = '%s\n' % repr(result).decode('utf-8')
# Align every line to the first one.
line_sep = '\n' + ' ' * fragment_list_width(out_prompt)
result_str = line_sep.join(result_str.splitlines()) + '\n'
# Write output tokens.
if self.enable_syntax_highlighting:
formatted_output = merge_formatted_text([
out_prompt,
PygmentsTokens(list(_lex_python_result(result_str))),
])
else:
formatted_output = FormattedText(
out_prompt + [('', result_str)])
print_formatted_text(
formatted_output, style=self._current_style,
style_transformation=self.style_transformation,
include_default_pygments_style=False)
# If not a valid `eval` expression, run using `exec` instead.
except SyntaxError:
code = compile_with_flags(line, 'exec')
six.exec_(code, self.get_globals(), self.get_locals())
output.flush() | Evaluate the line and print the result. | Below is the the instruction that describes the task:
### Input:
Evaluate the line and print the result.
### Response:
def _execute(self, line):
"""
Evaluate the line and print the result.
"""
output = self.app.output
# WORKAROUND: Due to a bug in Jedi, the current directory is removed
# from sys.path. See: https://github.com/davidhalter/jedi/issues/1148
if '' not in sys.path:
sys.path.insert(0, '')
def compile_with_flags(code, mode):
" Compile code with the right compiler flags. "
return compile(code, '<stdin>', mode,
flags=self.get_compiler_flags(),
dont_inherit=True)
if line.lstrip().startswith('\x1a'):
# When the input starts with Ctrl-Z, quit the REPL.
self.app.exit()
elif line.lstrip().startswith('!'):
# Run as shell command
os.system(line[1:])
else:
# Try eval first
try:
code = compile_with_flags(line, 'eval')
result = eval(code, self.get_globals(), self.get_locals())
locals = self.get_locals()
locals['_'] = locals['_%i' % self.current_statement_index] = result
if result is not None:
out_prompt = self.get_output_prompt()
try:
result_str = '%r\n' % (result, )
except UnicodeDecodeError:
# In Python 2: `__repr__` should return a bytestring,
# so to put it in a unicode context could raise an
# exception that the 'ascii' codec can't decode certain
# characters. Decode as utf-8 in that case.
result_str = '%s\n' % repr(result).decode('utf-8')
# Align every line to the first one.
line_sep = '\n' + ' ' * fragment_list_width(out_prompt)
result_str = line_sep.join(result_str.splitlines()) + '\n'
# Write output tokens.
if self.enable_syntax_highlighting:
formatted_output = merge_formatted_text([
out_prompt,
PygmentsTokens(list(_lex_python_result(result_str))),
])
else:
formatted_output = FormattedText(
out_prompt + [('', result_str)])
print_formatted_text(
formatted_output, style=self._current_style,
style_transformation=self.style_transformation,
include_default_pygments_style=False)
# If not a valid `eval` expression, run using `exec` instead.
except SyntaxError:
code = compile_with_flags(line, 'exec')
six.exec_(code, self.get_globals(), self.get_locals())
output.flush() |
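Stripped of the prompt_toolkit formatting machinery, the eval-first / exec-fallback core of this method looks roughly like the following sketch:

# Illustrative sketch: try to evaluate a line as an expression, fall back to exec.
def run_line(line, namespace):
    try:
        code = compile(line, '<stdin>', 'eval')
    except SyntaxError:
        exec(compile(line, '<stdin>', 'exec'), namespace)
        return None
    result = eval(code, namespace)
    if result is not None:
        namespace['_'] = result          # remember the last result
        print(repr(result))
    return result

ns = {}
run_line('x = 21 * 2', ns)   # statement -> exec branch
run_line('x + 0', ns)        # expression -> eval branch, prints 42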
def coarsegrain(P, n):
"""
Coarse-grains transition matrix P to n sets using PCCA
Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:
..math:
\tilde{P} = M^T P M (M^T M)^{-1}
See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_.
References
----------
[1] S. Kube and M. Weber
A coarse graining method for the identification of transition rates between molecular conformations.
J. Chem. Phys. 126, 024103 (2007)
[2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
J. Chem. Phys. 139, 184114 (2013)
"""
M = pcca(P, n)
# coarse-grained transition matrix
W = np.linalg.inv(np.dot(M.T, M))
A = np.dot(np.dot(M.T, P), M)
P_coarse = np.dot(W, A)
# symmetrize and renormalize to eliminate numerical errors
from msmtools.analysis import stationary_distribution
pi_coarse = np.dot(M.T, stationary_distribution(P))
X = np.dot(np.diag(pi_coarse), P_coarse)
P_coarse = X / X.sum(axis=1)[:, None]
return P_coarse | Coarse-grains transition matrix P to n sets using PCCA
Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:
..math:
\tilde{P} = M^T P M (M^T M)^{-1}
See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_.
References
----------
[1] S. Kube and M. Weber
A coarse graining method for the identification of transition rates between molecular conformations.
J. Chem. Phys. 126, 024103 (2007)
[2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
J. Chem. Phys. 139, 184114 (2013) | Below is the the instruction that describes the task:
### Input:
Coarse-grains transition matrix P to n sets using PCCA
Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:
..math:
\tilde{P} = M^T P M (M^T M)^{-1}
See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_.
References
----------
[1] S. Kube and M. Weber
A coarse graining method for the identification of transition rates between molecular conformations.
J. Chem. Phys. 126, 024103 (2007)
[2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
J. Chem. Phys. 139, 184114 (2013)
### Response:
def coarsegrain(P, n):
"""
Coarse-grains transition matrix P to n sets using PCCA
Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:
..math:
\tilde{P} = M^T P M (M^T M)^{-1}
See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_.
References
----------
[1] S. Kube and M. Weber
A coarse graining method for the identification of transition rates between molecular conformations.
J. Chem. Phys. 126, 024103 (2007)
[2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
J. Chem. Phys. 139, 184114 (2013)
"""
M = pcca(P, n)
# coarse-grained transition matrix
W = np.linalg.inv(np.dot(M.T, M))
A = np.dot(np.dot(M.T, P), M)
P_coarse = np.dot(W, A)
# symmetrize and renormalize to eliminate numerical errors
from msmtools.analysis import stationary_distribution
pi_coarse = np.dot(M.T, stationary_distribution(P))
X = np.dot(np.diag(pi_coarse), P_coarse)
P_coarse = X / X.sum(axis=1)[:, None]
return P_coarse |
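For the linear algebra alone, a small numpy sketch with a hand-made hard membership matrix in place of PCCA (the numbers are arbitrary, and the symmetrization/renormalization step above is omitted):

# Illustrative sketch: coarse-grain a 3-state transition matrix to 2 sets.
import numpy as np

P = np.array([[0.90, 0.08, 0.02],
              [0.10, 0.80, 0.10],
              [0.02, 0.08, 0.90]])
M = np.array([[1.0, 0.0],      # state 0 -> set A
              [1.0, 0.0],      # state 1 -> set A
              [0.0, 1.0]])     # state 2 -> set B

P_coarse = np.linalg.inv(M.T @ M) @ (M.T @ P @ M)
print(P_coarse)                # 2x2 coarse-grained transition matrix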
def delete_resourcegroupitems(scenario_id, item_ids, **kwargs):
"""
Delete specified items in a group, in a scenario.
"""
user_id = int(kwargs.get('user_id'))
#check the scenario exists
_get_scenario(scenario_id, user_id)
for item_id in item_ids:
rgi = db.DBSession.query(ResourceGroupItem).\
filter(ResourceGroupItem.id==item_id).one()
db.DBSession.delete(rgi)
db.DBSession.flush() | Delete specified items in a group, in a scenario. | Below is the the instruction that describes the task:
### Input:
Delete specified items in a group, in a scenario.
### Response:
def delete_resourcegroupitems(scenario_id, item_ids, **kwargs):
"""
Delete specified items in a group, in a scenario.
"""
user_id = int(kwargs.get('user_id'))
#check the scenario exists
_get_scenario(scenario_id, user_id)
for item_id in item_ids:
rgi = db.DBSession.query(ResourceGroupItem).\
filter(ResourceGroupItem.id==item_id).one()
db.DBSession.delete(rgi)
db.DBSession.flush() |
def get_input_files(oqparam, hazard=False):
"""
:param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: input path names in a specific order
"""
fnames = [] # files entering in the checksum
for key in oqparam.inputs:
fname = oqparam.inputs[key]
if hazard and key not in ('site_model', 'source_model_logic_tree',
'gsim_logic_tree', 'source'):
continue
# collect .hdf5 tables for the GSIMs, if any
elif key == 'gsim_logic_tree':
gsim_lt = get_gsim_lt(oqparam)
for gsims in gsim_lt.values.values():
for gsim in gsims:
table = getattr(gsim, 'GMPE_TABLE', None)
if table:
fnames.append(table)
fnames.append(fname)
elif key == 'source_model': # UCERF
f = oqparam.inputs['source_model']
fnames.append(f)
fname = nrml.read(f).sourceModel.UCERFSource['filename']
fnames.append(os.path.join(os.path.dirname(f), fname))
elif key == 'exposure': # fname is a list
for exp in asset.Exposure.read_headers(fname):
fnames.extend(exp.datafiles)
fnames.extend(fname)
elif isinstance(fname, dict):
fnames.extend(fname.values())
elif isinstance(fname, list):
for f in fname:
if f == oqparam.input_dir:
raise InvalidFile('%s there is an empty path in %s' %
(oqparam.inputs['job_ini'], key))
fnames.extend(fname)
elif key == 'source_model_logic_tree':
for smpaths in logictree.collect_info(fname).smpaths.values():
fnames.extend(smpaths)
fnames.append(fname)
else:
fnames.append(fname)
return sorted(fnames) | :param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: input path names in a specific order | Below is the the instruction that describes the task:
### Input:
:param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: input path names in a specific order
### Response:
def get_input_files(oqparam, hazard=False):
"""
:param oqparam: an OqParam instance
:param hazard: if True, consider only the hazard files
:returns: input path names in a specific order
"""
fnames = [] # files entering in the checksum
for key in oqparam.inputs:
fname = oqparam.inputs[key]
if hazard and key not in ('site_model', 'source_model_logic_tree',
'gsim_logic_tree', 'source'):
continue
# collect .hdf5 tables for the GSIMs, if any
elif key == 'gsim_logic_tree':
gsim_lt = get_gsim_lt(oqparam)
for gsims in gsim_lt.values.values():
for gsim in gsims:
table = getattr(gsim, 'GMPE_TABLE', None)
if table:
fnames.append(table)
fnames.append(fname)
elif key == 'source_model': # UCERF
f = oqparam.inputs['source_model']
fnames.append(f)
fname = nrml.read(f).sourceModel.UCERFSource['filename']
fnames.append(os.path.join(os.path.dirname(f), fname))
elif key == 'exposure': # fname is a list
for exp in asset.Exposure.read_headers(fname):
fnames.extend(exp.datafiles)
fnames.extend(fname)
elif isinstance(fname, dict):
fnames.extend(fname.values())
elif isinstance(fname, list):
for f in fname:
if f == oqparam.input_dir:
raise InvalidFile('%s there is an empty path in %s' %
(oqparam.inputs['job_ini'], key))
fnames.extend(fname)
elif key == 'source_model_logic_tree':
for smpaths in logictree.collect_info(fname).smpaths.values():
fnames.extend(smpaths)
fnames.append(fname)
else:
fnames.append(fname)
return sorted(fnames) |
def on_message(self, msg=None):
"""
Poll the websocket for a new packet.
`Client.listen()` calls this.
:param msg (string(byte array)): Optional. Parse the specified message
instead of receiving a packet from the socket.
"""
if msg is None:
try:
msg = self.ws.recv()
except Exception as e:
self.subscriber.on_message_error(
'Error while receiving packet: %s' % str(e))
self.disconnect()
return False
if not msg:
self.subscriber.on_message_error('Empty message received')
return False
buf = BufferStruct(msg)
opcode = buf.pop_uint8()
try:
packet_name = packet_s2c[opcode]
except KeyError:
self.subscriber.on_message_error('Unknown packet %s' % opcode)
return False
if not self.ingame and packet_name in ingame_packets:
self.subscriber.on_ingame()
self.ingame = True
parser = getattr(self, 'parse_%s' % packet_name)
try:
parser(buf)
except BufferUnderflowError as e:
msg = 'Parsing %s packet failed: %s' % (packet_name, e.args[0])
self.subscriber.on_message_error(msg)
if len(buf.buffer) != 0:
msg = 'Buffer not empty after parsing "%s" packet' % packet_name
self.subscriber.on_message_error(msg)
return packet_name | Poll the websocket for a new packet.
`Client.listen()` calls this.
:param msg (string(byte array)): Optional. Parse the specified message
instead of receiving a packet from the socket. | Below is the the instruction that describes the task:
### Input:
Poll the websocket for a new packet.
`Client.listen()` calls this.
:param msg (string(byte array)): Optional. Parse the specified message
instead of receiving a packet from the socket.
### Response:
def on_message(self, msg=None):
"""
Poll the websocket for a new packet.
`Client.listen()` calls this.
:param msg (string(byte array)): Optional. Parse the specified message
instead of receiving a packet from the socket.
"""
if msg is None:
try:
msg = self.ws.recv()
except Exception as e:
self.subscriber.on_message_error(
'Error while receiving packet: %s' % str(e))
self.disconnect()
return False
if not msg:
self.subscriber.on_message_error('Empty message received')
return False
buf = BufferStruct(msg)
opcode = buf.pop_uint8()
try:
packet_name = packet_s2c[opcode]
except KeyError:
self.subscriber.on_message_error('Unknown packet %s' % opcode)
return False
if not self.ingame and packet_name in ingame_packets:
self.subscriber.on_ingame()
self.ingame = True
parser = getattr(self, 'parse_%s' % packet_name)
try:
parser(buf)
except BufferUnderflowError as e:
msg = 'Parsing %s packet failed: %s' % (packet_name, e.args[0])
self.subscriber.on_message_error(msg)
if len(buf.buffer) != 0:
msg = 'Buffer not empty after parsing "%s" packet' % packet_name
self.subscriber.on_message_error(msg)
return packet_name |
def encode_username_password(
username: Union[str, bytes], password: Union[str, bytes]
) -> bytes:
"""Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1
"""
if isinstance(username, unicode_type):
username = unicodedata.normalize("NFC", username)
if isinstance(password, unicode_type):
password = unicodedata.normalize("NFC", password)
return utf8(username) + b":" + utf8(password) | Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1 | Below is the the instruction that describes the task:
### Input:
Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1
### Response:
def encode_username_password(
username: Union[str, bytes], password: Union[str, bytes]
) -> bytes:
"""Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1
"""
if isinstance(username, unicode_type):
username = unicodedata.normalize("NFC", username)
if isinstance(password, unicode_type):
password = unicodedata.normalize("NFC", password)
return utf8(username) + b":" + utf8(password) |
def disconnect(self):
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
self._transport.disconnect()
except Exception:
self.logger.error(
"Failed to disconnect from %s", self._host, exc_info=True)
raise
finally:
self._transport = None | Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway. | Below is the the instruction that describes the task:
### Input:
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
### Response:
def disconnect(self):
'''
Disconnect from the current host, but do not update the closed state.
After the transport is disconnected, the closed state will be True if
this is called after a protocol shutdown, or False if the disconnect
was in error.
TODO: do we really need closed vs. connected states? this only adds
complication and the whole reconnect process has been scrapped anyway.
'''
self._connected = False
if self._transport is not None:
try:
self._transport.disconnect()
except Exception:
self.logger.error(
"Failed to disconnect from %s", self._host, exc_info=True)
raise
finally:
self._transport = None |
def get_default_bios_settings(self, only_allowed_settings=True):
"""Get default BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of default BIOS settings(factory settings).
Depending on the 'only_allowed_settings', either only the
allowed settings are returned or all the supported settings
are returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the BaseConfig resource.
try:
base_config_uri = bios_settings['links']['BaseConfigs']['href']
except KeyError:
msg = ("BaseConfigs resource not found. Couldn't apply the BIOS "
"Settings.")
raise exception.IloCommandNotSupportedError(msg)
status, headers, config = self._rest_get(base_config_uri)
if status != 200:
msg = self._get_extended_error(config)
raise exception.IloError(msg)
for cfg in config['BaseConfigs']:
default_settings = cfg.get('default')
if default_settings:
break
else:
msg = ("Default BIOS Settings not found in 'BaseConfigs' "
"resource.")
raise exception.IloCommandNotSupportedError(msg)
if only_allowed_settings:
return utils.apply_bios_properties_filter(
default_settings, constants.SUPPORTED_BIOS_PROPERTIES)
return default_settings | Get default BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of default BIOS settings(factory settings).
Depending on the 'only_allowed_settings', either only the
allowed settings are returned or all the supported settings
are returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | Below is the the instruction that describes the task:
### Input:
Get default BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of default BIOS settings(factory settings).
Depending on the 'only_allowed_settings', either only the
allowed settings are returned or all the supported settings
are returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
### Response:
def get_default_bios_settings(self, only_allowed_settings=True):
"""Get default BIOS settings.
:param: only_allowed_settings: True when only allowed BIOS settings
are to be returned. If False, All the BIOS settings supported
by iLO are returned.
:return: a dictionary of default BIOS settings(factory settings).
Depending on the 'only_allowed_settings', either only the
allowed settings are returned or all the supported settings
are returned.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the BaseConfig resource.
try:
base_config_uri = bios_settings['links']['BaseConfigs']['href']
except KeyError:
msg = ("BaseConfigs resource not found. Couldn't apply the BIOS "
"Settings.")
raise exception.IloCommandNotSupportedError(msg)
status, headers, config = self._rest_get(base_config_uri)
if status != 200:
msg = self._get_extended_error(config)
raise exception.IloError(msg)
for cfg in config['BaseConfigs']:
default_settings = cfg.get('default')
if default_settings:
break
else:
msg = ("Default BIOS Settings not found in 'BaseConfigs' "
"resource.")
raise exception.IloCommandNotSupportedError(msg)
if only_allowed_settings:
return utils.apply_bios_properties_filter(
default_settings, constants.SUPPORTED_BIOS_PROPERTIES)
return default_settings |
def hexists(self, key, field):
"""Determine if hash field exists."""
fut = self.execute(b'HEXISTS', key, field)
return wait_convert(fut, bool) | Determine if hash field exists. | Below is the the instruction that describes the task:
### Input:
Determine if hash field exists.
### Response:
def hexists(self, key, field):
"""Determine if hash field exists."""
fut = self.execute(b'HEXISTS', key, field)
return wait_convert(fut, bool) |
def qualify(self):
"""
Convert attribute values, that are references to other
objects, into I{qref}. Qualified using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
"""
defns = self.root.defaultNamespace()
if Namespace.none(defns):
defns = self.schema.tns
for a in self.autoqualified():
ref = getattr(self, a)
if ref is None:
continue
if isqref(ref):
continue
qref = qualify(ref, self.root, defns)
log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
setattr(self, a, qref) | Convert attribute values, that are references to other
objects, into I{qref}. Qualified using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references. | Below is the the instruction that describes the task:
### Input:
Convert attribute values, that are references to other
objects, into I{qref}. Qualified using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
### Response:
def qualify(self):
"""
Convert attribute values, that are references to other
objects, into I{qref}. Qualified using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
"""
defns = self.root.defaultNamespace()
if Namespace.none(defns):
defns = self.schema.tns
for a in self.autoqualified():
ref = getattr(self, a)
if ref is None:
continue
if isqref(ref):
continue
qref = qualify(ref, self.root, defns)
log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
setattr(self, a, qref) |
def log_rho_bg(trigs, bins, counts):
''' Calculate the log of background fall-off
Parameters
----------
trigs: array
SNR values of all the triggers
bins: array
bins for histogrammed triggers
counts: array
counts for histogrammed triggers
Returns
-------
array
'''
trigs = np.atleast_1d(trigs)
N = sum(counts)
assert np.all(trigs >= np.min(bins)), \
'Trigger SNR values cannot all be below the lowest bin limit!'
# If there are any triggers that are louder than the max bin, put one
# fictitious count in a bin that extends from the limits of the slide
# triggers out to the loudest trigger.
# If there is no counts for a foreground trigger put a fictitious count
# in the background bin
if np.any(trigs >= np.max(bins)):
N = N + 1
#log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT
log_rhos = []
for t in trigs:
if t >= np.max(bins):
log_rhos.append(-log(N)-log(np.max(trigs) - bins[-1]))
else:
i = bisect.bisect(bins, t) - 1
if counts[i] == 0:
counts[i] = 1
log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i]) - log(N))
return np.array(log_rhos) | Calculate the log of background fall-off
Parameters
----------
trigs: array
SNR values of all the triggers
bins: array
bins for histogrammed triggers
counts: array
counts for histogrammed triggers
Returns
-------
array | Below is the the instruction that describes the task:
### Input:
Calculate the log of background fall-off
Parameters
----------
trigs: array
SNR values of all the triggers
bins: array
bins for histogrammed triggers
counts: array
counts for histogrammed triggers
Returns
-------
array
### Response:
def log_rho_bg(trigs, bins, counts):
''' Calculate the log of background fall-off
Parameters
----------
trigs: array
SNR values of all the triggers
bins: array
bins for histogrammed triggers
counts: array
counts for histogrammed triggers
Returns
-------
array
'''
trigs = np.atleast_1d(trigs)
N = sum(counts)
assert np.all(trigs >= np.min(bins)), \
'Trigger SNR values cannot all be below the lowest bin limit!'
# If there are any triggers that are louder than the max bin, put one
# fictitious count in a bin that extends from the limits of the slide
# triggers out to the loudest trigger.
# If there is no counts for a foreground trigger put a fictitious count
# in the background bin
if np.any(trigs >= np.max(bins)):
N = N + 1
#log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT
log_rhos = []
for t in trigs:
if t >= np.max(bins):
log_rhos.append(-log(N)-log(np.max(trigs) - bins[-1]))
else:
i = bisect.bisect(bins, t) - 1
if counts[i] == 0:
counts[i] = 1
log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i]) - log(N))
return np.array(log_rhos) |
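A standalone sketch of the in-range case (the log of the count density in the containing bin, with a fictitious count of one for empty bins); the random SNR sample is invented for the demo:

# Illustrative sketch: log background density from histogram counts.
import bisect
import numpy as np

def log_density(t, bins, counts):
    N = counts.sum()
    i = bisect.bisect(bins, t) - 1          # index of the bin containing t
    c = max(counts[i], 1)                   # fictitious count for empty bins
    return np.log(c) - np.log(bins[i + 1] - bins[i]) - np.log(N)

rng = np.random.default_rng(0)
snr = rng.normal(8.0, 1.0, size=10000)      # assumed toy trigger SNRs
counts, bins = np.histogram(snr, bins=50)
print(log_density(8.0, bins, counts))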
def _set_with_metadata(self, name, value, layer=None, source=None):
"""Set a value in the named layer with the given source.
Parameters
----------
name : str
The name of the value
value
The value to store
layer : str, optional
The name of the layer to store the value in. If none is supplied
then the value will be stored in the outermost layer.
source : str, optional
The source to attribute the value to.
Raises
------
TypeError
if the ConfigTree is frozen
"""
if self._frozen:
raise TypeError('Frozen ConfigTree does not support assignment')
if isinstance(value, dict):
if name not in self._children or not isinstance(self._children[name], ConfigTree):
self._children[name] = ConfigTree(layers=list(self._layers))
self._children[name].update(value, layer, source)
else:
if name not in self._children or not isinstance(self._children[name], ConfigNode):
self._children[name] = ConfigNode(list(self._layers))
child = self._children[name]
child.set_value(value, layer, source) | Set a value in the named layer with the given source.
Parameters
----------
name : str
The name of the value
value
The value to store
layer : str, optional
The name of the layer to store the value in. If none is supplied
then the value will be stored in the outermost layer.
source : str, optional
The source to attribute the value to.
Raises
------
TypeError
if the ConfigTree is frozen | Below is the the instruction that describes the task:
### Input:
Set a value in the named layer with the given source.
Parameters
----------
name : str
The name of the value
value
The value to store
layer : str, optional
The name of the layer to store the value in. If none is supplied
then the value will be stored in the outermost layer.
source : str, optional
The source to attribute the value to.
Raises
------
TypeError
if the ConfigTree is frozen
### Response:
def _set_with_metadata(self, name, value, layer=None, source=None):
"""Set a value in the named layer with the given source.
Parameters
----------
name : str
The name of the value
value
The value to store
layer : str, optional
The name of the layer to store the value in. If none is supplied
then the value will be stored in the outermost layer.
source : str, optional
The source to attribute the value to.
Raises
------
TypeError
if the ConfigTree is frozen
"""
if self._frozen:
raise TypeError('Frozen ConfigTree does not support assignment')
if isinstance(value, dict):
if name not in self._children or not isinstance(self._children[name], ConfigTree):
self._children[name] = ConfigTree(layers=list(self._layers))
self._children[name].update(value, layer, source)
else:
if name not in self._children or not isinstance(self._children[name], ConfigNode):
self._children[name] = ConfigNode(list(self._layers))
child = self._children[name]
child.set_value(value, layer, source) |
def get_mzid_specfile_ids(mzidfn, namespace):
"""Returns mzid spectra data filenames and their IDs used in the
mzIdentML file as a dict. Keys == IDs, values == fns"""
sid_fn = {}
for specdata in mzid_specdata_generator(mzidfn, namespace):
sid_fn[specdata.attrib['id']] = specdata.attrib['name']
return sid_fn | Returns mzid spectra data filenames and their IDs used in the
mzIdentML file as a dict. Keys == IDs, values == fns | Below is the the instruction that describes the task:
### Input:
Returns mzid spectra data filenames and their IDs used in the
mzIdentML file as a dict. Keys == IDs, values == fns
### Response:
def get_mzid_specfile_ids(mzidfn, namespace):
"""Returns mzid spectra data filenames and their IDs used in the
mzIdentML file as a dict. Keys == IDs, values == fns"""
sid_fn = {}
for specdata in mzid_specdata_generator(mzidfn, namespace):
sid_fn[specdata.attrib['id']] = specdata.attrib['name']
return sid_fn |
def keys(self, remote=False):
"""
Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote request is made to include
an up to date list of databases from the server. Defaults to False.
:returns: List of database names
"""
if not remote:
return list(super(CouchDB, self).keys())
return self.all_dbs() | Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote request is made to include
an up to date list of databases from the server. Defaults to False.
:returns: List of database names | Below is the the instruction that describes the task:
### Input:
Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote request is made to include
an up to date list of databases from the server. Defaults to False.
:returns: List of database names
### Response:
def keys(self, remote=False):
"""
Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote request is made to include
an up to date list of databases from the server. Defaults to False.
:returns: List of database names
"""
if not remote:
return list(super(CouchDB, self).keys())
return self.all_dbs() |
def _parse_tags(tag_file):
"""Parses a tag file, according to RFC 2822. This
includes line folding, permitting extra-long
field values.
See http://www.faqs.org/rfcs/rfc2822.html for
more information.
"""
tag_name = None
tag_value = None
# Line folding is handled by yielding values only after we encounter
# the start of a new tag, or if we pass the EOF.
for num, line in enumerate(tag_file):
# If byte-order mark ignore it for now.
if num == 0:
if line.startswith(BOM):
line = line.lstrip(BOM)
# Skip over any empty or blank lines.
if len(line) == 0 or line.isspace():
continue
elif line[0].isspace() and tag_value is not None: # folded line
tag_value += line
else:
# Starting a new tag; yield the last one.
if tag_name:
yield (tag_name, tag_value.strip())
if ':' not in line:
raise BagValidationError("invalid line '%s' in %s" % (line.strip(),
os.path.basename(tag_file.name)))
parts = line.strip().split(':', 1)
tag_name = parts[0].strip()
tag_value = parts[1]
# Passed the EOF. All done after this.
if tag_name:
yield (tag_name, tag_value.strip()) | Parses a tag file, according to RFC 2822. This
includes line folding, permitting extra-long
field values.
See http://www.faqs.org/rfcs/rfc2822.html for
more information. | Below is the the instruction that describes the task:
### Input:
Parses a tag file, according to RFC 2822. This
includes line folding, permitting extra-long
field values.
See http://www.faqs.org/rfcs/rfc2822.html for
more information.
### Response:
def _parse_tags(tag_file):
"""Parses a tag file, according to RFC 2822. This
includes line folding, permitting extra-long
field values.
See http://www.faqs.org/rfcs/rfc2822.html for
more information.
"""
tag_name = None
tag_value = None
# Line folding is handled by yielding values only after we encounter
# the start of a new tag, or if we pass the EOF.
for num, line in enumerate(tag_file):
# If there is a byte-order mark, ignore it for now.
if num == 0:
if line.startswith(BOM):
line = line.lstrip(BOM)
# Skip over any empty or blank lines.
if len(line) == 0 or line.isspace():
continue
elif line[0].isspace() and tag_value is not None: # folded line
tag_value += line
else:
# Starting a new tag; yield the last one.
if tag_name:
yield (tag_name, tag_value.strip())
if ':' not in line:
raise BagValidationError("invalid line '%s' in %s" % (line.strip(),
os.path.basename(tag_file.name)))
parts = line.strip().split(':', 1)
tag_name = parts[0].strip()
tag_value = parts[1]
# Passed the EOF. All done after this.
if tag_name:
yield (tag_name, tag_value.strip()) |
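A short, illustrative driver for this module-private helper; the file name is a placeholder for any RFC 2822-style tag file such as bag-info.txt:

    with open('bag-info.txt', 'r', encoding='utf-8') as tag_file:
        # each item is a (tag_name, tag_value) pair, with folded lines already joined
        for name, value in _parse_tags(tag_file):
            print('%s: %s' % (name, value))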
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False):
"""Determine if SystemMetadata PyXB objects are semantically equivalent.
Normalize then compare SystemMetadata PyXB objects for equivalency.
Args:
a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
ignore_timestamps: bool
``True``: Timestamps in the SystemMetadata are set to a standard value so that
objects that are compared after normalization register as equivalent if only
their timestamps differ.
Returns:
bool: **True** if SystemMetadata PyXB objects are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
"""
normalize_in_place(a_pyxb, ignore_timestamps)
normalize_in_place(b_pyxb, ignore_timestamps)
a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
if not are_equivalent:
logger.debug('XML documents not equivalent:')
logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
return are_equivalent | Determine if SystemMetadata PyXB objects are semantically equivalent.
Normalize then compare SystemMetadata PyXB objects for equivalency.
Args:
a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
ignore_timestamps: bool
``True``: Timestamps in the SystemMetadata are set to a standard value so that
objects that are compared after normalization register as equivalent if only
their timestamps differ.
Returns:
bool: **True** if SystemMetadata PyXB objects are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one. | Below is the instruction that describes the task:
### Input:
Determine if SystemMetadata PyXB objects are semantically equivalent.
Normalize then compare SystemMetadata PyXB objects for equivalency.
Args:
a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
ignore_timestamps: bool
``True``: Timestamps in the SystemMetadata are set to a standard value so that
objects that are compared after normalization register as equivalent if only
their timestamps differ.
Returns:
bool: **True** if SystemMetadata PyXB objects are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
### Response:
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False):
"""Determine if SystemMetadata PyXB objects are semantically equivalent.
Normalize then compare SystemMetadata PyXB objects for equivalency.
Args:
a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare
ignore_timestamps: bool
``True``: Timestamps in the SystemMetadata are set to a standard value so that
objects that are compared after normalization register as equivalent if only
their timestamps differ.
Returns:
bool: **True** if SystemMetadata PyXB objects are semantically equivalent.
Notes:
The SystemMetadata is normalized by removing any redundant information and
ordering all sections where there are no semantics associated with the order. The
normalized SystemMetadata is intended to be semantically equivalent to the
un-normalized one.
"""
normalize_in_place(a_pyxb, ignore_timestamps)
normalize_in_place(b_pyxb, ignore_timestamps)
a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
if not are_equivalent:
logger.debug('XML documents not equivalent:')
logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
return are_equivalent |
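An illustrative call, assuming sysmeta_a and sysmeta_b are SystemMetadata PyXB objects that were deserialized elsewhere (both names are placeholders):

    # compare the two documents while normalizing away timestamp differences
    if are_equivalent_pyxb(sysmeta_a, sysmeta_b, ignore_timestamps=True):
        print('equivalent apart from timestamps')
    else:
        print('documents differ')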
def md5_object(obj):
"""
If an object is hashable, return the string of the MD5.
Parameters
-----------
obj: object
Returns
----------
md5: str, MD5 hash
"""
hasher = hashlib.md5()
if isinstance(obj, basestring) and PY3:
# in python3 convert strings to bytes before hashing
hasher.update(obj.encode('utf-8'))
else:
hasher.update(obj)
md5 = hasher.hexdigest()
return md5 | If an object is hashable, return the string of the MD5.
Parameters
-----------
obj: object
Returns
----------
md5: str, MD5 hash | Below is the instruction that describes the task:
### Input:
If an object is hashable, return the string of the MD5.
Parameters
-----------
obj: object
Returns
----------
md5: str, MD5 hash
### Response:
def md5_object(obj):
"""
If an object is hashable, return the string of the MD5.
Parameters
-----------
obj: object
Returns
----------
md5: str, MD5 hash
"""
hasher = hashlib.md5()
if isinstance(obj, basestring) and PY3:
# in python3 convert strings to bytes before hashing
hasher.update(obj.encode('utf-8'))
else:
hasher.update(obj)
md5 = hasher.hexdigest()
return md5 |
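A quick sketch of the two accepted input kinds, assuming the module's basestring/PY3 compatibility shims are in place as the guard implies; on Python 3 a str is encoded to UTF-8 before hashing, so both calls yield the same digest:

    print(md5_object('hello'))   # text input, encoded first
    print(md5_object(b'hello'))  # bytes input, hashed as-is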
def _get_synonym(self, line):
"""Given line, return optional attribute synonym value in a namedtuple.
Example synonym and its storage in a namedtuple:
synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
text: "The other white meat"
scope: EXACT
typename: MARKETING_SLOGAN
dbxrefs: set(["MEAT:00324", "BACONBASE:03021"])
Example synonyms:
"peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
"regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label []
"tocopherol 13-hydroxylase activity" EXACT systematic_synonym []
"""
mtch = self.attr2cmp['synonym'].match(line)
text, scope, typename, dbxrefs, _ = mtch.groups()
typename = typename.strip()
dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set()
return self.attr2cmp['synonym nt']._make([text, scope, typename, dbxrefs]) | Given line, return optional attribute synonym value in a namedtuple.
Example synonym and its storage in a namedtuple:
synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
text: "The other white meat"
scope: EXACT
typename: MARKETING_SLOGAN
dbxrefs: set(["MEAT:00324", "BACONBASE:03021"])
Example synonyms:
"peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
"regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label []
"tocopherol 13-hydroxylase activity" EXACT systematic_synonym [] | Below is the the instruction that describes the task:
### Input:
Given line, return optional attribute synonym value in a namedtuple.
Example synonym and its storage in a namedtuple:
synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
text: "The other white meat"
scope: EXACT
typename: MARKETING_SLOGAN
dbxrefs: set(["MEAT:00324", "BACONBASE:03021"])
Example synonyms:
"peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
"regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label []
"tocopherol 13-hydroxylase activity" EXACT systematic_synonym []
### Response:
def _get_synonym(self, line):
"""Given line, return optional attribute synonym value in a namedtuple.
Example synonym and its storage in a namedtuple:
synonym: "The other white meat" EXACT MARKETING_SLOGAN [MEAT:00324, BACONBASE:03021]
text: "The other white meat"
scope: EXACT
typename: MARKETING_SLOGAN
dbxrefs: set(["MEAT:00324", "BACONBASE:03021"])
Example synonyms:
"peptidase inhibitor complex" EXACT [GOC:bf, GOC:pr]
"regulation of postsynaptic cytosolic calcium levels" EXACT syngo_official_label []
"tocopherol 13-hydroxylase activity" EXACT systematic_synonym []
"""
mtch = self.attr2cmp['synonym'].match(line)
text, scope, typename, dbxrefs, _ = mtch.groups()
typename = typename.strip()
dbxrefs = set(dbxrefs.split(', ')) if dbxrefs else set()
return self.attr2cmp['synonym nt']._make([text, scope, typename, dbxrefs]) |
def send_produce_request(self, payloads=(), acks=1, timeout=1000,
fail_on_error=True, callback=None):
"""
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Arguments:
payloads (list of ProduceRequest): produce requests to send to kafka
ProduceRequest payloads must not contain duplicates for any
topic-partition.
acks (int, optional): how many acks the servers should receive from replica
brokers before responding to the request. If it is 0, the server
will not send any response. If it is 1, the server will wait
until the data is written to the local log before sending a
response. If it is -1, the server will wait until the message
is committed by all in-sync replicas before sending a response.
For any value > 1, the server will wait for this number of acks to
occur (but the server will never wait for more acknowledgements than
there are in-sync replicas). defaults to 1.
timeout (int, optional): maximum time in milliseconds the server can
await the receipt of the number of acks, defaults to 1000.
fail_on_error (bool, optional): raise exceptions on connection and
server response errors, defaults to True.
callback (function, optional): instead of returning the ProduceResponse,
first pass it through this function, defaults to None.
Returns:
list of ProduceResponses, or callback results if supplied, in the
order of input payloads
"""
encoder = functools.partial(
KafkaProtocol.encode_produce_request,
acks=acks,
timeout=timeout)
if acks == 0:
decoder = None
else:
decoder = KafkaProtocol.decode_produce_response
resps = self._send_broker_aware_request(payloads, encoder, decoder)
return [resp if not callback else callback(resp) for resp in resps
if resp is not None and
(not fail_on_error or not self._raise_on_response_error(resp))] | Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Arguments:
payloads (list of ProduceRequest): produce requests to send to kafka
ProduceRequest payloads must not contain duplicates for any
topic-partition.
acks (int, optional): how many acks the servers should receive from replica
brokers before responding to the request. If it is 0, the server
will not send any response. If it is 1, the server will wait
until the data is written to the local log before sending a
response. If it is -1, the server will wait until the message
is committed by all in-sync replicas before sending a response.
For any value > 1, the server will wait for this number of acks to
occur (but the server will never wait for more acknowledgements than
there are in-sync replicas). defaults to 1.
timeout (int, optional): maximum time in milliseconds the server can
await the receipt of the number of acks, defaults to 1000.
fail_on_error (bool, optional): raise exceptions on connection and
server response errors, defaults to True.
callback (function, optional): instead of returning the ProduceResponse,
first pass it through this function, defaults to None.
Returns:
list of ProduceResponses, or callback results if supplied, in the
order of input payloads | Below is the instruction that describes the task:
### Input:
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Arguments:
payloads (list of ProduceRequest): produce requests to send to kafka
ProduceRequest payloads must not contain duplicates for any
topic-partition.
acks (int, optional): how many acks the servers should receive from replica
brokers before responding to the request. If it is 0, the server
will not send any response. If it is 1, the server will wait
until the data is written to the local log before sending a
response. If it is -1, the server will wait until the message
is committed by all in-sync replicas before sending a response.
For any value > 1, the server will wait for this number of acks to
occur (but the server will never wait for more acknowledgements than
there are in-sync replicas). defaults to 1.
timeout (int, optional): maximum time in milliseconds the server can
await the receipt of the number of acks, defaults to 1000.
fail_on_error (bool, optional): raise exceptions on connection and
server response errors, defaults to True.
callback (function, optional): instead of returning the ProduceResponse,
first pass it through this function, defaults to None.
Returns:
list of ProduceResponses, or callback results if supplied, in the
order of input payloads
### Response:
def send_produce_request(self, payloads=(), acks=1, timeout=1000,
fail_on_error=True, callback=None):
"""
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Arguments:
payloads (list of ProduceRequest): produce requests to send to kafka
ProduceRequest payloads must not contain duplicates for any
topic-partition.
acks (int, optional): how many acks the servers should receive from replica
brokers before responding to the request. If it is 0, the server
will not send any response. If it is 1, the server will wait
until the data is written to the local log before sending a
response. If it is -1, the server will wait until the message
is committed by all in-sync replicas before sending a response.
For any value > 1, the server will wait for this number of acks to
occur (but the server will never wait for more acknowledgements than
there are in-sync replicas). defaults to 1.
timeout (int, optional): maximum time in milliseconds the server can
await the receipt of the number of acks, defaults to 1000.
fail_on_error (bool, optional): raise exceptions on connection and
server response errors, defaults to True.
callback (function, optional): instead of returning the ProduceResponse,
first pass it through this function, defaults to None.
Returns:
list of ProduceResponses, or callback results if supplied, in the
order of input payloads
"""
encoder = functools.partial(
KafkaProtocol.encode_produce_request,
acks=acks,
timeout=timeout)
if acks == 0:
decoder = None
else:
decoder = KafkaProtocol.decode_produce_response
resps = self._send_broker_aware_request(payloads, encoder, decoder)
return [resp if not callback else callback(resp) for resp in resps
if resp is not None and
(not fail_on_error or not self._raise_on_response_error(resp))] |
def set_quantity(self, twig=None, value=None, **kwargs):
"""
TODO: add documentation
"""
# TODO: handle twig having parameter key (value@, default_unit@, adjust@, etc)
# TODO: does this return anything (update the docstring)?
return self.get_parameter(twig=twig, **kwargs).set_quantity(value=value, **kwargs) | TODO: add documentation | Below is the instruction that describes the task:
### Input:
TODO: add documentation
### Response:
def set_quantity(self, twig=None, value=None, **kwargs):
"""
TODO: add documentation
"""
# TODO: handle twig having parameter key (value@, default_unit@, adjust@, etc)
# TODO: does this return anything (update the docstring)?
return self.get_parameter(twig=twig, **kwargs).set_quantity(value=value, **kwargs) |
def group(self, meta=None, meta_aggregates=None, regs=None,
regs_aggregates=None, meta_group_name="_group"):
"""
*Wrapper of* ``GROUP``
The GROUP operator is used for grouping both regions and/or metadata of input
dataset samples according to distinct values of certain attributes (known as grouping
attributes); new grouping attributes are added to samples in the output dataset,
storing the results of aggregate function evaluations over metadata and/or regions
in each group of samples.
Samples having missing values for any of the grouping attributes are discarded.
:param meta: (optional) a list of metadata attributes
:param meta_aggregates: (optional) {'new_attr': fun}
:param regs: (optional) a list of region fields
:param regs_aggregates: {'new_attr': fun}
:param meta_group_name: (optional) the name to give to the group attribute in the
metadata
:return: a new GMQLDataset
Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value
calling the new region field `maxPvalue`::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")})
"""
if isinstance(meta, list) and \
all([isinstance(x, str) for x in meta]):
meta = Some(meta)
elif meta is None:
meta = none()
else:
raise TypeError("meta must be a list of strings. "
"{} was provided".format(type(meta)))
expBuild = self.pmg.getNewExpressionBuilder(self.__index)
if isinstance(meta_aggregates, dict):
metaAggregates = []
for k in meta_aggregates:
if isinstance(k, str):
item = meta_aggregates[k]
if isinstance(item, (SUM, MIN, MAX, AVG, BAG,
BAGD, STD, MEDIAN, COUNTSAMP)):
functionName = item.get_aggregate_name()
argument = item.get_argument()
if argument is None:
argument = none()
else:
argument = Some(argument)
metaAggregates.append(expBuild.createMetaAggregateFunction(functionName,
k, argument))
else:
raise TypeError("the item of the dictionary must be an Aggregate of the following: "
"SUM, MIN, MAX, AVG, BAG, BAGD, STD, COUNTSAMP. "
"{} was provided".format(type(item)))
else:
raise TypeError("keys of meta_aggregates must be string. "
"{} was provided".format(type(k)))
metaAggregates = Some(metaAggregates)
elif meta_aggregates is None:
metaAggregates = none()
else:
raise TypeError("meta_aggregates must be a dictionary of Aggregate functions. "
"{} was provided".format(type(meta_aggregates)))
if isinstance(regs, list) and \
all([isinstance(x, str) for x in regs]):
regs = Some(regs)
elif regs is None:
regs = none()
else:
raise TypeError("regs must be a list of strings. "
"{} was provided".format(type(regs)))
if isinstance(regs_aggregates, dict):
regionAggregates = []
for k in regs_aggregates.keys():
if isinstance(k, str):
item = regs_aggregates[k]
if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD,
MEDIAN, COUNT)):
op_name = item.get_aggregate_name()
op_argument = item.get_argument()
if op_argument is None:
op_argument = none()
else:
op_argument = Some(op_argument)
regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument)
regionAggregates.append(regsToReg)
else:
raise TypeError("the item of the dictionary must be an Aggregate of the following: "
"SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT. "
"{} was provided".format(type(item)))
else:
raise TypeError("The key of new_reg_fields must be a string. "
"{} was provided".format(type(k)))
regionAggregates = Some(regionAggregates)
elif regs_aggregates is None:
regionAggregates = none()
else:
raise TypeError("new_reg_fields must be a list of dictionary. "
"{} was provided".format(type(regs_aggregates)))
if isinstance(meta_group_name, str):
pass
else:
raise TypeError("meta_group_name must be a string. "
"{} was provided".format(type(meta_group_name)))
new_index = self.opmng.group(self.__index, meta, metaAggregates, meta_group_name, regs, regionAggregates)
return GMQLDataset(index=new_index, location=self.location,
local_sources=self._local_sources,
remote_sources=self._remote_sources,
meta_profile=self.meta_profile) | *Wrapper of* ``GROUP``
The GROUP operator is used for grouping both regions and/or metadata of input
dataset samples according to distinct values of certain attributes (known as grouping
attributes); new grouping attributes are added to samples in the output dataset,
storing the results of aggregate function evaluations over metadata and/or regions
in each group of samples.
Samples having missing values for any of the grouping attributes are discarded.
:param meta: (optional) a list of metadata attributes
:param meta_aggregates: (optional) {'new_attr': fun}
:param regs: (optional) a list of region fields
:param regs_aggregates: {'new_attr': fun}
:param meta_group_name: (optional) the name to give to the group attribute in the
metadata
:return: a new GMQLDataset
Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value
calling the new region field `maxPvalue`::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")}) | Below is the instruction that describes the task:
### Input:
*Wrapper of* ``GROUP``
The GROUP operator is used for grouping both regions and/or metadata of input
dataset samples according to distinct values of certain attributes (known as grouping
attributes); new grouping attributes are added to samples in the output dataset,
storing the results of aggregate function evaluations over metadata and/or regions
in each group of samples.
Samples having missing values for any of the grouping attributes are discarded.
:param meta: (optional) a list of metadata attributes
:param meta_aggregates: (optional) {'new_attr': fun}
:param regs: (optional) a list of region fields
:param regs_aggregates: {'new_attr': fun}
:param meta_group_name: (optional) the name to give to the group attribute in the
metadata
:return: a new GMQLDataset
Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value
calling the new region field `maxPvalue`::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")})
### Response:
def group(self, meta=None, meta_aggregates=None, regs=None,
regs_aggregates=None, meta_group_name="_group"):
"""
*Wrapper of* ``GROUP``
The GROUP operator is used for grouping both regions and/or metadata of input
dataset samples according to distinct values of certain attributes (known as grouping
attributes); new grouping attributes are added to samples in the output dataset,
storing the results of aggregate function evaluations over metadata and/or regions
in each group of samples.
Samples having missing values for any of the grouping attributes are discarded.
:param meta: (optional) a list of metadata attributes
:param meta_aggregates: (optional) {'new_attr': fun}
:param regs: (optional) a list of region fields
:param regs_aggregates: {'new_attr': fun}
:param meta_group_name: (optional) the name to give to the group attribute in the
metadata
:return: a new GMQLDataset
Example of usage. We group samples by `antibody` and we aggregate the region pvalues taking the maximum value
calling the new region field `maxPvalue`::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
result = d1.group(meta=['antibody'], regs_aggregates={'maxPvalue': gl.MAX("pvalue")})
"""
if isinstance(meta, list) and \
all([isinstance(x, str) for x in meta]):
meta = Some(meta)
elif meta is None:
meta = none()
else:
raise TypeError("meta must be a list of strings. "
"{} was provided".format(type(meta)))
expBuild = self.pmg.getNewExpressionBuilder(self.__index)
if isinstance(meta_aggregates, dict):
metaAggregates = []
for k in meta_aggregates:
if isinstance(k, str):
item = meta_aggregates[k]
if isinstance(item, (SUM, MIN, MAX, AVG, BAG,
BAGD, STD, MEDIAN, COUNTSAMP)):
functionName = item.get_aggregate_name()
argument = item.get_argument()
if argument is None:
argument = none()
else:
argument = Some(argument)
metaAggregates.append(expBuild.createMetaAggregateFunction(functionName,
k, argument))
else:
raise TypeError("the item of the dictionary must be an Aggregate of the following: "
"SUM, MIN, MAX, AVG, BAG, BAGD, STD, COUNTSAMP. "
"{} was provided".format(type(item)))
else:
raise TypeError("keys of meta_aggregates must be string. "
"{} was provided".format(type(k)))
metaAggregates = Some(metaAggregates)
elif meta_aggregates is None:
metaAggregates = none()
else:
raise TypeError("meta_aggregates must be a dictionary of Aggregate functions. "
"{} was provided".format(type(meta_aggregates)))
if isinstance(regs, list) and \
all([isinstance(x, str) for x in regs]):
regs = Some(regs)
elif regs is None:
regs = none()
else:
raise TypeError("regs must be a list of strings. "
"{} was provided".format(type(regs)))
if isinstance(regs_aggregates, dict):
regionAggregates = []
for k in regs_aggregates.keys():
if isinstance(k, str):
item = regs_aggregates[k]
if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD,
MEDIAN, COUNT)):
op_name = item.get_aggregate_name()
op_argument = item.get_argument()
if op_argument is None:
op_argument = none()
else:
op_argument = Some(op_argument)
regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument)
regionAggregates.append(regsToReg)
else:
raise TypeError("the item of the dictionary must be an Aggregate of the following: "
"SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT. "
"{} was provided".format(type(item)))
else:
raise TypeError("The key of new_reg_fields must be a string. "
"{} was provided".format(type(k)))
regionAggregates = Some(regionAggregates)
elif regs_aggregates is None:
regionAggregates = none()
else:
raise TypeError("new_reg_fields must be a list of dictionary. "
"{} was provided".format(type(regs_aggregates)))
if isinstance(meta_group_name, str):
pass
else:
raise TypeError("meta_group_name must be a string. "
"{} was provided".format(type(meta_group_name)))
new_index = self.opmng.group(self.__index, meta, metaAggregates, meta_group_name, regs, regionAggregates)
return GMQLDataset(index=new_index, location=self.location,
local_sources=self._local_sources,
remote_sources=self._remote_sources,
meta_profile=self.meta_profile) |
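A second, hypothetical usage in the same style as the docstring example, this time also aggregating a metadata attribute; the 'cell' attribute name is an assumption about the example dataset:

    import gmql as gl

    d1 = gl.get_example_dataset("Example_Dataset_1")
    # group by antibody and collect the cell values seen in each group
    result = d1.group(meta=['antibody'], meta_aggregates={'cells': gl.BAG('cell')})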
def detokenize(self, inputs, delim=' '):
"""
Detokenizes single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence
"""
detok = delim.join([self.idx2tok[idx] for idx in inputs])
detok = detok.replace(self.separator + ' ', '')
detok = detok.replace(self.separator, '')
detok = detok.replace(config.BOS_TOKEN, '')
detok = detok.replace(config.EOS_TOKEN, '')
detok = detok.replace(config.PAD_TOKEN, '')
detok = detok.strip()
return detok | Detokenizes single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence | Below is the instruction that describes the task:
### Input:
Detokenizes single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence
### Response:
def detokenize(self, inputs, delim=' '):
"""
Detokenizes single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence
"""
detok = delim.join([self.idx2tok[idx] for idx in inputs])
detok = detok.replace(self.separator + ' ', '')
detok = detok.replace(self.separator, '')
detok = detok.replace(config.BOS_TOKEN, '')
detok = detok.replace(config.EOS_TOKEN, '')
detok = detok.replace(config.PAD_TOKEN, '')
detok = detok.strip()
return detok |
def main(args):
'''main entry point of app
Arguments:
args {namespace} -- arguments provided in cli
'''
print("\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\n")
#########################
# #
# #
# prompt #
# #
# #
#########################
if not len(sys.argv) > 1:
initialAnswers = askInitial()
inputPath = pathlib.Path(initialAnswers['inputPath'])
year = int(initialAnswers['year'])
# create a list from every row
badFormat = badFormater(inputPath) # create a list from every row
howManyCandidates = len(badFormat) - 1
length = int(len(badFormat['Cand'])/2)
finalReturn = []
if "Get your rank in the year" in initialAnswers['whatToDo']:
candidateNumber = askCandidateNumber()
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
rank = myRank(weightedAverage, badFormat, year, length)
if "Get your weighted average" in initialAnswers['whatToDo']:
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(
rank, howManyCandidates, (rank * 100) / howManyCandidates))
elif "Get your weighted average" in initialAnswers['whatToDo']:
candidateNumber = askCandidateNumber()
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
if "Reformat results by module and output to csv" in initialAnswers['whatToDo']:
formatOutputPath = pathlib.Path(askFormat())
goodFormat = goodFormater(badFormat, formatOutputPath, year, length)
if "Plot the results by module" in initialAnswers['whatToDo']:
howPlotAsk(goodFormat)
elif "Plot the results by module" in initialAnswers['whatToDo']:
goodFormat = goodFormater(badFormat, None, year, length)
howPlotAsk(goodFormat)
[print('\n', x) for x in finalReturn]
#########################
# #
# end #
# prompt #
# #
# #
#########################
#########################
# #
# #
# run with #
# cli args #
# #
#########################
if len(sys.argv) > 1:
if not args.input:
inputPath = pathlib.Path(askInput())
else:
inputPath = pathlib.Path(args.input)
if not args.year:
year = int(askYear())
else:
year = int(args.year)
# create a list from every row
badFormat = badFormater(inputPath) # create a list from every row
howManyCandidates = len(badFormat) - 1
length = int(len(badFormat['Cand'])/2)
finalReturn = []
if args.rank:
if not args.candidate:
candidateNumber = askCandidateNumber()
else:
candidateNumber = args.candidate
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
rank = myRank(weightedAverage, badFormat, year, length)
if args.my:
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(
rank, howManyCandidates, (rank * 100) / howManyCandidates))
elif args.my:
if not args.candidate:
candidateNumber = askCandidateNumber()
else:
candidateNumber = args.candidate
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
if args.format is not None:
formatOutputPath = pathlib.Path(args.format)
goodFormat = goodFormater(badFormat, formatOutputPath, year, length)
if args.plot:
howPlotArgs(goodFormat)
elif args.plot:
goodFormat = goodFormater(badFormat, None, year, length)
howPlotArgs(goodFormat)
[print('\n', x) for x in finalReturn]
#########################
# #
# end #
# run with #
# cli args #
# #
#########################
print('') | main entry point of app
Arguments:
args {namespace} -- arguments provided in cli | Below is the instruction that describes the task:
### Input:
main entry point of app
Arguments:
args {namespace} -- arguments provided in cli
### Response:
def main(args):
'''main entry point of app
Arguments:
args {namespace} -- arguments provided in cli
'''
print("\nNote it's very possible that this doesn't work correctly so take what it gives with a bucketload of salt\n")
#########################
# #
# #
# prompt #
# #
# #
#########################
if not len(sys.argv) > 1:
initialAnswers = askInitial()
inputPath = pathlib.Path(initialAnswers['inputPath'])
year = int(initialAnswers['year'])
# create a list from every row
badFormat = badFormater(inputPath) # create a list from every row
howManyCandidates = len(badFormat) - 1
length = int(len(badFormat['Cand'])/2)
finalReturn = []
if "Get your rank in the year" in initialAnswers['whatToDo']:
candidateNumber = askCandidateNumber()
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
rank = myRank(weightedAverage, badFormat, year, length)
if "Get your weighted average" in initialAnswers['whatToDo']:
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(
rank, howManyCandidates, (rank * 100) / howManyCandidates))
elif "Get your weighted average" in initialAnswers['whatToDo']:
candidateNumber = askCandidateNumber()
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
if "Reformat results by module and output to csv" in initialAnswers['whatToDo']:
formatOutputPath = pathlib.Path(askFormat())
goodFormat = goodFormater(badFormat, formatOutputPath, year, length)
if "Plot the results by module" in initialAnswers['whatToDo']:
howPlotAsk(goodFormat)
elif "Plot the results by module" in initialAnswers['whatToDo']:
goodFormat = goodFormater(badFormat, None, year, length)
howPlotAsk(goodFormat)
[print('\n', x) for x in finalReturn]
#########################
# #
# end #
# prompt #
# #
# #
#########################
#########################
# #
# #
# run with #
# cli args #
# #
#########################
if len(sys.argv) > 1:
if not args.input:
inputPath = pathlib.Path(askInput())
else:
inputPath = pathlib.Path(args.input)
if not args.year:
year = int(askYear())
else:
year = int(args.year)
# create a list from every row
badFormat = badFormater(inputPath) # create a list from every row
howManyCandidates = len(badFormat) - 1
length = int(len(badFormat['Cand'])/2)
finalReturn = []
if args.rank:
if not args.candidate:
candidateNumber = askCandidateNumber()
else:
candidateNumber = args.candidate
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
rank = myRank(weightedAverage, badFormat, year, length)
if args.my:
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
finalReturn.append('Your rank is {}th of {} ({:.2f} percentile)'.format(
rank, howManyCandidates, (rank * 100) / howManyCandidates))
elif args.my:
if not args.candidate:
candidateNumber = askCandidateNumber()
else:
candidateNumber = args.candidate
weightedAverage = myGrades(year, candidateNumber, badFormat, length)
finalReturn.append('Your weighted average for the year is: {:.2f}%'.format(
weightedAverage))
if args.format is not None:
formatOutputPath = pathlib.Path(args.format)
goodFormat = goodFormater(badFormat, formatOutputPath, year, length)
if args.plot:
howPlotArgs(goodFormat)
elif args.plot:
goodFormat = goodFormater(badFormat, None, year, length)
howPlotArgs(goodFormat)
[print('\n', x) for x in finalReturn]
#########################
# #
# end #
# run with #
# cli args #
# #
#########################
print('') |
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
"""Wrapper method providing access to the SQLScript class's methods and properties."""
return Execute(sql_script, split_algo, prep_statements, dump_fails, self) | Wrapper method providing access to the SQLScript class's methods and properties. | Below is the instruction that describes the task:
### Input:
Wrapper method providing access to the SQLScript class's methods and properties.
### Response:
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
"""Wrapper method providing access to the SQLScript class's methods and properties."""
return Execute(sql_script, split_algo, prep_statements, dump_fails, self) |
def add(self, service, workers=1, args=None, kwargs=None):
"""Add a new service to the ServiceManager
:param service: callable that returns an instance of :py:class:`Service`
:type service: callable
:param workers: number of processes/workers for this service
:type workers: int
:param args: additional positional arguments for this service
:type args: tuple
:param kwargs: additional keyword arguments for this service
:type kwargs: dict
:return: a service id
:rtype: uuid.uuid4
"""
_utils.check_callable(service, 'service')
_utils.check_workers(workers, 1)
service_id = uuid.uuid4()
self._services[service_id] = _service.ServiceConfig(
service_id, service, workers, args, kwargs)
return service_id | Add a new service to the ServiceManager
:param service: callable that returns an instance of :py:class:`Service`
:type service: callable
:param workers: number of processes/workers for this service
:type workers: int
:param args: additional positional arguments for this service
:type args: tuple
:param kwargs: additional keyword arguments for this service
:type kwargs: dict
:return: a service id
:rtype: uuid.uuid4 | Below is the instruction that describes the task:
### Input:
Add a new service to the ServiceManager
:param service: callable that returns an instance of :py:class:`Service`
:type service: callable
:param workers: number of processes/workers for this service
:type workers: int
:param args: additional positional arguments for this service
:type args: tuple
:param kwargs: additional keyword arguments for this service
:type kwargs: dict
:return: a service id
:rtype: uuid.uuid4
### Response:
def add(self, service, workers=1, args=None, kwargs=None):
"""Add a new service to the ServiceManager
:param service: callable that returns an instance of :py:class:`Service`
:type service: callable
:param workers: number of processes/workers for this service
:type workers: int
:param args: additional positional arguments for this service
:type args: tuple
:param kwargs: additional keyword arguments for this service
:type kwargs: dict
:return: a service id
:rtype: uuid.uuid4
"""
_utils.check_callable(service, 'service')
_utils.check_workers(workers, 1)
service_id = uuid.uuid4()
self._services[service_id] = _service.ServiceConfig(
service_id, service, workers, args, kwargs)
return service_id |
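A minimal sketch of how this registration is typically wired up, assuming the cotyledon-style Service/ServiceManager classes this method appears to belong to; MyWorker is a placeholder:

    import cotyledon

    class MyWorker(cotyledon.Service):
        def run(self):
            pass  # long-running work goes here

    manager = cotyledon.ServiceManager()
    service_id = manager.add(MyWorker, workers=2)
    manager.run()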
def setup_top(self):
"""Create top-level elements of the hybrid schema."""
self.top_grammar = SchemaNode("grammar")
self.top_grammar.attr = {
"xmlns": "http://relaxng.org/ns/structure/1.0",
"datatypeLibrary": "http://www.w3.org/2001/XMLSchema-datatypes"}
self.tree = SchemaNode("start") | Create top-level elements of the hybrid schema. | Below is the the instruction that describes the task:
### Input:
Create top-level elements of the hybrid schema.
### Response:
def setup_top(self):
"""Create top-level elements of the hybrid schema."""
self.top_grammar = SchemaNode("grammar")
self.top_grammar.attr = {
"xmlns": "http://relaxng.org/ns/structure/1.0",
"datatypeLibrary": "http://www.w3.org/2001/XMLSchema-datatypes"}
self.tree = SchemaNode("start") |
def _ask_password():
"""Securely and interactively ask for a password"""
password = "Foo"
password_trial = ""
while password != password_trial:
password = getpass.getpass()
password_trial = getpass.getpass(prompt="Repeat:")
if password != password_trial:
print("\nPasswords do not match!")
return password | Securely and interactively ask for a password | Below is the instruction that describes the task:
### Input:
Securely and interactively ask for a password
### Response:
def _ask_password():
"""Securely and interactively ask for a password"""
password = "Foo"
password_trial = ""
while password != password_trial:
password = getpass.getpass()
password_trial = getpass.getpass(prompt="Repeat:")
if password != password_trial:
print("\nPasswords do not match!")
return password |
def dynacRepresentation(self):
"""
Return the Pynac representation of this Set4DAperture instance.
"""
details = [
self.energyDefnFlag.val,
self.energy.val,
self.phase.val,
self.x.val,
self.y.val,
self.radius.val,
]
return ['REJECT', [details]] | Return the Pynac representation of this Set4DAperture instance. | Below is the instruction that describes the task:
### Input:
Return the Pynac representation of this Set4DAperture instance.
### Response:
def dynacRepresentation(self):
"""
Return the Pynac representation of this Set4DAperture instance.
"""
details = [
self.energyDefnFlag.val,
self.energy.val,
self.phase.val,
self.x.val,
self.y.val,
self.radius.val,
]
return ['REJECT', [details]] |
def version_list(package):
"""
List the versions of a package.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash'])) | List the versions of a package. | Below is the the instruction that describes the task:
### Input:
List the versions of a package.
### Response:
def version_list(package):
"""
List the versions of a package.
"""
team, owner, pkg = parse_package(package)
session = _get_session(team)
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=get_registry_url(team),
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash'])) |
def get_root_path(self, name):
"""
Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
"""
module = modules.get(name)
if module is not None and hasattr(module, '__file__'):
return dirname(abspath(module.__file__))
# Flask keeps looking at this point. We instead set the root path to None,
# assume that the user doesn't need resource loading, and raise an error
# when resolving the resource path.
return None | Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777 | Below is the instruction that describes the task:
### Input:
Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
### Response:
def get_root_path(self, name):
"""
Attempt to compute a root path for a (hopefully importable) name.
Based in part on Flask's `root_path` calculation. See:
https://github.com/mitsuhiko/flask/blob/master/flask/helpers.py#L777
"""
module = modules.get(name)
if module is not None and hasattr(module, '__file__'):
return dirname(abspath(module.__file__))
# Flask keeps looking at this point. We instead set the root path to None,
# assume that the user doesn't need resource loading, and raise an error
# when resolving the resource path.
return None |
def scatter(self, *args, **kwargs):
"""Adds a :py:class:`.ScatterSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param Number size: The size of each data point - generally the diameter.
:param Number linewidth: The width in pixels of the data points' edge.
:raises ValueError: if the size and length of the data doesn't match\
either format."""
if "color" not in kwargs:
kwargs["color"] = self.next_color()
series = ScatterSeries(*args, **kwargs)
self.add_series(series) | Adds a :py:class:`.ScatterSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param Number size: The size of each data point - generally the diameter.
:param Number linewidth: The width in pixels of the data points' edge.
:raises ValueError: if the size and length of the data doesn't match\
either format. | Below is the instruction that describes the task:
### Input:
Adds a :py:class:`.ScatterSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param Number size: The size of each data point - generally the diameter.
:param Number linewidth: The width in pixels of the data points' edge.
:raises ValueError: if the size and length of the data doesn't match\
either format.
### Response:
def scatter(self, *args, **kwargs):
"""Adds a :py:class:`.ScatterSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param Number size: The size of each data point - generally the diameter.
:param Number linewidth: The width in pixels of the data points' edge.
:raises ValueError: if the size and length of the data doesn't match\
either format."""
if "color" not in kwargs:
kwargs["color"] = self.next_color()
series = ScatterSeries(*args, **kwargs)
self.add_series(series) |
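A sketch of the two accepted data layouts, assuming chart is an object exposing this method:

    # point-by-point: each positional argument is an (x, y) pair
    chart.scatter((1, 1), (2, 4), (3, 9), name='squares', size=6)

    # or two parallel sequences of x and y values
    chart.scatter([1, 2, 3], [1, 4, 9], name='squares', color='#FF0000')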
def _read_footer(file_obj):
"""Read the footer from the given file object and returns a FileMetaData object.
This method assumes that the fo references a valid parquet file.
"""
footer_size = _get_footer_size(file_obj)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Footer size in bytes: %s", footer_size)
file_obj.seek(-(8 + footer_size), 2) # seek to beginning of footer
tin = TFileTransport(file_obj)
pin = TCompactProtocolFactory().get_protocol(tin)
fmd = parquet_thrift.FileMetaData()
fmd.read(pin)
return fmd | Read the footer from the given file object and return a FileMetaData object.
This method assumes that file_obj references a valid parquet file. | Below is the instruction that describes the task:
### Input:
Read the footer from the given file object and return a FileMetaData object.
This method assumes that file_obj references a valid parquet file.
### Response:
def _read_footer(file_obj):
"""Read the footer from the given file object and returns a FileMetaData object.
This method assumes that the fo references a valid parquet file.
"""
footer_size = _get_footer_size(file_obj)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Footer size in bytes: %s", footer_size)
file_obj.seek(-(8 + footer_size), 2) # seek to beginning of footer
tin = TFileTransport(file_obj)
pin = TCompactProtocolFactory().get_protocol(tin)
fmd = parquet_thrift.FileMetaData()
fmd.read(pin)
return fmd |
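Illustrative use of this module-private helper on an existing parquet file (the path is a placeholder); the fields printed come from the parquet thrift FileMetaData definition:

    with open('example.parquet', 'rb') as f:
        fmd = _read_footer(f)
        print(fmd.num_rows, len(fmd.row_groups))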
def p_kwl_kwl(self, p):
''' kwl : kwl SEPARATOR kwl
'''
_LOGGER.debug("kwl -> kwl ; kwl")
if p[3] is not None:
p[0] = p[3]
elif p[1] is not None:
p[0] = p[1]
else:
p[0] = TypedClass(None, TypedClass.UNKNOWN) | kwl : kwl SEPARATOR kwl | Below is the instruction that describes the task:
### Input:
kwl : kwl SEPARATOR kwl
### Response:
def p_kwl_kwl(self, p):
''' kwl : kwl SEPARATOR kwl
'''
_LOGGER.debug("kwl -> kwl ; kwl")
if p[3] is not None:
p[0] = p[3]
elif p[1] is not None:
p[0] = p[1]
else:
p[0] = TypedClass(None, TypedClass.UNKNOWN) |
def auth(self):
"""
Access the auth
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
"""
if self._auth is None:
self._auth = AuthTypesList(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['sid'],
)
return self._auth | Access the auth
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList | Below is the instruction that describes the task:
### Input:
Access the auth
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
### Response:
def auth(self):
"""
Access the auth
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
"""
if self._auth is None:
self._auth = AuthTypesList(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['sid'],
)
return self._auth |
def get_groups_from_category(self, category) -> typing.Iterator['Group']:
"""
Args:
category: group category
Returns: generator over all groups from a specific category in this coalition
"""
Mission.validator_group_category.validate(category, 'get_groups_from_category')
for group in self.groups:
if group.group_category == category:
yield group | Args:
category: group category
Returns: generator over all groups from a specific category in this coalition | Below is the instruction that describes the task:
### Input:
Args:
category: group category
Returns: generator over all groups from a specific category in this coalition
### Response:
def get_groups_from_category(self, category) -> typing.Iterator['Group']:
"""
Args:
category: group category
Returns: generator over all groups from a specific category in this coalition
"""
Mission.validator_group_category.validate(category, 'get_groups_from_category')
for group in self.groups:
if group.group_category == category:
yield group |
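A hypothetical iteration over one category; the category value is an assumption about the surrounding mission model:

    for group in coalition.get_groups_from_category('vehicle'):
        # groups are yielded lazily as the coalition's group list is scanned
        print(group)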
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code | Run a command on a unit, return the output and exit code. | Below is the instruction that describes the task:
### Input:
Run a command on a unit, return the output and exit code.
### Response:
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code |
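A typical call from inside an amulet-based test, assuming a deployment sentry is available; the service name is a placeholder:

    unit = self.d.sentry['keystone'][0]
    output, code = self.run_cmd_unit(unit, 'uname -r')
    # non-zero exit codes already abort the test inside the helper
    print(output)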
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
"Cargo el formato de campos a generar desde una planilla CSV"
# si no encuentro archivo, lo busco en el directorio predeterminado:
if not os.path.exists(archivo):
archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))
if DEBUG: print "abriendo archivo ", archivo
# initialize the list of elements:
self.elements = []
for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
if DEBUG: print "procesando linea ", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
# fix the relative path for the images:
if args[1] == 'I':
if not os.path.exists(args[14]):
args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
if DEBUG: print "NUEVO PATH:", args[14]
self.AgregarCampoPDF(*args)
self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
if HOMO:
self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
# load the elements into the template
self.template.load_elements(self.elements)
return True | Load the field layout to generate from a CSV spreadsheet | Below is the instruction that describes the task:
### Input:
Load the field layout to generate from a CSV spreadsheet
### Response:
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
"Cargo el formato de campos a generar desde una planilla CSV"
# si no encuentro archivo, lo busco en el directorio predeterminado:
if not os.path.exists(archivo):
archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))
if DEBUG: print "abriendo archivo ", archivo
# initialize the list of elements:
self.elements = []
for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
if DEBUG: print "procesando linea ", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
# fix the relative path for the images:
if args[1] == 'I':
if not os.path.exists(args[14]):
args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
if DEBUG: print "NUEVO PATH:", args[14]
self.AgregarCampoPDF(*args)
self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
if HOMO:
self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
# cargo los elementos en la plantilla
self.template.load_elements(self.elements)
return True |
def download_handler(feed, placeholders):
import shlex
"""
Parse and execute the download handler
"""
value = feed.retrieve_config('downloadhandler', 'greg')
if value == 'greg':
while os.path.isfile(placeholders.fullpath):
placeholders.fullpath = placeholders.fullpath + '_'
placeholders.filename = placeholders.filename + '_'
urlretrieve(placeholders.link, placeholders.fullpath)
else:
value_list = shlex.split(value)
instruction_list = [substitute_placeholders(part, placeholders) for
part in value_list]
returncode = subprocess.call(instruction_list)
if returncode:
raise URLError | Parse and execute the download handler | Below is the instruction that describes the task:
### Input:
Parse and execute the download handler
### Response:
def download_handler(feed, placeholders):
import shlex
"""
Parse and execute the download handler
"""
value = feed.retrieve_config('downloadhandler', 'greg')
if value == 'greg':
while os.path.isfile(placeholders.fullpath):
placeholders.fullpath = placeholders.fullpath + '_'
placeholders.filename = placeholders.filename + '_'
urlretrieve(placeholders.link, placeholders.fullpath)
else:
value_list = shlex.split(value)
instruction_list = [substitute_placeholders(part, placeholders) for
part in value_list]
returncode = subprocess.call(instruction_list)
if returncode:
raise URLError |
def print_response(self, input='', keep=False, *args, **kwargs):
"""
print the response; if a cookie is set, prefix each output line with it
:param args:
:param keep: if True more output is to come
:param cookie: set a custom cookie,
if set to 'None' then self.cookie will be used.
if set to 'False' disables cookie output entirely
:return:
"""
cookie = kwargs.get('cookie')
if cookie is None:
cookie = self.cookie or ''
status = kwargs.get('status')
lines = input.splitlines()
if status and not lines:
lines = ['']
if cookie:
output_template = '{cookie} {status}{cookie_char}{line}'
else:
output_template = '{line}'
for i, line in enumerate(lines):
if i != len(lines) - 1 or keep is True:
cookie_char = '>'
else:
# last line
cookie_char = ':'
print(output_template.format(
cookie_char=cookie_char,
cookie=cookie,
status=status or '',
line=line.strip()), file=self.stdout) | print response, if cookie is set then print that each line
:param args:
:param keep: if True more output is to come
:param cookie: set a custom cookie,
if set to 'None' then self.cookie will be used.
if set to 'False' disables cookie output entirely
:return: | Below is the instruction that describes the task:
### Input:
print the response; if a cookie is set, prefix each output line with it
:param args:
:param keep: if True more output is to come
:param cookie: set a custom cookie,
if set to 'None' then self.cookie will be used.
if set to 'False' disables cookie output entirely
:return:
### Response:
def print_response(self, input='', keep=False, *args, **kwargs):
"""
print the response; if a cookie is set, prefix each output line with it
:param args:
:param keep: if True more output is to come
:param cookie: set a custom cookie,
if set to 'None' then self.cookie will be used.
if set to 'False' disables cookie output entirely
:return:
"""
cookie = kwargs.get('cookie')
if cookie is None:
cookie = self.cookie or ''
status = kwargs.get('status')
lines = input.splitlines()
if status and not lines:
lines = ['']
if cookie:
output_template = '{cookie} {status}{cookie_char}{line}'
else:
output_template = '{line}'
for i, line in enumerate(lines):
if i != len(lines) - 1 or keep is True:
cookie_char = '>'
else:
# last line
cookie_char = ':'
print(output_template.format(
cookie_char=cookie_char,
cookie=cookie,
status=status or '',
line=line.strip()), file=self.stdout) |
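A small self-contained sketch of the formatting used by print_response above (the helper name and sample values are assumptions, not project code): every line but the last is suffixed with '>', the last line with ':'.

def format_lines(text, cookie='APP', status='200'):
    lines = text.splitlines() or ['']
    template = '{cookie} {status}{cookie_char}{line}' if cookie else '{line}'
    out = []
    for i, line in enumerate(lines):
        cookie_char = '>' if i != len(lines) - 1 else ':'
        out.append(template.format(cookie=cookie, status=status,
                                   cookie_char=cookie_char, line=line.strip()))
    return out

# format_lines('first\nsecond') == ['APP 200>first', 'APP 200:second']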
def tempdir(cls, suffix='', prefix=None, dir=None):
"""Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it.
"""
if prefix is None:
prefix = tempfile.template
if dir is not None:
# Note that this is not safe on Python 2
        # There is no workaround, apart from not using the tempfile module
dir = str(Path(dir))
dirname = tempfile.mkdtemp(suffix, prefix, dir)
return cls(dirname).absolute() | Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it. | Below is the the instruction that describes the task:
### Input:
Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it.
### Response:
def tempdir(cls, suffix='', prefix=None, dir=None):
"""Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it.
"""
if prefix is None:
prefix = tempfile.template
if dir is not None:
# Note that this is not safe on Python 2
        # There is no workaround, apart from not using the tempfile module
dir = str(Path(dir))
dirname = tempfile.mkdtemp(suffix, prefix, dir)
return cls(dirname).absolute() |
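For comparison, a bare-bones equivalent of tempdir above using only the standard library; as the docstring notes, cleanup stays with the caller.

import shutil
import tempfile

dirname = tempfile.mkdtemp(suffix='', prefix=tempfile.template, dir=None)
try:
    pass  # work with the private, user-only directory here
finally:
    shutil.rmtree(dirname)  # the caller is responsible for deleting it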
def fit(self, order=3):
"""
    Overridden since this eos works with volume**(2/3) instead of volume.
"""
x = self.volumes**(-2./3.)
self.eos_params = np.polyfit(x, self.energies, order)
    self._set_params() | Overridden since this eos works with volume**(2/3) instead of volume. | Below is the the instruction that describes the task:
### Input:
Overridden since this eos works with volume**(2/3) instead of volume.
### Response:
def fit(self, order=3):
"""
    Overridden since this eos works with volume**(2/3) instead of volume.
"""
x = self.volumes**(-2./3.)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params() |
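A short illustration of fit above with made-up numbers: the energies are fitted as a polynomial in x = V**(-2/3) rather than in the volume itself.

import numpy as np

volumes = np.array([10.0, 11.0, 12.0, 13.0, 14.0])        # illustrative values only
energies = np.array([-1.00, -1.20, -1.30, -1.25, -1.10])  # illustrative values only

x = volumes ** (-2.0 / 3.0)
eos_params = np.polyfit(x, energies, 3)  # the same call the method makes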
def get(config, messages, freq, pidDir=None, reactor=None):
"""Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService}
"""
ret = taservice.MultiService()
args = ()
if reactor is not None:
args = reactor,
procmon = procmonlib.ProcessMonitor(*args)
if pidDir is not None:
protocols = TransportDirectoryDict(pidDir)
procmon.protocols = protocols
procmon.setName('procmon')
receiver = process_events.Receiver(procmon)
confcheck = directory_monitor.checker(config, receiver)
confserv = internet.TimerService(freq, confcheck)
confserv.setServiceParent(ret)
messagecheck = directory_monitor.messages(messages, receiver)
messageserv = internet.TimerService(freq, messagecheck)
messageserv.setServiceParent(ret)
procmon.setServiceParent(ret)
return ret | Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService} | Below is the the instruction that describes the task:
### Input:
Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService}
### Response:
def get(config, messages, freq, pidDir=None, reactor=None):
"""Return a service which monitors processes based on directory contents
Construct and return a service that, when started, will run processes
based on the contents of the 'config' directory, restarting them
if file contents change and stopping them if the file is removed.
It also listens for restart and restart-all messages on the 'messages'
directory.
:param config: string, location of configuration directory
:param messages: string, location of messages directory
:param freq: number, frequency to check for new messages and configuration
updates
:param pidDir: {twisted.python.filepath.FilePath} or None,
location to keep pid files
:param reactor: something implementing the interfaces
{twisted.internet.interfaces.IReactorTime} and
{twisted.internet.interfaces.IReactorProcess} and
:returns: service, {twisted.application.interfaces.IService}
"""
ret = taservice.MultiService()
args = ()
if reactor is not None:
args = reactor,
procmon = procmonlib.ProcessMonitor(*args)
if pidDir is not None:
protocols = TransportDirectoryDict(pidDir)
procmon.protocols = protocols
procmon.setName('procmon')
receiver = process_events.Receiver(procmon)
confcheck = directory_monitor.checker(config, receiver)
confserv = internet.TimerService(freq, confcheck)
confserv.setServiceParent(ret)
messagecheck = directory_monitor.messages(messages, receiver)
messageserv = internet.TimerService(freq, messagecheck)
messageserv.setServiceParent(ret)
procmon.setServiceParent(ret)
return ret |
def get_finder(import_path):
"""
Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFileSystemFinder):
raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path))
return Finder() | Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder | Below is the the instruction that describes the task:
### Input:
Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder
### Response:
def get_finder(import_path):
"""
Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFileSystemFinder):
raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path))
return Finder() |
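A rough sketch of what an import_string-style helper does, assuming importlib semantics; the project's actual implementation may differ.

import importlib

def import_string_sketch(import_path):
    module_path, _, attr_name = import_path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)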
def flatten(sequence, levels = 1):
"""
Example:
>>> nested = [[1,2], [[3]]]
>>> list(flatten(nested))
[1, 2, [3]]
"""
if levels == 0:
for x in sequence:
yield x
else:
for x in sequence:
for y in flatten(x, levels - 1):
yield y | Example:
>>> nested = [[1,2], [[3]]]
>>> list(flatten(nested))
[1, 2, [3]] | Below is the the instruction that describes the task:
### Input:
Example:
>>> nested = [[1,2], [[3]]]
>>> list(flatten(nested))
[1, 2, [3]]
### Response:
def flatten(sequence, levels = 1):
"""
Example:
>>> nested = [[1,2], [[3]]]
>>> list(flatten(nested))
[1, 2, [3]]
"""
if levels == 0:
for x in sequence:
yield x
else:
for x in sequence:
for y in flatten(x, levels - 1):
yield y |
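Usage notes for flatten above, assuming the generator is in scope: levels=0 leaves items untouched, the default peels one level, and more deeply nested inputs can be flattened further.

nested = [[1, 2], [[3]]]
assert list(flatten(nested, levels=0)) == [[1, 2], [[3]]]
assert list(flatten(nested)) == [1, 2, [3]]
assert list(flatten([[[1], [2]], [[3]]], levels=2)) == [1, 2, 3]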
def _writeStructureLink(self, link, fileObject, replaceParamFile):
"""
Write Structure Link to File Method
"""
fileObject.write('%s\n' % link.type)
fileObject.write('NUMSTRUCTS %s\n' % link.numElements)
# Retrieve lists of structures
weirs = link.weirs
culverts = link.culverts
# Write weirs to file
for weir in weirs:
fileObject.write('STRUCTTYPE %s\n' % weir.type)
# Check for replacement vars
crestLength = vwp(weir.crestLength, replaceParamFile)
crestLowElevation = vwp(weir.crestLowElevation, replaceParamFile)
dischargeCoeffForward = vwp(weir.dischargeCoeffForward, replaceParamFile)
dischargeCoeffReverse = vwp(weir.dischargeCoeffReverse, replaceParamFile)
crestLowLocation = vwp(weir.crestLowLocation, replaceParamFile)
steepSlope = vwp(weir.steepSlope, replaceParamFile)
shallowSlope = vwp(weir.shallowSlope, replaceParamFile)
if weir.crestLength != None:
try:
fileObject.write('CREST_LENGTH %.6f\n' % crestLength)
except:
fileObject.write('CREST_LENGTH %s\n' % crestLength)
if weir.crestLowElevation != None:
try:
fileObject.write('CREST_LOW_ELEV %.6f\n' % crestLowElevation)
except:
fileObject.write('CREST_LOW_ELEV %s\n' % crestLowElevation)
if weir.dischargeCoeffForward != None:
try:
fileObject.write('DISCHARGE_COEFF_FORWARD %.6f\n' % dischargeCoeffForward)
except:
fileObject.write('DISCHARGE_COEFF_FORWARD %s\n' % dischargeCoeffForward)
if weir.dischargeCoeffReverse != None:
try:
fileObject.write('DISCHARGE_COEFF_REVERSE %.6f\n' % dischargeCoeffReverse)
except:
fileObject.write('DISCHARGE_COEFF_REVERSE %s\n' % dischargeCoeffReverse)
if weir.crestLowLocation != None:
fileObject.write('CREST_LOW_LOC %s\n' % crestLowLocation)
if weir.steepSlope != None:
try:
fileObject.write('STEEP_SLOPE %.6f\n' % steepSlope)
except:
fileObject.write('STEEP_SLOPE %s\n' % steepSlope)
if weir.shallowSlope != None:
try:
fileObject.write('SHALLOW_SLOPE %.6f\n' % shallowSlope)
except:
fileObject.write('SHALLOW_SLOPE %s\n' % shallowSlope)
# Write culverts to file
for culvert in culverts:
fileObject.write('STRUCTTYPE %s\n' % culvert.type)
# Check for replacement vars
upstreamInvert = vwp(culvert.upstreamInvert, replaceParamFile)
downstreamInvert = vwp(culvert.downstreamInvert, replaceParamFile)
inletDischargeCoeff = vwp(culvert.inletDischargeCoeff, replaceParamFile)
reverseFlowDischargeCoeff = vwp(culvert.reverseFlowDischargeCoeff, replaceParamFile)
slope = vwp(culvert.slope, replaceParamFile)
length = vwp(culvert.length, replaceParamFile)
roughness = vwp(culvert.roughness, replaceParamFile)
diameter = vwp(culvert.diameter, replaceParamFile)
width = vwp(culvert.width, replaceParamFile)
height = vwp(culvert.height, replaceParamFile)
if culvert.upstreamInvert != None:
try:
fileObject.write('UPINVERT %.6f\n' % upstreamInvert)
except:
fileObject.write('UPINVERT %s\n' % upstreamInvert)
if culvert.downstreamInvert != None:
try:
fileObject.write('DOWNINVERT %.6f\n' % downstreamInvert)
except:
fileObject.write('DOWNINVERT %s\n' % downstreamInvert)
if culvert.inletDischargeCoeff != None:
try:
fileObject.write('INLET_DISCH_COEFF %.6f\n' % inletDischargeCoeff)
except:
fileObject.write('INLET_DISCH_COEFF %s\n' % inletDischargeCoeff)
if culvert.reverseFlowDischargeCoeff != None:
try:
fileObject.write('REV_FLOW_DISCH_COEFF %.6f\n' % reverseFlowDischargeCoeff)
except:
fileObject.write('REV_FLOW_DISCH_COEFF %s\n' % reverseFlowDischargeCoeff)
if culvert.slope != None:
try:
fileObject.write('SLOPE %.6f\n' % slope)
except:
fileObject.write('SLOPE %s\n' % slope)
if culvert.length != None:
try:
fileObject.write('LENGTH %.6f\n' % length)
except:
fileObject.write('LENGTH %s\n' % length)
if culvert.roughness != None:
try:
fileObject.write('ROUGH_COEFF %.6f\n' % roughness)
except:
fileObject.write('ROUGH_COEFF %s\n' % roughness)
if culvert.diameter != None:
try:
fileObject.write('DIAMETER %.6f\n' % diameter)
except:
fileObject.write('DIAMETER %s\n' % diameter)
if culvert.width != None:
try:
fileObject.write('WIDTH %.6f\n' % width)
except:
fileObject.write('WIDTH %s\n' % width)
if culvert.height != None:
try:
fileObject.write('HEIGHT %.6f\n' % height)
except:
fileObject.write('HEIGHT %s\n' % height) | Write Structure Link to File Method | Below is the the instruction that describes the task:
### Input:
Write Structure Link to File Method
### Response:
def _writeStructureLink(self, link, fileObject, replaceParamFile):
"""
Write Structure Link to File Method
"""
fileObject.write('%s\n' % link.type)
fileObject.write('NUMSTRUCTS %s\n' % link.numElements)
# Retrieve lists of structures
weirs = link.weirs
culverts = link.culverts
# Write weirs to file
for weir in weirs:
fileObject.write('STRUCTTYPE %s\n' % weir.type)
# Check for replacement vars
crestLength = vwp(weir.crestLength, replaceParamFile)
crestLowElevation = vwp(weir.crestLowElevation, replaceParamFile)
dischargeCoeffForward = vwp(weir.dischargeCoeffForward, replaceParamFile)
dischargeCoeffReverse = vwp(weir.dischargeCoeffReverse, replaceParamFile)
crestLowLocation = vwp(weir.crestLowLocation, replaceParamFile)
steepSlope = vwp(weir.steepSlope, replaceParamFile)
shallowSlope = vwp(weir.shallowSlope, replaceParamFile)
if weir.crestLength != None:
try:
fileObject.write('CREST_LENGTH %.6f\n' % crestLength)
except:
fileObject.write('CREST_LENGTH %s\n' % crestLength)
if weir.crestLowElevation != None:
try:
fileObject.write('CREST_LOW_ELEV %.6f\n' % crestLowElevation)
except:
fileObject.write('CREST_LOW_ELEV %s\n' % crestLowElevation)
if weir.dischargeCoeffForward != None:
try:
fileObject.write('DISCHARGE_COEFF_FORWARD %.6f\n' % dischargeCoeffForward)
except:
fileObject.write('DISCHARGE_COEFF_FORWARD %s\n' % dischargeCoeffForward)
if weir.dischargeCoeffReverse != None:
try:
fileObject.write('DISCHARGE_COEFF_REVERSE %.6f\n' % dischargeCoeffReverse)
except:
fileObject.write('DISCHARGE_COEFF_REVERSE %s\n' % dischargeCoeffReverse)
if weir.crestLowLocation != None:
fileObject.write('CREST_LOW_LOC %s\n' % crestLowLocation)
if weir.steepSlope != None:
try:
fileObject.write('STEEP_SLOPE %.6f\n' % steepSlope)
except:
fileObject.write('STEEP_SLOPE %s\n' % steepSlope)
if weir.shallowSlope != None:
try:
fileObject.write('SHALLOW_SLOPE %.6f\n' % shallowSlope)
except:
fileObject.write('SHALLOW_SLOPE %s\n' % shallowSlope)
# Write culverts to file
for culvert in culverts:
fileObject.write('STRUCTTYPE %s\n' % culvert.type)
# Check for replacement vars
upstreamInvert = vwp(culvert.upstreamInvert, replaceParamFile)
downstreamInvert = vwp(culvert.downstreamInvert, replaceParamFile)
inletDischargeCoeff = vwp(culvert.inletDischargeCoeff, replaceParamFile)
reverseFlowDischargeCoeff = vwp(culvert.reverseFlowDischargeCoeff, replaceParamFile)
slope = vwp(culvert.slope, replaceParamFile)
length = vwp(culvert.length, replaceParamFile)
roughness = vwp(culvert.roughness, replaceParamFile)
diameter = vwp(culvert.diameter, replaceParamFile)
width = vwp(culvert.width, replaceParamFile)
height = vwp(culvert.height, replaceParamFile)
if culvert.upstreamInvert != None:
try:
fileObject.write('UPINVERT %.6f\n' % upstreamInvert)
except:
fileObject.write('UPINVERT %s\n' % upstreamInvert)
if culvert.downstreamInvert != None:
try:
fileObject.write('DOWNINVERT %.6f\n' % downstreamInvert)
except:
fileObject.write('DOWNINVERT %s\n' % downstreamInvert)
if culvert.inletDischargeCoeff != None:
try:
fileObject.write('INLET_DISCH_COEFF %.6f\n' % inletDischargeCoeff)
except:
fileObject.write('INLET_DISCH_COEFF %s\n' % inletDischargeCoeff)
if culvert.reverseFlowDischargeCoeff != None:
try:
fileObject.write('REV_FLOW_DISCH_COEFF %.6f\n' % reverseFlowDischargeCoeff)
except:
fileObject.write('REV_FLOW_DISCH_COEFF %s\n' % reverseFlowDischargeCoeff)
if culvert.slope != None:
try:
fileObject.write('SLOPE %.6f\n' % slope)
except:
fileObject.write('SLOPE %s\n' % slope)
if culvert.length != None:
try:
fileObject.write('LENGTH %.6f\n' % length)
except:
fileObject.write('LENGTH %s\n' % length)
if culvert.roughness != None:
try:
fileObject.write('ROUGH_COEFF %.6f\n' % roughness)
except:
fileObject.write('ROUGH_COEFF %s\n' % roughness)
if culvert.diameter != None:
try:
fileObject.write('DIAMETER %.6f\n' % diameter)
except:
fileObject.write('DIAMETER %s\n' % diameter)
if culvert.width != None:
try:
fileObject.write('WIDTH %.6f\n' % width)
except:
fileObject.write('WIDTH %s\n' % width)
if culvert.height != None:
try:
fileObject.write('HEIGHT %.6f\n' % height)
except:
fileObject.write('HEIGHT %s\n' % height) |
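The writer above repeats one small pattern for every optional card: try to format the (possibly parameter-replaced) value as a float and fall back to writing it verbatim. A compact restatement of that pattern, not code from the project:

def write_card(file_object, keyword, value):
    try:
        file_object.write('%s %.6f\n' % (keyword, value))
    except TypeError:  # value is a replacement token such as '[PARAM]', not a number
        file_object.write('%s %s\n' % (keyword, value))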
def extern_store_dict(self, context_handle, vals_ptr, vals_len):
"""Given storage and an array of Handles, return a new Handle to represent the dict.
Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...).
It is assumed that an even number of values were passed.
"""
c = self._ffi.from_handle(context_handle)
tup = tuple(c.from_value(val[0]) for val in self._ffi.unpack(vals_ptr, vals_len))
d = dict()
for i in range(0, len(tup), 2):
d[tup[i]] = tup[i + 1]
return c.to_value(d) | Given storage and an array of Handles, return a new Handle to represent the dict.
Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...).
It is assumed that an even number of values were passed. | Below is the the instruction that describes the task:
### Input:
Given storage and an array of Handles, return a new Handle to represent the dict.
Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...).
It is assumed that an even number of values were passed.
### Response:
def extern_store_dict(self, context_handle, vals_ptr, vals_len):
"""Given storage and an array of Handles, return a new Handle to represent the dict.
Array of handles alternates keys and values (i.e. key0, value0, key1, value1, ...).
It is assumed that an even number of values were passed.
"""
c = self._ffi.from_handle(context_handle)
tup = tuple(c.from_value(val[0]) for val in self._ffi.unpack(vals_ptr, vals_len))
d = dict()
for i in range(0, len(tup), 2):
d[tup[i]] = tup[i + 1]
return c.to_value(d) |
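A quick check of the interleaving convention used by extern_store_dict above (key0, value0, key1, value1, ...): the explicit loop and a slicing one-liner build the same dict.

tup = ('a', 1, 'b', 2)
d = {}
for i in range(0, len(tup), 2):
    d[tup[i]] = tup[i + 1]
assert d == dict(zip(tup[::2], tup[1::2])) == {'a': 1, 'b': 2}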
def check_recommended_global_attributes(self, dataset):
'''
Check the global recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
Basic "does it exist" checks are done in BaseNCEICheck:check_recommended
:title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF)
:platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC)
:instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC)
:uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC)
:sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC)
:id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD)
:time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD)
:time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD)
:time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD)
:geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD)
:geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD)
:institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD)
:creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD)
:creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD)
:creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD)
:project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD)
:processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF)
:keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD)
:keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD)
:comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF)
:contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD)
:contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD)
:date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD)
:date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD)
:publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD)
:publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD)
:publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD)
:history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD)
:license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:metadata_link = "" ; //............................................. RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Do any of the variables define platform ?
variable_defined_platform = any((hasattr(var, 'platform') for var in dataset.variables))
if not variable_defined_platform:
platform_name = getattr(dataset, 'platform', '')
recommended_ctx.assert_true(platform_name and platform_name in dataset.variables, 'platform should exist and point to a variable.')
sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
recommended_ctx.assert_true(
sea.lower() in sea_names,
'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
)
# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))
    # Accept either spelling: "acknowledgment" or "acknowledgement".
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
    names = contributor_name.split(',')
roles = contributor_role.split(',')
recommended_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
recommended_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
recommended_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
recommended_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
if hasattr(dataset, 'comment'):
recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')
return recommended_ctx.to_result() | Check the global recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
Basic "does it exist" checks are done in BaseNCEICheck:check_recommended
:title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF)
:platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC)
:instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC)
:uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC)
:sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC)
:id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD)
:time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD)
:time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD)
:time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD)
:geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD)
:geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD)
:institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD)
:creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD)
:creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD)
:creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD)
:project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD)
:processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF)
:keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD)
:keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD)
:comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF)
:contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD)
:contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD)
:date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD)
:date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD)
:publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD)
:publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD)
:publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD)
:history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD)
:license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:metadata_link = "" ; //............................................. RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD) | Below is the the instruction that describes the task:
### Input:
Check the global recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
Basic "does it exist" checks are done in BaseNCEICheck:check_recommended
:title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF)
:platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC)
:instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC)
:uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC)
:sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC)
:id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD)
:time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD)
:time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD)
:time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD)
:geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD)
:geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD)
:institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD)
:creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD)
:creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD)
:creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD)
:project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD)
:processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF)
:keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD)
:keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD)
:comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF)
:contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD)
:contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD)
:date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD)
:date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD)
:publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD)
:publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD)
:publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD)
:history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD)
:license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:metadata_link = "" ; //............................................. RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD)
### Response:
def check_recommended_global_attributes(self, dataset):
'''
Check the global recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
Basic "does it exist" checks are done in BaseNCEICheck:check_recommended
:title = "" ; //..................................................... RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //................................................... RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:source = "" ; //.................................................... RECOMMENDED - The input data sources regardless of the method of production method used. (CF)
:platform = "platform_variable" ; //................................. RECOMMENDED - Refers to a variable containing information about the platform. May also put this in individual variables. Use NODC or ICES platform table. (NODC)
:instrument = "instrument_parameter_variable" ; //................... RECOMMENDED - Refers to a variable containing information about the instrument. May also put this in individual variables. Use NODC or GCMD instrument table. (NODC)
:uuid = "" ; //...................................................... RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NODC)
:sea_name = "" ; //.................................................. RECOMMENDED - The names of the sea in which the data were collected. Use NODC sea names table. (NODC)
:id = "" ; //........................................................ RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.......................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.nodc). (ACDD)
:time_coverage_start = "" ; //....................................... RECOMMENDED - Use ISO8601 for date and time. (ACDD)
:time_coverage_end = "" ; //......................................... RECOMMENDED - Use ISO8601 for date and time.(ACDD)
:time_coverage_resolution = "" ; //.................................. RECOMMENDED - For example, "point" or "minute averages". (ACDD)
:geospatial_lat_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lat_units = "degrees_north" ; //......................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lat_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_lon_min = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_max = 0.0f ; //...................................... RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_lon_units = "degrees_east"; //........................... RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_lon_resolution= "" ; //.................................. RECOMMENDED - For example, "point" or "10 degree grid". (ACDD)
:geospatial_vertical_min = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_max = 0.0f ; //................................. RECOMMENDED - Replace with correct value. (ACDD)
:geospatial_vertical_units = "" ; //................................. RECOMMENDED - Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_resolution = "" ; //............................ RECOMMENDED - For example, "point" or "1 meter binned". (ACDD)
:geospatial_vertical_positive = "" ; //.............................. RECOMMENDED - Use "up" or "down". (ACDD)
:institution = "" ; //............................................... RECOMMENDED - Institution of the person or group that collected the data. An institution attribute can be used for each variable if variables come from more than one institution. (ACDD)
:creator_name = "" ; //.............................................. RECOMMENDED - Name of the person who collected the data. (ACDD)
:creator_url = "" ; //............................................... RECOMMENDED - URL for person who collected the data. (ACDD)
:creator_email = "" ; //............................................. RECOMMENDED - Email address for person who collected the data. (ACDD)
:project = "" ; //................................................... RECOMMENDED - Project the data was collected under. (ACDD)
:processing_level = "" ; //.......................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:references = "" ; //................................................ RECOMMENDED - Published or web-based references that describe the data or methods used to produce it. (CF)
:keywords_vocabulary = "" ; //....................................... RECOMMENDED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". e.g. NASA/GCMD Earth Science Keywords (ACDD)
:keywords = "" ; //.................................................. RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:acknowledgment = "" ; //............................................ RECOMMENDED - Text to use to properly acknowledge use of the data. (ACDD)
:comment = "" ; //................................................... RECOMMENDED - Provide useful additional information here. (ACDD and CF)
:contributor_name = "" ; //.......................................... RECOMMENDED - A comma separated list of contributors to this data set. (ACDD)
:contributor_role = "" ; //.......................................... RECOMMENDED - A comma separated list of their roles. (ACDD)
:date_created = "" ; //.............................................. RECOMMENDED - Creation date of the netCDF. Use ISO8601 for date and time. (ACDD)
:date_modified = "" ; //............................................. RECOMMENDED - Modification date of the netCDF. Use ISO8601 for date and time. (ACDD)
:publisher_name = "" ; //............................................ RECOMMENDED - Publisher of the data. (ACDD)
:publisher_email = "" ; //........................................... RECOMMENDED - Email address of the publisher of the data. (ACDD)
:publisher_url = "" ; //............................................. RECOMMENDED - A URL for the publisher of the data. (ACDD)
:history = "" ; //................................................... RECOMMENDED - Record changes made to the netCDF. (ACDD)
:license = "" ; //................................................... RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:metadata_link = "" ; //............................................. RECOMMENDED - This attribute provides a link to a complete metadata record for this data set or the collection that contains this data set. (ACDD)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Do any of the variables define platform ?
variable_defined_platform = any((hasattr(var, 'platform') for var in dataset.variables))
if not variable_defined_platform:
platform_name = getattr(dataset, 'platform', '')
recommended_ctx.assert_true(platform_name and platform_name in dataset.variables, 'platform should exist and point to a variable.')
sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
recommended_ctx.assert_true(
sea.lower() in sea_names,
'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
)
# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
recommended_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))
    # Accept either spelling: "acknowledgment" or "acknowledgement".
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
    names = contributor_name.split(',')
roles = contributor_role.split(',')
recommended_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
recommended_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
recommended_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
recommended_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
if hasattr(dataset, 'comment'):
recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')
return recommended_ctx.to_result() |
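The sea_name handling above accepts either ', ' or ',' as a separator, so the separator is normalized before splitting; with a hypothetical attribute value:

raw = 'North Atlantic Ocean, Baltic Sea'    # hypothetical attribute value
names = raw.replace(', ', ',').split(',')   # ['North Atlantic Ocean', 'Baltic Sea']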
def get_min_max_mag(self):
"Return the minimum and maximum magnitudes"
mag, num_bins = self._get_min_mag_and_num_bins()
    return mag, mag + self.bin_width * (num_bins - 1) | Return the minimum and maximum magnitudes | Below is the the instruction that describes the task:
### Input:
Return the minimum and maximum magnitudes
### Response:
def get_min_max_mag(self):
"Return the minimum and maximum magnitudes"
mag, num_bins = self._get_min_mag_and_num_bins()
    return mag, mag + self.bin_width * (num_bins - 1)
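A worked example for get_min_max_mag above with made-up numbers: a minimum magnitude of 4.5, a bin width of 0.1 and 5 bins give bin centres 4.5 through 4.9, so the maximum is min + width * (bins - 1).

min_mag, bin_width, num_bins = 4.5, 0.1, 5
assert round(min_mag + bin_width * (num_bins - 1), 6) == 4.9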
def normal_curve_single(obj, u, normalize):
""" Evaluates the curve normal vector at the input parameter, u.
Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u.
The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself.
:param obj: input curve
:type obj: abstract.Curve
:param u: parameter
:type u: float
:param normalize: if True, the returned vector is converted to a unit vector
:type normalize: bool
:return: a list containing "point" and "vector" pairs
:rtype: tuple
"""
# 2nd derivative of the curve gives the normal
ders = obj.derivatives(u, 2)
point = ders[0]
vector = linalg.vector_normalize(ders[2]) if normalize else ders[2]
return tuple(point), tuple(vector) | Evaluates the curve normal vector at the input parameter, u.
Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u.
The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself.
:param obj: input curve
:type obj: abstract.Curve
:param u: parameter
:type u: float
:param normalize: if True, the returned vector is converted to a unit vector
:type normalize: bool
:return: a list containing "point" and "vector" pairs
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Evaluates the curve normal vector at the input parameter, u.
Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u.
The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself.
:param obj: input curve
:type obj: abstract.Curve
:param u: parameter
:type u: float
:param normalize: if True, the returned vector is converted to a unit vector
:type normalize: bool
:return: a list containing "point" and "vector" pairs
:rtype: tuple
### Response:
def normal_curve_single(obj, u, normalize):
""" Evaluates the curve normal vector at the input parameter, u.
Curve normal is calculated from the 2nd derivative of the curve at the input parameter, u.
The output returns a list containing the starting point (i.e. origin) of the vector and the vector itself.
:param obj: input curve
:type obj: abstract.Curve
:param u: parameter
:type u: float
:param normalize: if True, the returned vector is converted to a unit vector
:type normalize: bool
:return: a list containing "point" and "vector" pairs
:rtype: tuple
"""
# 2nd derivative of the curve gives the normal
ders = obj.derivatives(u, 2)
point = ders[0]
vector = linalg.vector_normalize(ders[2]) if normalize else ders[2]
return tuple(point), tuple(vector) |
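A self-contained sketch of the evaluation above. `UnitCircle` and `vector_normalize` are stand-ins for the curve object and the `linalg` helper used in the entry; for a circle the second derivative points back at the centre, which makes the output easy to verify:

```python
import math


def vector_normalize(vec):
    norm = math.sqrt(sum(c * c for c in vec))
    return tuple(c / norm for c in vec)


class UnitCircle:
    """Parametric circle (cos u, sin u); derivatives are known in closed form."""

    def derivatives(self, u, order):
        point = (math.cos(u), math.sin(u))
        d1 = (-math.sin(u), math.cos(u))
        d2 = (-math.cos(u), -math.sin(u))   # points toward the centre
        return [point, d1, d2][:order + 1]


def normal_curve_single(obj, u, normalize):
    ders = obj.derivatives(u, 2)
    point = ders[0]
    vector = vector_normalize(ders[2]) if normalize else ders[2]
    return tuple(point), tuple(vector)


print(normal_curve_single(UnitCircle(), 0.0, normalize=True))
# ((1.0, 0.0), (-1.0, -0.0)): the normal at (1, 0) points at the origin
```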
def get_app_guid(self, app_name):
"""
Returns the GUID for the app instance with
the given name.
"""
summary = self.space.get_space_summary()
for app in summary['apps']:
if app['name'] == app_name:
return app['guid'] | Returns the GUID for the app instance with
the given name. | Below is the the instruction that describes the task:
### Input:
Returns the GUID for the app instance with
the given name.
### Response:
def get_app_guid(self, app_name):
"""
Returns the GUID for the app instance with
the given name.
"""
summary = self.space.get_space_summary()
for app in summary['apps']:
if app['name'] == app_name:
return app['guid'] |
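A minimal sketch of the lookup above with a stubbed space object; the summary layout mirrors the two fields the function reads (`name`, `guid`), everything else is invented for illustration:

```python
class FakeSpace:
    """Stub returning a space summary in the shape the lookup expects."""

    def get_space_summary(self):
        return {"apps": [{"name": "web", "guid": "abc-123"},
                         {"name": "worker", "guid": "def-456"}]}


class AppLookup:
    def __init__(self, space):
        self.space = space

    def get_app_guid(self, app_name):
        summary = self.space.get_space_summary()
        for app in summary["apps"]:
            if app["name"] == app_name:
                return app["guid"]


print(AppLookup(FakeSpace()).get_app_guid("worker"))  # def-456
```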
def unmount(self, client):
"""Unmounts a backend within Vault"""
getattr(client, self.unmount_fun)(mount_point=self.path) | Unmounts a backend within Vault | Below is the the instruction that describes the task:
### Input:
Unmounts a backend within Vault
### Response:
def unmount(self, client):
"""Unmounts a backend within Vault"""
getattr(client, self.unmount_fun)(mount_point=self.path) |
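The interesting part of the entry above is the `getattr` dispatch: the backend stores the *name* of the client method that unmounts it, so different backend types can share one `unmount`. A toy sketch follows; the fake client and its method name are assumptions, not the real Vault client API:

```python
class FakeVaultClient:
    def disable_secret_backend(self, mount_point):
        print(f"unmounted secret backend at {mount_point!r}")


class Backend:
    def __init__(self, path, unmount_fun):
        self.path = path
        self.unmount_fun = unmount_fun      # name of the client method to call

    def unmount(self, client):
        getattr(client, self.unmount_fun)(mount_point=self.path)


Backend("kv/", "disable_secret_backend").unmount(FakeVaultClient())
# unmounted secret backend at 'kv/'
```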
def map(self, fn, *seq):
"Perform a map operation distributed among the workers. Will "
"block until done."
results = Queue()
args = list(zip(*seq))  # materialise the zip so len(args) is still valid below
for seq in args:
j = SimpleJob(results, fn, seq)
self.put(j)
# Aggregate results
r = []
for i in range(len(args)):
r.append(results.get())
return r | Perform a map operation distributed among the workers. Will | Below is the the instruction that describes the task:
### Input:
Perform a map operation distributed among the workers. Will
### Response:
def map(self, fn, *seq):
"Perform a map operation distributed among the workers. Will "
"block until done."
results = Queue()
args = list(zip(*seq))  # materialise the zip so len(args) is still valid below
for seq in args:
j = SimpleJob(results, fn, seq)
self.put(j)
# Aggregate results
r = []
for i in range(len(args)):
r.append(results.get())
return r |
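A runnable, thread-based sketch of the worker-pool `map` pattern above. The pool, job class and worker loop are reconstructed assumptions; only the `map` body mirrors the entry, with the argument tuples materialised via `list(zip(*seq))` so their count is still known after the submission loop:

```python
import threading
from queue import Queue


class SimpleJob:
    def __init__(self, results, fn, args):
        self.results, self.fn, self.args = results, fn, args

    def run(self):
        self.results.put(self.fn(*self.args))


class ThreadPool:
    def __init__(self, num_workers=2):
        self.jobs = Queue()
        for _ in range(num_workers):
            threading.Thread(target=self._worker, daemon=True).start()

    def _worker(self):
        while True:                      # workers pull jobs until the process exits
            self.jobs.get().run()
            self.jobs.task_done()

    def put(self, job):
        self.jobs.put(job)

    def map(self, fn, *seq):
        results = Queue()
        args = list(zip(*seq))           # materialise: len(args) is needed below
        for job_args in args:
            self.put(SimpleJob(results, fn, job_args))
        return [results.get() for _ in range(len(args))]


pool = ThreadPool()
print(sorted(pool.map(lambda a, b: a + b, [1, 2, 3], [10, 20, 30])))  # [11, 22, 33]
```

Results arrive in completion order rather than submission order, which is why the usage line sorts them before printing.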
def broadcast_transaction(self, hex_tx):
""" Dispatch a raw transaction to the network.
"""
resp = self.obj.sendrawtransaction(hex_tx)
if len(resp) > 0:
return {'transaction_hash': resp, 'success': True}
else:
return error_reply('Invalid response from bitcoind.') | Dispatch a raw transaction to the network. | Below is the the instruction that describes the task:
### Input:
Dispatch a raw transaction to the network.
### Response:
def broadcast_transaction(self, hex_tx):
""" Dispatch a raw transaction to the network.
"""
resp = self.obj.sendrawtransaction(hex_tx)
if len(resp) > 0:
return {'transaction_hash': resp, 'success': True}
else:
return error_reply('Invalid response from bitcoind.') |
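A sketch of the broadcast wrapper above with a stubbed RPC proxy. `error_reply` is assumed to build a simple error dict, and the fake `sendrawtransaction` return value stands in for the transaction id a real bitcoind node would report:

```python
def error_reply(msg):
    return {"success": False, "error": msg}


class FakeBitcoindProxy:
    def sendrawtransaction(self, hex_tx):
        return "d5a1f2...txid" if hex_tx else ""   # a real node returns the txid


class Broadcaster:
    def __init__(self, obj):
        self.obj = obj

    def broadcast_transaction(self, hex_tx):
        resp = self.obj.sendrawtransaction(hex_tx)
        if len(resp) > 0:
            return {"transaction_hash": resp, "success": True}
        return error_reply("Invalid response from bitcoind.")


print(Broadcaster(FakeBitcoindProxy()).broadcast_transaction("0100beef"))
# {'transaction_hash': 'd5a1f2...txid', 'success': True}
```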
def async_task(self, func):
"""
Execute handler as task and return None.
Use this decorator for slow handlers (with timeouts)
.. code-block:: python3
@dp.message_handler(commands=['command'])
@dp.async_task
async def cmd_with_timeout(message: types.Message):
await asyncio.sleep(120)
return SendMessage(message.chat.id, 'KABOOM').reply(message)
:param func:
:return:
"""
def process_response(task):
try:
response = task.result()
except Exception as e:
self.loop.create_task(
self.errors_handlers.notify(types.Update.get_current(), e))
else:
if isinstance(response, BaseResponse):
self.loop.create_task(response.execute_response(self.bot))
@functools.wraps(func)
async def wrapper(*args, **kwargs):
task = self.loop.create_task(func(*args, **kwargs))
task.add_done_callback(process_response)
return wrapper | Execute handler as task and return None.
Use this decorator for slow handlers (with timeouts)
.. code-block:: python3
@dp.message_handler(commands=['command'])
@dp.async_task
async def cmd_with_timeout(message: types.Message):
await asyncio.sleep(120)
return SendMessage(message.chat.id, 'KABOOM').reply(message)
:param func:
:return: | Below is the the instruction that describes the task:
### Input:
Execute handler as task and return None.
Use this decorator for slow handlers (with timeouts)
.. code-block:: python3
@dp.message_handler(commands=['command'])
@dp.async_task
async def cmd_with_timeout(message: types.Message):
await asyncio.sleep(120)
return SendMessage(message.chat.id, 'KABOOM').reply(message)
:param func:
:return:
### Response:
def async_task(self, func):
"""
Execute handler as task and return None.
Use this decorator for slow handlers (with timeouts)
.. code-block:: python3
@dp.message_handler(commands=['command'])
@dp.async_task
async def cmd_with_timeout(message: types.Message):
await asyncio.sleep(120)
return SendMessage(message.chat.id, 'KABOOM').reply(message)
:param func:
:return:
"""
def process_response(task):
try:
response = task.result()
except Exception as e:
self.loop.create_task(
self.errors_handlers.notify(types.Update.get_current(), e))
else:
if isinstance(response, BaseResponse):
self.loop.create_task(response.execute_response(self.bot))
@functools.wraps(func)
async def wrapper(*args, **kwargs):
task = self.loop.create_task(func(*args, **kwargs))
task.add_done_callback(process_response)
return wrapper |
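A stand-alone asyncio sketch of the same task-plus-done-callback pattern without aiogram: the decorated handler returns immediately, the coroutine runs as a background task, and its return value is handed to a callback once it finishes. Handler name, sleep times and the callback are illustrative only:

```python
import asyncio


def async_task(loop, on_response):
    """Run the decorated coroutine as a task and pass its result to on_response."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            task = loop.create_task(func(*args, **kwargs))
            task.add_done_callback(lambda t: on_response(t.result()))
        return wrapper
    return decorator


async def main():
    loop = asyncio.get_running_loop()

    @async_task(loop, on_response=lambda r: print("response:", r))
    async def slow_handler(message):
        await asyncio.sleep(0.1)         # stands in for the 120 s sleep above
        return f"KABOOM for {message}"

    slow_handler("hello")                # returns immediately
    await asyncio.sleep(0.2)             # give the background task time to finish


asyncio.run(main())                      # prints: response: KABOOM for hello
```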
def create_parser():
"""Setup argument Parsing."""
description = """RPC Release Diff Generator
--------------------------
Finds changes in OpenStack-Ansible, OpenStack-Ansible roles, and OpenStack
projects between two RPC-OpenStack revisions.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable verbose output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rroc', '--role-requirements-old-commit',
action='store',
default=None,
help=(
"Name of the Ansible role requirements file to read from the old "
"commit, defaults to value of `--role-requirements`."
),
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default=ROLE_REQ_FILE,
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-r', '--rpc-repo-url',
action='store',
default="https://github.com/rcbops/rpc-openstack",
help="Github repository for the rpc-openstack project"
)
parser.add_argument(
'--osa-repo-url',
action='store',
default="https://git.openstack.org/openstack/openstack-ansible",
help="URL of the openstack-ansible git repo"
)
parser.add_argument(
'-rpoc', '--rpc-product-old-commit',
action='store',
default=None,
help=(
"Set the RPC product version for the old commit, defaults to "
"value of `--rpc-product`."
)
)
parser.add_argument(
'-rp', '--rpc-product',
action='store',
default="master",
help="Set the RPC product version"
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--version-mappings',
action=osa_differ.VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser | Setup argument Parsing. | Below is the the instruction that describes the task:
### Input:
Setup argument Parsing.
### Response:
def create_parser():
"""Setup argument Parsing."""
description = """RPC Release Diff Generator
--------------------------
Finds changes in OpenStack-Ansible, OpenStack-Ansible roles, and OpenStack
projects between two RPC-OpenStack revisions.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable verbose output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rroc', '--role-requirements-old-commit',
action='store',
default=None,
help=(
"Name of the Ansible role requirements file to read from the old "
"commit, defaults to value of `--role-requirements`."
),
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default=ROLE_REQ_FILE,
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-r', '--rpc-repo-url',
action='store',
default="https://github.com/rcbops/rpc-openstack",
help="Github repository for the rpc-openstack project"
)
parser.add_argument(
'--osa-repo-url',
action='store',
default="https://git.openstack.org/openstack/openstack-ansible",
help="URL of the openstack-ansible git repo"
)
parser.add_argument(
'-rpoc', '--rpc-product-old-commit',
action='store',
default=None,
help=(
"Set the RPC product version for the old commit, defaults to "
"value of `--rpc-product`."
)
)
parser.add_argument(
'-rp', '--rpc-product',
action='store',
default="master",
help="Set the RPC product version"
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--version-mappings',
action=osa_differ.VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser |
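A trimmed, runnable argparse sketch mirroring the structure above: positional SHAs with `nargs=1` (so they parse as one-element lists) and separate argument groups for scope and output. Only a subset of the options is reproduced, and the parsed values are printed to show the resulting namespace:

```python
import argparse


def build_parser():
    parser = argparse.ArgumentParser(description="RPC release diff (illustrative subset)")
    parser.add_argument("old_commit", nargs=1, help="Git SHA of the older commit")
    parser.add_argument("new_commit", nargs=1, help="Git SHA of the newer commit")
    scope = parser.add_argument_group("Limit scope")
    scope.add_argument("--skip-roles", action="store_true",
                       help="Skip checking for changes in OpenStack-Ansible roles")
    output = parser.add_argument_group("Output options",
                                       "Output is printed to stdout by default.")
    output.add_argument("--file", metavar="FILENAME", help="Output to a file")
    return parser


args = build_parser().parse_args(["abc123", "def456", "--skip-roles", "--file", "report.rst"])
print(args.old_commit, args.new_commit, args.skip_roles, args.file)
# ['abc123'] ['def456'] True report.rst
```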
def intersect(self, **kwargs):
"""
Intersect a Line or Point Collection and the Shoreline
Returns the point of intersection along the coastline
Should also return a linestring buffer around the intersection point
so we can calculate the direction to bounce a particle.
"""
ls = None
if "linestring" in kwargs:
ls = kwargs.pop('linestring')
spoint = Point(ls.coords[0])
epoint = Point(ls.coords[-1])
elif "start_point" and "end_point" in kwargs:
spoint = kwargs.get('start_point')
epoint = kwargs.get('end_point')
ls = LineString(list(spoint.coords) + list(epoint.coords))
elif "single_point" in kwargs:
spoint = kwargs.get('single_point')
epoint = None
ls = LineString(list(spoint.coords) + list(spoint.coords))
else:
raise TypeError( "must provide a LineString geometry object, (2) Point geometry objects, or (1) Point geometry object" )
inter = False
# If the current point lies outside of our current shapefile index,
# re-query the shapefile in a buffer around this point
if self._spatial_query_object is None or (self._spatial_query_object and not ls.within(self._spatial_query_object)):
self.index(point=spoint)
for element in self._geoms:
prepped_element = prep(element)
# Test if starting on land
if prepped_element.contains(spoint):
if epoint is None:
# If we only passed in one point, return the intersection is true.
return {'point': spoint, 'feature': None}
else:
# If we are testing a linestring, raise an exception that we started on land.
raise Exception('Starting point on land: %s %s %s' % (spoint.envelope, epoint.envelope, element.envelope))
else:
# If we are just checking a single point, continue looping.
if epoint is None:
continue
inter = ls.intersection(element)
if inter:
# Return the first point in the linestring, and the linestring that it hit
if isinstance(inter, MultiLineString):
inter = inter.geoms[0]
inter = Point(inter.coords[0])
smaller_int = inter.buffer(self._spatialbuffer)
shorelines = element.exterior.intersection(smaller_int)
if isinstance(shorelines, LineString):
shorelines = [shorelines]
else:
shorelines = list(shorelines)
for shore_segment in shorelines:
# Once we find the linestring in the Polygon that was
# intersected, break out and return
if ls.touches(shore_segment):
break
return {'point': Point(inter.x, inter.y, 0), 'feature': shore_segment or None}
return None | Intersect a Line or Point Collection and the Shoreline
Returns the point of intersection along the coastline
Should also return a linestring buffer around the intersection point
so we can calculate the direction to bounce a particle. | Below is the the instruction that describes the task:
### Input:
Intersect a Line or Point Collection and the Shoreline
Returns the point of intersection along the coastline
Should also return a linestring buffer around the intersection point
so we can calculate the direction to bounce a particle.
### Response:
def intersect(self, **kwargs):
"""
Intersect a Line or Point Collection and the Shoreline
Returns the point of intersection along the coastline
Should also return a linestring buffer around the intersection point
so we can calculate the direction to bounce a particle.
"""
ls = None
if "linestring" in kwargs:
ls = kwargs.pop('linestring')
spoint = Point(ls.coords[0])
epoint = Point(ls.coords[-1])
elif "start_point" and "end_point" in kwargs:
spoint = kwargs.get('start_point')
epoint = kwargs.get('end_point')
ls = LineString(list(spoint.coords) + list(epoint.coords))
elif "single_point" in kwargs:
spoint = kwargs.get('single_point')
epoint = None
ls = LineString(list(spoint.coords) + list(spoint.coords))
else:
raise TypeError( "must provide a LineString geometry object, (2) Point geometry objects, or (1) Point geometry object" )
inter = False
# If the current point lies outside of our current shapefile index,
# re-query the shapefile in a buffer around this point
if self._spatial_query_object is None or (self._spatial_query_object and not ls.within(self._spatial_query_object)):
self.index(point=spoint)
for element in self._geoms:
prepped_element = prep(element)
# Test if starting on land
if prepped_element.contains(spoint):
if epoint is None:
# If we only passed in one point, return the intersection is true.
return {'point': spoint, 'feature': None}
else:
# If we are testing a linestring, raise an exception that we started on land.
raise Exception('Starting point on land: %s %s %s' % (spoint.envelope, epoint.envelope, element.envelope))
else:
# If we are just checking a single point, continue looping.
if epoint is None:
continue
inter = ls.intersection(element)
if inter:
# Return the first point in the linestring, and the linestring that it hit
if isinstance(inter, MultiLineString):
inter = inter.geoms[0]
inter = Point(inter.coords[0])
smaller_int = inter.buffer(self._spatialbuffer)
shorelines = element.exterior.intersection(smaller_int)
if isinstance(shorelines, LineString):
shorelines = [shorelines]
else:
shorelines = list(shorelines)
for shore_segment in shorelines:
# Once we find the linestring in the Polygon that was
# intersected, break out and return
if ls.touches(shore_segment):
break
return {'point': Point(inter.x, inter.y, 0), 'feature': shore_segment or None}
return None |
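The core of the test above reduced to plain Shapely objects, with no shapefile index or re-querying (assumes `shapely` is installed): a track that starts at sea and ends on land is intersected with the polygon's exterior ring to find the first shoreline crossing:

```python
from shapely.geometry import LineString, Point, Polygon
from shapely.prepared import prep

land = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])      # a square "island"
prepped = prep(land)

track = LineString([(-2, 2), (2, 2)])                 # starts at sea, ends on land
start = Point(track.coords[0])

assert not prepped.contains(start)                    # did not start on land
hit = track.intersection(land.exterior)               # crossing with the shoreline
first_hit = Point(list(hit.coords)[0]) if not hit.is_empty else None
print(first_hit)                                      # POINT (0 2)
```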
def save_file(self, filename, text):
"""Save the given text under the given control filename and the
current path."""
if not filename.endswith('.py'):
filename += '.py'
path = os.path.join(self.currentpath, filename)
with open(path, 'w', encoding="utf-8") as file_:
file_.write(text) | Save the given text under the given control filename and the
current path. | Below is the the instruction that describes the task:
### Input:
Save the given text under the given control filename and the
current path.
### Response:
def save_file(self, filename, text):
"""Save the given text under the given control filename and the
current path."""
if not filename.endswith('.py'):
filename += '.py'
path = os.path.join(self.currentpath, filename)
with open(path, 'w', encoding="utf-8") as file_:
file_.write(text) |
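A small, self-contained version of the save helper above, using a temporary directory so it runs anywhere; the `.py` suffix handling matches the entry:

```python
import os
import tempfile


def save_file(currentpath, filename, text):
    if not filename.endswith(".py"):
        filename += ".py"
    path = os.path.join(currentpath, filename)
    with open(path, "w", encoding="utf-8") as file_:
        file_.write(text)
    return path


with tempfile.TemporaryDirectory() as tmp:
    saved = save_file(tmp, "controls", "x = 1\n")
    print(os.path.basename(saved))      # controls.py
```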
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry] | Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__ | Below is the the instruction that describes the task:
### Input:
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
### Response:
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry] |
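A dict-based sketch of the `__many__` idea above, outside ConfigObj: sections named explicitly in the spec keep their own spec, while every other section picks up the generic `__many__` spec. Section names and spec strings are invented for illustration:

```python
def attach_configspec(section_names, configspec):
    many = configspec.get("__many__")
    attached = {}
    for name in section_names:
        # Explicitly specced sections win; everything else falls back to __many__.
        attached[name] = configspec.get(name, many)
    return attached


spec = {"__many__": {"host": "string"},
        "logging": {"level": "option(debug, info)"}}
print(attach_configspec(["logging", "server1", "server2"], spec))
# logging keeps its own spec; server1 and server2 get {'host': 'string'}
```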
def create(self, datapath, tracker_urls, comment=None, root_name=None,
created_by=None, private=False, no_date=False, progress=None,
callback=None):
""" Create a metafile with the path given on object creation.
Returns the last metafile dict that was written (as an object, not bencoded).
"""
if datapath:
self.datapath = datapath
try:
tracker_urls = ['' + tracker_urls]
except TypeError:
tracker_urls = list(tracker_urls)
multi_mode = len(tracker_urls) > 1
# TODO add optimization so the hashing happens only once for multiple URLs!
for tracker_url in tracker_urls:
# Lookup announce URLs from config file
try:
if urlparse.urlparse(tracker_url).scheme:
tracker_alias = urlparse.urlparse(tracker_url).netloc.split(':')[0].split('.')
tracker_alias = tracker_alias[-2 if len(tracker_alias) > 1 else 0]
else:
tracker_alias, tracker_url = config.lookup_announce_alias(tracker_url)
tracker_url = tracker_url[0]
except (KeyError, IndexError):
raise error.UserError("Bad tracker URL %r, or unknown alias!" % (tracker_url,))
# Determine metafile name
output_name = self.filename
if multi_mode:
# Add 2nd level of announce URL domain to metafile name
output_name = list(os.path.splitext(output_name))
try:
output_name[1:1] = '-' + tracker_alias
except (IndexError,):
self.LOG.error("Malformed announce URL %r, skipping!" % (tracker_url,))
continue
output_name = ''.join(output_name)
# Hash the data
self.LOG.info("Creating %r for %s %r..." % (
output_name, "filenames read from" if self._fifo else "data in", self.datapath,
))
meta, _ = self._make_meta(tracker_url, root_name, private, progress)
# Add optional fields
if comment:
meta["comment"] = comment
if created_by:
meta["created by"] = created_by
if not no_date:
meta["creation date"] = int(time.time())
if callback:
callback(meta)
# Write metafile to disk
self.LOG.debug("Writing %r..." % (output_name,))
bencode.bwrite(output_name, meta)
return meta | Create a metafile with the path given on object creation.
Returns the last metafile dict that was written (as an object, not bencoded). | Below is the the instruction that describes the task:
### Input:
Create a metafile with the path given on object creation.
Returns the last metafile dict that was written (as an object, not bencoded).
### Response:
def create(self, datapath, tracker_urls, comment=None, root_name=None,
created_by=None, private=False, no_date=False, progress=None,
callback=None):
""" Create a metafile with the path given on object creation.
Returns the last metafile dict that was written (as an object, not bencoded).
"""
if datapath:
self.datapath = datapath
try:
tracker_urls = ['' + tracker_urls]
except TypeError:
tracker_urls = list(tracker_urls)
multi_mode = len(tracker_urls) > 1
# TODO add optimization so the hashing happens only once for multiple URLs!
for tracker_url in tracker_urls:
# Lookup announce URLs from config file
try:
if urlparse.urlparse(tracker_url).scheme:
tracker_alias = urlparse.urlparse(tracker_url).netloc.split(':')[0].split('.')
tracker_alias = tracker_alias[-2 if len(tracker_alias) > 1 else 0]
else:
tracker_alias, tracker_url = config.lookup_announce_alias(tracker_url)
tracker_url = tracker_url[0]
except (KeyError, IndexError):
raise error.UserError("Bad tracker URL %r, or unknown alias!" % (tracker_url,))
# Determine metafile name
output_name = self.filename
if multi_mode:
# Add 2nd level of announce URL domain to metafile name
output_name = list(os.path.splitext(output_name))
try:
output_name[1:1] = '-' + tracker_alias
except (IndexError,):
self.LOG.error("Malformed announce URL %r, skipping!" % (tracker_url,))
continue
output_name = ''.join(output_name)
# Hash the data
self.LOG.info("Creating %r for %s %r..." % (
output_name, "filenames read from" if self._fifo else "data in", self.datapath,
))
meta, _ = self._make_meta(tracker_url, root_name, private, progress)
# Add optional fields
if comment:
meta["comment"] = comment
if created_by:
meta["created by"] = created_by
if not no_date:
meta["creation date"] = int(time.time())
if callback:
callback(meta)
# Write metafile to disk
self.LOG.debug("Writing %r..." % (output_name,))
bencode.bwrite(output_name, meta)
return meta |
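A sketch of just the per-tracker filename step from the loop above: take the second-level domain of the announce URL and splice it into the metafile name before the extension. The function name and example URL are illustrative:

```python
import os
from urllib.parse import urlparse


def per_tracker_name(metafile_name, tracker_url):
    host_parts = urlparse(tracker_url).netloc.split(":")[0].split(".")
    alias = host_parts[-2 if len(host_parts) > 1 else 0]   # 2nd-level domain
    root, ext = os.path.splitext(metafile_name)
    return f"{root}-{alias}{ext}"


print(per_tracker_name("backup.torrent", "http://tracker.example.org:6969/announce"))
# backup-example.torrent
```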
def make_codon_list(protein_seq, template_dna=None, include_stop=True):
"""
Return a list of codons that would be translated to the given protein
sequence. Codons are picked first to minimize the mutations relative to a
template DNA sequence and second to prefer "optimal" codons.
"""
codon_list = []
if template_dna is None:
template_dna = []
# Reverse translate each codon, preferring (in order):
# 1. The codon with the most similarity to the template codon.
# 2. The codon with the highest natural usage.
for i, res in enumerate(protein_seq.upper()):
try: template_codon = template_dna[3*i:3*i+3]
except IndexError: template_codon = '---'
# Already sorted by natural codon usage
possible_codons = dna.ecoli_reverse_translate[res]
# Sort by similarity. Note that this is a stable sort.
possible_codons.sort(
key=lambda x: dna.num_mutations(x, template_codon))
# Pick the best codon.
codon_list.append(possible_codons[0])
# Make sure the sequence ends with a stop codon.
last_codon = codon_list[-1]
stop_codons = dna.ecoli_reverse_translate['.']
if include_stop and last_codon not in stop_codons:
codon_list.append(stop_codons[0])
return codon_list | Return a list of codons that would be translated to the given protein
sequence. Codons are picked first to minimize the mutations relative to a
template DNA sequence and second to prefer "optimal" codons. | Below is the the instruction that describes the task:
### Input:
Return a list of codons that would be translated to the given protein
sequence. Codons are picked first to minimize the mutations relative to a
template DNA sequence and second to prefer "optimal" codons.
### Response:
def make_codon_list(protein_seq, template_dna=None, include_stop=True):
"""
Return a list of codons that would be translated to the given protein
sequence. Codons are picked first to minimize the mutations relative to a
template DNA sequence and second to prefer "optimal" codons.
"""
codon_list = []
if template_dna is None:
template_dna = []
# Reverse translate each codon, preferring (in order):
# 1. The codon with the most similarity to the template codon.
# 2. The codon with the highest natural usage.
for i, res in enumerate(protein_seq.upper()):
try: template_codon = template_dna[3*i:3*i+3]
except IndexError: template_codon = '---'
# Already sorted by natural codon usage
possible_codons = dna.ecoli_reverse_translate[res]
# Sort by similarity. Note that this is a stable sort.
possible_codons.sort(
key=lambda x: dna.num_mutations(x, template_codon))
# Pick the best codon.
codon_list.append(possible_codons[0])
# Make sure the sequence ends with a stop codon.
last_codon = codon_list[-1]
stop_codons = dna.ecoli_reverse_translate['.']
if include_stop and last_codon not in stop_codons:
codon_list.append(stop_codons[0])
return codon_list |
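A self-contained sketch of the codon-selection rule above, with a toy reverse-translation table and mutation counter standing in for the `dna` module: codons are listed most-used first, and a stable sort on mutation count keeps that ordering as the tiebreak:

```python
TOY_REVERSE_TRANSLATE = {            # codons listed most-used first (toy data)
    "M": ["ATG"],
    "K": ["AAA", "AAG"],
    ".": ["TAA", "TGA", "TAG"],      # stop codons
}


def num_mutations(codon, template_codon):
    return sum(1 for a, b in zip(codon, template_codon) if a != b)


def make_codon_list(protein_seq, template_dna="", include_stop=True):
    codons = []
    for i, res in enumerate(protein_seq.upper()):
        template_codon = template_dna[3 * i:3 * i + 3] or "---"
        options = sorted(TOY_REVERSE_TRANSLATE[res],
                         key=lambda c: num_mutations(c, template_codon))
        codons.append(options[0])    # fewest mutations, then highest usage
    if include_stop and codons[-1] not in TOY_REVERSE_TRANSLATE["."]:
        codons.append(TOY_REVERSE_TRANSLATE["."][0])
    return codons


print(make_codon_list("MK", template_dna="ATGAAG"))  # ['ATG', 'AAG', 'TAA']
```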