code | docstring | text |
---|---|---|
def add_link(self, name, desc, layout, node_1, node_2):
"""
Add a link to a network. Links are what effectively
define the network topology, by associating two already
existing nodes.
"""
existing_link = get_session().query(Link).filter(Link.name==name, Link.network_id==self.id).first()
if existing_link is not None:
raise HydraError("A link with name %s is already in network %s"%(name, self.id))
l = Link()
l.name = name
l.description = desc
l.layout = json.dumps(layout) if layout is not None else None
l.node_a = node_1
l.node_b = node_2
get_session().add(l)
self.links.append(l)
return l | Add a link to a network. Links are what effectively
define the network topology, by associating two already
existing nodes. | Below is the instruction that describes the task:
### Input:
Add a link to a network. Links are what effectively
define the network topology, by associating two already
existing nodes.
### Response:
def add_link(self, name, desc, layout, node_1, node_2):
"""
Add a link to a network. Links are what effectively
define the network topology, by associating two already
existing nodes.
"""
existing_link = get_session().query(Link).filter(Link.name==name, Link.network_id==self.id).first()
if existing_link is not None:
raise HydraError("A link with name %s is already in network %s"%(name, self.id))
l = Link()
l.name = name
l.description = desc
l.layout = json.dumps(layout) if layout is not None else None
l.node_a = node_1
l.node_b = node_2
get_session().add(l)
self.links.append(l)
return l |
def _normalize_http_methods(http_method):
"""
Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
"""
if http_method.upper() == 'ANY':
for method in SamApiProvider._ANY_HTTP_METHODS:
yield method.upper()
else:
yield http_method.upper() | Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods) | Below is the instruction that describes the task:
### Input:
Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
### Response:
def _normalize_http_methods(http_method):
"""
Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
"""
if http_method.upper() == 'ANY':
for method in SamApiProvider._ANY_HTTP_METHODS:
yield method.upper()
else:
yield http_method.upper() |
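The `_normalize_http_methods` row above is a generator that fans the special `ANY` verb out into concrete HTTP methods. A minimal standalone sketch of the same idea, with a hypothetical `_ANY_HTTP_METHODS` list (the real `SamApiProvider` constant may differ):

```python
# Standalone sketch of the ANY fan-out; the method list below is an assumption,
# not necessarily the constant SamApiProvider actually uses.
_ANY_HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]

def normalize_http_methods(http_method):
    """Yield the upper-cased method, expanding ANY into every supported verb."""
    if http_method.upper() == "ANY":
        for method in _ANY_HTTP_METHODS:
            yield method
    else:
        yield http_method.upper()

print(list(normalize_http_methods("any")))  # ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS']
print(list(normalize_http_methods("get")))  # ['GET']
```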
def search_handle(self, URL=None, prefix=None, **key_value_pairs):
'''
Search for handles containing the specified key with the specified
value. The search terms are passed on to the reverse lookup servlet
as-is. The servlet is supposed to be case-insensitive, but if it
isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`.
*Note:* If allowed search keys are configured, only these are used. If
no allowed search keys are specified, all key-value pairs are
passed on to the reverse lookup servlet, possibly causing a
:exc:`~b2handle.handleexceptions.ReverseLookupException`.
Example calls:
.. code:: python
list_of_handles = search_handle('http://www.foo.com')
list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)
:param URL: Optional. The URL to search for (reverse lookup). [This is
NOT the URL of the search servlet!]
:param prefix: Optional. The Handle prefix to which the search should
be limited to. If unspecified, the method will search across all
prefixes present at the server given to the constructor.
:param key_value_pairs: Optional. Several search fields and values can
be specified as key-value-pairs,
e.g. CHECKSUM=123456, URL=www.foo.com
:raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that
cannot be used, or if something else goes wrong.
:return: A list of all Handles (strings) that bear the given key with
given value of given prefix or server. The list may be empty and
may also contain more than one element.
'''
LOGGER.debug('search_handle...')
list_of_handles = self.__searcher.search_handle(URL=URL, prefix=prefix, **key_value_pairs)
return list_of_handles | Search for handles containing the specified key with the specified
value. The search terms are passed on to the reverse lookup servlet
as-is. The servlet is supposed to be case-insensitive, but if it
isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`.
*Note:* If allowed search keys are configured, only these are used. If
no allowed search keys are specified, all key-value pairs are
passed on to the reverse lookup servlet, possibly causing a
:exc:`~b2handle.handleexceptions.ReverseLookupException`.
Example calls:
.. code:: python
list_of_handles = search_handle('http://www.foo.com')
list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)
:param URL: Optional. The URL to search for (reverse lookup). [This is
NOT the URL of the search servlet!]
:param prefix: Optional. The Handle prefix to which the search should
be limited to. If unspecified, the method will search across all
prefixes present at the server given to the constructor.
:param key_value_pairs: Optional. Several search fields and values can
be specified as key-value-pairs,
e.g. CHECKSUM=123456, URL=www.foo.com
:raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that
cannot be used, or if something else goes wrong.
:return: A list of all Handles (strings) that bear the given key with
given value of given prefix or server. The list may be empty and
may also contain more than one element. | Below is the instruction that describes the task:
### Input:
Search for handles containing the specified key with the specified
value. The search terms are passed on to the reverse lookup servlet
as-is. The servlet is supposed to be case-insensitive, but if it
isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`.
*Note:* If allowed search keys are configured, only these are used. If
no allowed search keys are specified, all key-value pairs are
passed on to the reverse lookup servlet, possibly causing a
:exc:`~b2handle.handleexceptions.ReverseLookupException`.
Example calls:
.. code:: python
list_of_handles = search_handle('http://www.foo.com')
list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)
:param URL: Optional. The URL to search for (reverse lookup). [This is
NOT the URL of the search servlet!]
:param prefix: Optional. The Handle prefix to which the search should
be limited to. If unspecified, the method will search across all
prefixes present at the server given to the constructor.
:param key_value_pairs: Optional. Several search fields and values can
be specified as key-value-pairs,
e.g. CHECKSUM=123456, URL=www.foo.com
:raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that
cannot be used, or if something else goes wrong.
:return: A list of all Handles (strings) that bear the given key with
given value of given prefix or server. The list may be empty and
may also contain more than one element.
### Response:
def search_handle(self, URL=None, prefix=None, **key_value_pairs):
'''
Search for handles containing the specified key with the specified
value. The search terms are passed on to the reverse lookup servlet
as-is. The servlet is supposed to be case-insensitive, but if it
isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`.
*Note:* If allowed search keys are configured, only these are used. If
no allowed search keys are specified, all key-value pairs are
passed on to the reverse lookup servlet, possibly causing a
:exc:`~b2handle.handleexceptions.ReverseLookupException`.
Example calls:
.. code:: python
list_of_handles = search_handle('http://www.foo.com')
list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)
:param URL: Optional. The URL to search for (reverse lookup). [This is
NOT the URL of the search servlet!]
:param prefix: Optional. The Handle prefix to which the search should
be limited to. If unspecified, the method will search across all
prefixes present at the server given to the constructor.
:param key_value_pairs: Optional. Several search fields and values can
be specified as key-value-pairs,
e.g. CHECKSUM=123456, URL=www.foo.com
:raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that
cannot be used, or if something else goes wrong.
:return: A list of all Handles (strings) that bear the given key with
given value of given prefix or server. The list may be empty and
may also contain more than one element.
'''
LOGGER.debug('search_handle...')
list_of_handles = self.__searcher.search_handle(URL=URL, prefix=prefix, **key_value_pairs)
return list_of_handles |
def _store_token(self, token, remember=False):
"""Store token for future use."""
if token and remember:
try:
keyring.set_password('github', 'token', token)
except Exception:
if self._show_msgbox:
QMessageBox.warning(self.parent_widget,
_('Failed to store token'),
_('It was not possible to securely '
'save your token. You will be '
'prompted for your Github token '
'next time you want to report '
'an issue.'))
remember = False
CONF.set('main', 'report_error/remember_token', remember) | Store token for future use. | Below is the instruction that describes the task:
### Input:
Store token for future use.
### Response:
def _store_token(self, token, remember=False):
"""Store token for future use."""
if token and remember:
try:
keyring.set_password('github', 'token', token)
except Exception:
if self._show_msgbox:
QMessageBox.warning(self.parent_widget,
_('Failed to store token'),
_('It was not possible to securely '
'save your token. You will be '
'prompted for your Github token '
'next time you want to report '
'an issue.'))
remember = False
CONF.set('main', 'report_error/remember_token', remember) |
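`_store_token` above relies on the `keyring` package for secure storage. A minimal round-trip sketch, assuming `keyring` is installed and a usable system backend is available; the service and username strings are illustrative only:

```python
# Minimal keyring round-trip; raises if no usable system backend is configured.
import keyring

keyring.set_password("github", "token", "ghp_example_not_a_real_token")
print(keyring.get_password("github", "token") is not None)  # True if stored
keyring.delete_password("github", "token")  # remove the example entry again
```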
def service_action(self, service, action):
"Perform given action on service for the selected cluster"
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True | Perform given action on service for the selected cluster | Below is the instruction that describes the task:
### Input:
Perform given action on service for the selected cluster
### Response:
def service_action(self, service, action):
"Perform given action on service for the selected cluster"
try:
service = api.get_cluster(self.cluster).get_service(service)
except ApiException:
print("Service not found")
return None
if action == "start":
service.start()
if action == "restart":
service.restart()
if action == "stop":
service.stop()
return True |
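`service_action` above dispatches with a chain of `if` statements. A table-driven alternative is shown below as a sketch only (not the project's code); the stub `Service` class stands in for the Cloudera Manager API object:

```python
# Dict-based dispatch sketch; Service is a stand-in for the real API object.
class Service:
    def start(self):   print("starting")
    def restart(self): print("restarting")
    def stop(self):    print("stopping")

def service_action(service, action):
    handlers = {"start": service.start, "restart": service.restart, "stop": service.stop}
    handler = handlers.get(action)
    if handler is None:
        return None  # unknown action, mirroring the original's None on failure
    handler()
    return True

print(service_action(Service(), "restart"))  # prints "restarting", then True
```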
def get_cumulative_veto_group_files(workflow, option, cat_files,
out_dir, execute_now=True, tags=None):
"""
Get the cumulative veto files that define the different backgrounds
we want to analyze, defined by groups of vetos.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
option : str
ini file option to use to get the veto groups
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
execute_now : Boolean
If true outputs are generated at runtime. Else jobs go into the workflow
and are generated then.
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
seg_files : workflow.core.FileList instance
The cumulative segment files for each veto group.
names : list of strings
The segment names for the corresponding seg_file
cat_files : workflow.core.FileList instance
The list of individual category veto files
"""
if tags is None:
tags = []
logging.info("Starting generating vetoes for groups in %s" %(option))
make_analysis_dir(out_dir)
cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('workflow-segments',
option, tags))
cum_seg_files = FileList()
names = []
for cat_set in cat_sets:
segment_name = "CUMULATIVE_CAT_%s" % (''.join(sorted(cat_set)))
logging.info('getting information for %s' % segment_name)
categories = [cat_to_veto_def_cat(c) for c in cat_set]
cum_seg_files += [get_cumulative_segs(workflow, categories, cat_files,
out_dir, execute_now=execute_now,
segment_name=segment_name, tags=tags)]
names.append(segment_name)
logging.info("Done generating vetoes for groups in %s" %(option))
return cum_seg_files, names, cat_files | Get the cumulative veto files that define the different backgrounds
we want to analyze, defined by groups of vetos.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
option : str
ini file option to use to get the veto groups
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
execute_now : Boolean
If true outputs are generated at runtime. Else jobs go into the workflow
and are generated then.
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
seg_files : workflow.core.FileList instance
The cumulative segment files for each veto group.
names : list of strings
The segment names for the corresponding seg_file
cat_files : workflow.core.FileList instance
The list of individual category veto files | Below is the instruction that describes the task:
### Input:
Get the cumulative veto files that define the different backgrounds
we want to analyze, defined by groups of vetos.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
option : str
ini file option to use to get the veto groups
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
execute_now : Boolean
If true outputs are generated at runtime. Else jobs go into the workflow
and are generated then.
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
seg_files : workflow.core.FileList instance
The cumulative segment files for each veto group.
names : list of strings
The segment names for the corresponding seg_file
cat_files : workflow.core.FileList instance
The list of individual category veto files
### Response:
def get_cumulative_veto_group_files(workflow, option, cat_files,
out_dir, execute_now=True, tags=None):
"""
Get the cumulative veto files that define the different backgrounds
we want to analyze, defined by groups of vetos.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
option : str
ini file option to use to get the veto groups
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
execute_now : Boolean
If true outputs are generated at runtime. Else jobs go into the workflow
and are generated then.
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
seg_files : workflow.core.FileList instance
The cumulative segment files for each veto group.
names : list of strings
The segment names for the corresponding seg_file
cat_files : workflow.core.FileList instance
The list of individual category veto files
"""
if tags is None:
tags = []
logging.info("Starting generating vetoes for groups in %s" %(option))
make_analysis_dir(out_dir)
cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('workflow-segments',
option, tags))
cum_seg_files = FileList()
names = []
for cat_set in cat_sets:
segment_name = "CUMULATIVE_CAT_%s" % (''.join(sorted(cat_set)))
logging.info('getting information for %s' % segment_name)
categories = [cat_to_veto_def_cat(c) for c in cat_set]
cum_seg_files += [get_cumulative_segs(workflow, categories, cat_files,
out_dir, execute_now=execute_now,
segment_name=segment_name, tags=tags)]
names.append(segment_name)
logging.info("Done generating vetoes for groups in %s" %(option))
return cum_seg_files, names, cat_files |
def args(self):
"""The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class) | The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important. | Below is the instruction that describes the task:
### Input:
The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
### Response:
def args(self):
"""The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class) |
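The `args` property above delegates to werkzeug's `url_decode` over `QUERY_STRING`. A rough standard-library illustration of the parsing step (werkzeug additionally returns an `ImmutableMultiDict` and honors the request charset and error handling):

```python
# Query-string parsing with the standard library, for illustration only.
from urllib.parse import parse_qsl

environ = {"QUERY_STRING": "page=2&tag=python&tag=wsgi"}
pairs = parse_qsl(environ.get("QUERY_STRING", ""), keep_blank_values=True)
print(pairs)  # [('page', '2'), ('tag', 'python'), ('tag', 'wsgi')]
```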
def forbid_web_access(f):
"""
Forbids running task using http request.
:param f: Callable
:return Callable
"""
@wraps(f)
def wrapper_fn(*args, **kwargs):
if isinstance(JobContext.get_current_context(), WebJobContext):
raise ForbiddenError('Access forbidden from web.')
return f(*args, **kwargs)
return wrapper_fn | Forbids running task using http request.
:param f: Callable
:return Callable | Below is the instruction that describes the task:
### Input:
Forbids running task using http request.
:param f: Callable
:return Callable
### Response:
def forbid_web_access(f):
"""
Forbids running task using http request.
:param f: Callable
:return Callable
"""
@wraps(f)
def wrapper_fn(*args, **kwargs):
if isinstance(JobContext.get_current_context(), WebJobContext):
raise ForbiddenError('Access forbidden from web.')
return f(*args, **kwargs)
return wrapper_fn |
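`forbid_web_access` above guards a task by inspecting the current job context. A self-contained sketch of the same guard pattern; `JobContext`, `WebJobContext` and `ForbiddenError` here are local placeholders, not the real framework types:

```python
# Guard-decorator sketch with stub context classes standing in for the framework.
from functools import wraps

class ForbiddenError(Exception):
    pass

class JobContext:
    current = None
    @classmethod
    def get_current_context(cls):
        return cls.current

class WebJobContext(JobContext):
    pass

def forbid_web_access(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        if isinstance(JobContext.get_current_context(), WebJobContext):
            raise ForbiddenError("Access forbidden from web.")
        return f(*args, **kwargs)
    return wrapper

@forbid_web_access
def rebuild_index():
    return "ok"

print(rebuild_index())                 # "ok" while no web context is active
JobContext.current = WebJobContext()   # simulate a web-initiated job
# rebuild_index()                      # would now raise ForbiddenError
```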
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header() | Create a #! line, getting options (if any) from script_text | Below is the instruction that describes the task:
### Input:
Create a #! line, getting options (if any) from script_text
### Response:
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header() |
def main(path):
'''scan path directory and any subdirectories for valid captain scripts'''
basepath = os.path.abspath(os.path.expanduser(str(path)))
echo.h2("Available scripts in {}".format(basepath))
echo.br()
for root_dir, dirs, files in os.walk(basepath, topdown=True):
for f in fnmatch.filter(files, '*.py'):
try:
filepath = os.path.join(root_dir, f)
# super edge case, this makes sure the python script won't start
# an interactive console session which would cause the session
# to start and not allow the for loop to complete
with open(filepath, encoding="UTF-8") as fp:
body = fp.read()
is_console = "InteractiveConsole" in body
is_console = is_console or "code" in body
is_console = is_console and "interact(" in body
if is_console:
continue
s = captain.Script(filepath)
if s.can_run_from_cli():
rel_filepath = s.call_path(basepath)
p = s.parser
echo.h3(rel_filepath)
desc = p.description
if desc:
echo.indent(desc, indent=(" " * 4))
subcommands = s.subcommands
if subcommands:
echo.br()
echo.indent("Subcommands:", indent=(" " * 4))
for sc in subcommands.keys():
echo.indent(sc, indent=(" " * 6))
echo.br()
except captain.ParseError:
pass
except Exception as e:
#echo.exception(e)
#echo.err("Failed to parse {} because {}", f, e.message)
echo.err("Failed to parse {}", f)
echo.verbose(e.message)
echo.br() | scan path directory and any subdirectories for valid captain scripts | Below is the instruction that describes the task:
### Input:
scan path directory and any subdirectories for valid captain scripts
### Response:
def main(path):
'''scan path directory and any subdirectories for valid captain scripts'''
basepath = os.path.abspath(os.path.expanduser(str(path)))
echo.h2("Available scripts in {}".format(basepath))
echo.br()
for root_dir, dirs, files in os.walk(basepath, topdown=True):
for f in fnmatch.filter(files, '*.py'):
try:
filepath = os.path.join(root_dir, f)
# super edge case, this makes sure the python script won't start
# an interactive console session which would cause the session
# to start and not allow the for loop to complete
with open(filepath, encoding="UTF-8") as fp:
body = fp.read()
is_console = "InteractiveConsole" in body
is_console = is_console or "code" in body
is_console = is_console and "interact(" in body
if is_console:
continue
s = captain.Script(filepath)
if s.can_run_from_cli():
rel_filepath = s.call_path(basepath)
p = s.parser
echo.h3(rel_filepath)
desc = p.description
if desc:
echo.indent(desc, indent=(" " * 4))
subcommands = s.subcommands
if subcommands:
echo.br()
echo.indent("Subcommands:", indent=(" " * 4))
for sc in subcommands.keys():
echo.indent(sc, indent=(" " * 6))
echo.br()
except captain.ParseError:
pass
except Exception as e:
#echo.exception(e)
#echo.err("Failed to parse {} because {}", f, e.message)
echo.err("Failed to parse {}", f)
echo.verbose(e.message)
echo.br() |
def get_info(self):
'''
Get info regarding the current fuzzed enclosed node
:return: info dictionary
'''
field = self._current_field()
if field:
info = field.get_info()
info['path'] = '%s/%s' % (self.name if self.name else '<no name>', info['path'])
else:
info = super(Container, self).get_info()
return info | Get info regarding the current fuzzed enclosed node
:return: info dictionary | Below is the instruction that describes the task:
### Input:
Get info regarding the current fuzzed enclosed node
:return: info dictionary
### Response:
def get_info(self):
'''
Get info regarding the current fuzzed enclosed node
:return: info dictionary
'''
field = self._current_field()
if field:
info = field.get_info()
info['path'] = '%s/%s' % (self.name if self.name else '<no name>', info['path'])
else:
info = super(Container, self).get_info()
return info |
def isel(self, indexers=None, drop=False, **indexers_kwargs):
"""Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)
return self._from_temp_dataset(ds) | Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel | Below is the instruction that describes the task:
### Input:
Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
### Response:
def isel(self, indexers=None, drop=False, **indexers_kwargs):
"""Return a new DataArray whose dataset is given by integer indexing
along the specified dimension(s).
See Also
--------
Dataset.isel
DataArray.sel
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)
return self._from_temp_dataset(ds) |
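`DataArray.isel` above accepts indexers either as a dict or as keyword arguments and indexes by integer position. A small usage example, assuming xarray is installed:

```python
# Positional (integer) indexing with isel, via keyword and dict forms.
import xarray as xr

da = xr.DataArray([[10, 20, 30], [40, 50, 60]], dims=("time", "space"))
print(da.isel(time=1).values)        # [40 50 60]
print(da.isel({"space": 0}).values)  # [10 40]
```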
def changed_lines(self):
"""
A list of dicts in the format:
{
'file_name': str,
'content': str,
'line_number': int,
'position': int
}
"""
lines = []
file_name = ''
line_number = 0
patch_position = -1
found_first_information_line = False
for i, content in enumerate(self.body.splitlines()):
range_information_match = RANGE_INFORMATION_LINE.search(content)
file_name_line_match = FILE_NAME_LINE.search(content)
if file_name_line_match:
file_name = file_name_line_match.group('file_name')
found_first_information_line = False
elif range_information_match:
line_number = int(range_information_match.group('line_number'))
if not found_first_information_line:
# This is the first information line. Set patch position to 1 and start counting
patch_position = 0
found_first_information_line = True
elif MODIFIED_LINE.search(content):
line = {
'file_name': file_name,
'content': content,
'line_number': line_number,
'position': patch_position
}
lines.append(line)
line_number += 1
elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
line_number += 1
patch_position += 1
return lines | A list of dicts in the format:
{
'file_name': str,
'content': str,
'line_number': int,
'position': int
} | Below is the instruction that describes the task:
### Input:
A list of dicts in the format:
{
'file_name': str,
'content': str,
'line_number': int,
'position': int
}
### Response:
def changed_lines(self):
"""
A list of dicts in the format:
{
'file_name': str,
'content': str,
'line_number': int,
'position': int
}
"""
lines = []
file_name = ''
line_number = 0
patch_position = -1
found_first_information_line = False
for i, content in enumerate(self.body.splitlines()):
range_information_match = RANGE_INFORMATION_LINE.search(content)
file_name_line_match = FILE_NAME_LINE.search(content)
if file_name_line_match:
file_name = file_name_line_match.group('file_name')
found_first_information_line = False
elif range_information_match:
line_number = int(range_information_match.group('line_number'))
if not found_first_information_line:
# This is the first information line. Set patch position to 1 and start counting
patch_position = 0
found_first_information_line = True
elif MODIFIED_LINE.search(content):
line = {
'file_name': file_name,
'content': content,
'line_number': line_number,
'position': patch_position
}
lines.append(line)
line_number += 1
elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
line_number += 1
patch_position += 1
return lines |
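`changed_lines` above walks a unified diff, pulling the target line number from each `@@` hunk header and tracking the position inside the patch. A trimmed standalone sketch over a tiny diff string; the regexes are local stand-ins, not the project's `FILE_NAME_LINE` / `RANGE_INFORMATION_LINE` / `MODIFIED_LINE` patterns, and the patch-position counter is omitted:

```python
# Minimal diff walk: report (file, target line number, content) for added lines.
import re

FILE_NAME = re.compile(r"^\+\+\+ b/(?P<file_name>.+)")
HUNK = re.compile(r"^@@ -\d+(?:,\d+)? \+(?P<line_number>\d+)")
ADDED = re.compile(r"^\+(?!\+\+)")

diff = """\
+++ b/app.py
@@ -1,2 +1,3 @@
 import os
+import sys
 print(os.name)
"""

file_name, line_number = "", 0
for content in diff.splitlines():
    file_match = FILE_NAME.search(content)
    hunk_match = HUNK.search(content)
    if file_match:
        file_name = file_match.group("file_name")
    elif hunk_match:
        line_number = int(hunk_match.group("line_number"))
    elif ADDED.search(content):
        print(file_name, line_number, content)  # app.py 2 +import sys
        line_number += 1
    else:
        line_number += 1
```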
def description(self, value):
"""Update description of the zone.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["description"] = value | Update description of the zone.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types. | Below is the instruction that describes the task:
### Input:
Update description of the zone.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types.
### Response:
def description(self, value):
"""Update description of the zone.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["description"] = value |
def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
pseudo=False, material=None, dangles='some', sodium=1.0,
magnesium=0.0):
'''Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strands: List of strands to use as inputs to pairs -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA>
:type magnesium: float
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:returns: Two probability matrices: The probability matrix as in the
pairs method (but with a dimension equal to the sum of the
lengths of the sequences in the permutation), and a similar
probability matrix where multiple strands of the same species
are considered to be indistinguishable.
:rtype: list
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strands, material, multi=True)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=True)
# Set up the input file and run the command. Note: no STDOUT
if permutation is None:
permutation = range(1, len(strands) + 1)
lines = self._multi_lines(strands, permutation)
self._run('pairs', cmd_args, lines)
# Read the output from file
N = sum([len(s) for s in strands])
matrices = []
for mat_type in ['ppairs', 'epairs']:
data = self._read_tempfile('pairs.' + mat_type)
probs = re.search('\n\n\d*\n(.*)', data, flags=re.DOTALL).group(1)
lines = probs.split('\n')
# Remove the last line (empty)
lines.pop()
pairlist = [line.split('\t') for line in lines]
prob_matrix = self._pairs_to_np(pairlist, N)
matrices.append(prob_matrix)
return matrices | Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strands: List of strands to use as inputs to pairs -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA>
:type magnesium: float
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:returns: Two probability matrices: The probability matrix as in the
pairs method (but with a dimension equal to the sum of the
lengths of the sequences in the permutation), and a similar
probability matrix where multiple strands of the same species
are considered to be indistinguishable.
:rtype: list | Below is the instruction that describes the task:
### Input:
Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strands: List of strands to use as inputs to pairs -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA>
:type magnesium: float
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:returns: Two probability matrices: The probability matrix as in the
pairs method (but with a dimension equal to the sum of the
lengths of the sequences in the permutation), and a similar
probability matrix where multiple strands of the same species
are considered to be indistinguishable.
:rtype: list
### Response:
def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
pseudo=False, material=None, dangles='some', sodium=1.0,
magnesium=0.0):
'''Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strands: List of strands to use as inputs to pairs -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
applies to DNA>
:type magnesium: float
:param cutoff: Only probabilities above this cutoff appear in the
output.
:type cutoff: float
:returns: Two probability matrices: The probability matrix as in the
pairs method (but with a dimension equal to the sum of the
lengths of the sequences in the permutation), and a similar
probability matrix where multiple strands of the same species
are considered to be indistinguishable.
:rtype: list
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strands, material, multi=True)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=True)
# Set up the input file and run the command. Note: no STDOUT
if permutation is None:
permutation = range(1, len(strands) + 1)
lines = self._multi_lines(strands, permutation)
self._run('pairs', cmd_args, lines)
# Read the output from file
N = sum([len(s) for s in strands])
matrices = []
for mat_type in ['ppairs', 'epairs']:
data = self._read_tempfile('pairs.' + mat_type)
probs = re.search('\n\n\d*\n(.*)', data, flags=re.DOTALL).group(1)
lines = probs.split('\n')
# Remove the last line (empty)
lines.pop()
pairlist = [line.split('\t') for line in lines]
prob_matrix = self._pairs_to_np(pairlist, N)
matrices.append(prob_matrix)
return matrices |
def tablename_from_link(klass, link):
"""
Helper method for URL's that look like /api/now/v1/table/FOO/sys_id etc.
"""
arr = link.split("/")
i = arr.index("table")
tn = arr[i+1]
return tn | Helper method for URL's that look like /api/now/v1/table/FOO/sys_id etc. | Below is the instruction that describes the task:
### Input:
Helper method for URL's that look like /api/now/v1/table/FOO/sys_id etc.
### Response:
def tablename_from_link(klass, link):
"""
Helper method for URL's that look like /api/now/v1/table/FOO/sys_id etc.
"""
arr = link.split("/")
i = arr.index("table")
tn = arr[i+1]
return tn |
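`tablename_from_link` simply takes the path segment that follows `table` in a ServiceNow-style REST URL. A short demonstration with a hypothetical link:

```python
# The table name is the segment right after "table"; the URL is illustrative.
link = "https://example.service-now.com/api/now/v1/table/incident/9d385017c611228701d22104cc95c371"
parts = link.split("/")
print(parts[parts.index("table") + 1])  # incident
```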
def local_node_swbd_number(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
local_node = ET.SubElement(config, "local-node", xmlns="urn:brocade.com:mgmt:brocade-vcs")
swbd_number = ET.SubElement(local_node, "swbd-number")
swbd_number.text = kwargs.pop('swbd_number')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def local_node_swbd_number(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
local_node = ET.SubElement(config, "local-node", xmlns="urn:brocade.com:mgmt:brocade-vcs")
swbd_number = ET.SubElement(local_node, "swbd-number")
swbd_number.text = kwargs.pop('swbd_number')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
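`local_node_swbd_number` above assembles its payload with `xml.etree.ElementTree`. A standalone sketch that builds the same element tree and prints the serialized XML; the swbd number is an illustrative value and the callback step is omitted:

```python
# Build and serialize the <config> payload; "131" is just an example value.
import xml.etree.ElementTree as ET

config = ET.Element("config")
local_node = ET.SubElement(config, "local-node", xmlns="urn:brocade.com:mgmt:brocade-vcs")
swbd_number = ET.SubElement(local_node, "swbd-number")
swbd_number.text = "131"
print(ET.tostring(config, encoding="unicode"))  # serialized <config>...</config> payload
```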
def pred_from_structures(self, target_species, structures_list,
remove_duplicates=True, remove_existing=False):
"""
performs a structure prediction targeting compounds containing all of
the target_species, based on a list of structure (those structures
can for instance come from a database like the ICSD). It will return
all the structures formed by ionic substitutions with a probability
higher than the threshold
Notes:
If the default probability model is used, input structures must
be oxidation state decorated. See AutoOxiStateDecorationTransformation
This method does not change the number of species in a structure. i.e
if the number of target species is 3, only input structures containing
3 species will be considered.
Args:
target_species:
a list of species with oxidation states
e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)]
structures_list:
a list of dictionaries of the form {'structure':Structure object
,'id':some id where it comes from}
the id can for instance refer to an ICSD id.
remove_duplicates:
if True, the duplicates in the predicted structures will
be removed
remove_existing:
if True, the predicted structures that already exist in the
structures_list will be removed
Returns:
a list of TransformedStructure objects.
"""
target_species = get_el_sp(target_species)
result = []
transmuter = StandardTransmuter([])
if len(list(set(target_species) & set(self.get_allowed_species()))) \
!= len(target_species):
raise ValueError("the species in target_species are not allowed "
+ "for the probability model you are using")
for permut in itertools.permutations(target_species):
for s in structures_list:
# check if: species are in the domain,
# and the probability of subst. is above the threshold
els = s['structure'].composition.elements
if len(els) == len(permut) and \
len(list(set(els) & set(
self.get_allowed_species()))) == \
len(els) and self._sp.cond_prob_list(permut,
els) > \
self._threshold:
clean_subst = {els[i]: permut[i]
for i in range(0, len(els))
if els[i] != permut[i]}
if len(clean_subst) == 0:
continue
transf = SubstitutionTransformation(clean_subst)
if Substitutor._is_charge_balanced(
transf.apply_transformation(s['structure'])):
ts = TransformedStructure(
s['structure'], [transf],
history=[{"source": s['id']}],
other_parameters={
'type': 'structure_prediction',
'proba': self._sp.cond_prob_list(permut, els)}
)
result.append(ts)
transmuter.append_transformed_structures([ts])
if remove_duplicates:
transmuter.apply_filter(RemoveDuplicatesFilter(
symprec=self._symprec))
if remove_existing:
# Make the list of structures from structures_list that corresponds to the
# target species
chemsys = list(set([sp.symbol for sp in target_species]))
structures_list_target = [st['structure'] for st in structures_list
if Substitutor._is_from_chemical_system(
chemsys,
st['structure'])]
transmuter.apply_filter(RemoveExistingFilter(structures_list_target,
symprec=self._symprec))
return transmuter.transformed_structures | performs a structure prediction targeting compounds containing all of
the target_species, based on a list of structure (those structures
can for instance come from a database like the ICSD). It will return
all the structures formed by ionic substitutions with a probability
higher than the threshold
Notes:
If the default probability model is used, input structures must
be oxidation state decorated. See AutoOxiStateDecorationTransformation
This method does not change the number of species in a structure. i.e
if the number of target species is 3, only input structures containing
3 species will be considered.
Args:
target_species:
a list of species with oxidation states
e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)]
structures_list:
a list of dictionaries of the form {'structure':Structure object
,'id':some id where it comes from}
the id can for instance refer to an ICSD id.
remove_duplicates:
if True, the duplicates in the predicted structures will
be removed
remove_existing:
if True, the predicted structures that already exist in the
structures_list will be removed
Returns:
a list of TransformedStructure objects. | Below is the instruction that describes the task:
### Input:
performs a structure prediction targeting compounds containing all of
the target_species, based on a list of structure (those structures
can for instance come from a database like the ICSD). It will return
all the structures formed by ionic substitutions with a probability
higher than the threshold
Notes:
If the default probability model is used, input structures must
be oxidation state decorated. See AutoOxiStateDecorationTransformation
This method does not change the number of species in a structure. i.e
if the number of target species is 3, only input structures containing
3 species will be considered.
Args:
target_species:
a list of species with oxidation states
e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)]
structures_list:
a list of dictionaries of the form {'structure':Structure object
,'id':some id where it comes from}
the id can for instance refer to an ICSD id.
remove_duplicates:
if True, the duplicates in the predicted structures will
be removed
remove_existing:
if True, the predicted structures that already exist in the
structures_list will be removed
Returns:
a list of TransformedStructure objects.
### Response:
def pred_from_structures(self, target_species, structures_list,
remove_duplicates=True, remove_existing=False):
"""
performs a structure prediction targeting compounds containing all of
the target_species, based on a list of structure (those structures
can for instance come from a database like the ICSD). It will return
all the structures formed by ionic substitutions with a probability
higher than the threshold
Notes:
If the default probability model is used, input structures must
be oxidation state decorated. See AutoOxiStateDecorationTransformation
This method does not change the number of species in a structure. i.e
if the number of target species is 3, only input structures containing
3 species will be considered.
Args:
target_species:
a list of species with oxidation states
e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)]
structures_list:
a list of dictionaries of the form {'structure':Structure object
,'id':some id where it comes from}
the id can for instance refer to an ICSD id.
remove_duplicates:
if True, the duplicates in the predicted structures will
be removed
remove_existing:
if True, the predicted structures that already exist in the
structures_list will be removed
Returns:
a list of TransformedStructure objects.
"""
target_species = get_el_sp(target_species)
result = []
transmuter = StandardTransmuter([])
if len(list(set(target_species) & set(self.get_allowed_species()))) \
!= len(target_species):
raise ValueError("the species in target_species are not allowed "
+ "for the probability model you are using")
for permut in itertools.permutations(target_species):
for s in structures_list:
# check if: species are in the domain,
# and the probability of subst. is above the threshold
els = s['structure'].composition.elements
if len(els) == len(permut) and \
len(list(set(els) & set(
self.get_allowed_species()))) == \
len(els) and self._sp.cond_prob_list(permut,
els) > \
self._threshold:
clean_subst = {els[i]: permut[i]
for i in range(0, len(els))
if els[i] != permut[i]}
if len(clean_subst) == 0:
continue
transf = SubstitutionTransformation(clean_subst)
if Substitutor._is_charge_balanced(
transf.apply_transformation(s['structure'])):
ts = TransformedStructure(
s['structure'], [transf],
history=[{"source": s['id']}],
other_parameters={
'type': 'structure_prediction',
'proba': self._sp.cond_prob_list(permut, els)}
)
result.append(ts)
transmuter.append_transformed_structures([ts])
if remove_duplicates:
transmuter.apply_filter(RemoveDuplicatesFilter(
symprec=self._symprec))
if remove_existing:
# Make the list of structures from structures_list that corresponds to the
# target species
chemsys = list(set([sp.symbol for sp in target_species]))
structures_list_target = [st['structure'] for st in structures_list
if Substitutor._is_from_chemical_system(
chemsys,
st['structure'])]
transmuter.apply_filter(RemoveExistingFilter(structures_list_target,
symprec=self._symprec))
return transmuter.transformed_structures |
def getUpdatedFields(self, cascadeObjects=False):
'''
getUpdatedFields - See changed fields.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return - a dictionary of fieldName : tuple(old, new).
fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
'''
updatedFields = {}
for thisField in self.FIELDS:
thisVal = object.__getattribute__(self, thisField)
if self._origData.get(thisField, '') != thisVal:
updatedFields[thisField] = (self._origData[thisField], thisVal)
if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase) and thisVal.objHasUnsavedChanges():
updatedFields[thisField] = (self._origData[thisField], thisVal)
return updatedFields | getUpdatedFields - See changed fields.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return - a dictionary of fieldName : tuple(old, new).
fieldName may be a string or may implement IRField (which implements string, and can be used just like a string) | Below is the instruction that describes the task:
### Input:
getUpdatedFields - See changed fields.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return - a dictionary of fieldName : tuple(old, new).
fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
### Response:
def getUpdatedFields(self, cascadeObjects=False):
'''
getUpdatedFields - See changed fields.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return - a dictionary of fieldName : tuple(old, new).
fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
'''
updatedFields = {}
for thisField in self.FIELDS:
thisVal = object.__getattribute__(self, thisField)
if self._origData.get(thisField, '') != thisVal:
updatedFields[thisField] = (self._origData[thisField], thisVal)
if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase) and thisVal.objHasUnsavedChanges():
updatedFields[thisField] = (self._origData[thisField], thisVal)
return updatedFields |
def public_ip_address_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get details about a specific public IP address.
:param name: The name of the public IP address to query.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup
'''
expand = kwargs.get('expand')
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
pub_ip = netconn.public_ip_addresses.get(
public_ip_address_name=name,
resource_group_name=resource_group,
expand=expand
)
result = pub_ip.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
Get details about a specific public IP address.
:param name: The name of the public IP address to query.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Get details about a specific public IP address.
:param name: The name of the public IP address to query.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup
### Response:
def public_ip_address_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get details about a specific public IP address.
:param name: The name of the public IP address to query.
:param resource_group: The resource group name assigned to the
public IP address.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup
'''
expand = kwargs.get('expand')
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
pub_ip = netconn.public_ip_addresses.get(
public_ip_address_name=name,
resource_group_name=resource_group,
expand=expand
)
result = pub_ip.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result |
def _extract_pc(d, root, pc, whichtables):
"""
Extract all data from a PaleoData dictionary.
:param dict d: PaleoData dictionary
:param dict root: Time series root data
:param str pc: paleoData or chronData
:param str whichtables: all, meas, summ, or ens
:return list _ts: Time series
"""
logger_ts.info("enter extract_pc")
_ts = []
try:
# For each table in pc
for k, v in d[pc].items():
if whichtables == "all" or whichtables == "meas":
for _table_name1, _table_data1 in v["measurementTable"].items():
_ts = _extract_table(_table_data1, copy.deepcopy(root), pc, _ts, "meas")
if whichtables != "meas":
if "model" in v:
for _table_name1, _table_data1 in v["model"].items():
# get the method info for this model. This will be paired to all summ and ens table data
_method = _extract_method(_table_data1["method"])
if whichtables == "all" or whichtables == "summ":
if "summaryTable" in _table_data1:
for _table_name2, _table_data2 in _table_data1["summaryTable"].items():
# take a copy of this tso root
_tso = copy.deepcopy(root)
# add in the method details
_tso.update(_method)
# add in the table details
_ts = _extract_table(_table_data2, _tso, pc, _ts, "summ")
if whichtables == "all" or whichtables == "ens":
if "ensembleTable" in _table_data1:
for _table_name2, _table_data2 in _table_data1["ensembleTable"].items():
_tso = copy.deepcopy(root)
_tso.update(_method)
_ts = _extract_table(_table_data2, _tso, pc, _ts, "ens")
except Exception as e:
logger_ts.warn("extract_pc: Exception: {}".format(e))
return _ts | Extract all data from a PaleoData dictionary.
:param dict d: PaleoData dictionary
:param dict root: Time series root data
:param str pc: paleoData or chronData
:param str whichtables: all, meas, summ, or ens
:return list _ts: Time series | Below is the instruction that describes the task:
### Input:
Extract all data from a PaleoData dictionary.
:param dict d: PaleoData dictionary
:param dict root: Time series root data
:param str pc: paleoData or chronData
:param str whichtables: all, meas, summ, or ens
:return list _ts: Time series
### Response:
def _extract_pc(d, root, pc, whichtables):
"""
Extract all data from a PaleoData dictionary.
:param dict d: PaleoData dictionary
:param dict root: Time series root data
:param str pc: paleoData or chronData
:param str whichtables: all, meas, summ, or ens
:return list _ts: Time series
"""
logger_ts.info("enter extract_pc")
_ts = []
try:
# For each table in pc
for k, v in d[pc].items():
if whichtables == "all" or whichtables == "meas":
for _table_name1, _table_data1 in v["measurementTable"].items():
_ts = _extract_table(_table_data1, copy.deepcopy(root), pc, _ts, "meas")
if whichtables != "meas":
if "model" in v:
for _table_name1, _table_data1 in v["model"].items():
# get the method info for this model. This will be paired to all summ and ens table data
_method = _extract_method(_table_data1["method"])
if whichtables == "all" or whichtables == "summ":
if "summaryTable" in _table_data1:
for _table_name2, _table_data2 in _table_data1["summaryTable"].items():
# take a copy of this tso root
_tso = copy.deepcopy(root)
# add in the method details
_tso.update(_method)
# add in the table details
_ts = _extract_table(_table_data2, _tso, pc, _ts, "summ")
if whichtables == "all" or whichtables == "ens":
if "ensembleTable" in _table_data1:
for _table_name2, _table_data2 in _table_data1["ensembleTable"].items():
_tso = copy.deepcopy(root)
_tso.update(_method)
_ts = _extract_table(_table_data2, _tso, pc, _ts, "ens")
except Exception as e:
logger_ts.warn("extract_pc: Exception: {}".format(e))
return _ts |
def wrap_invalid_resp_data_error(function):
"""Catch exceptions when using zvm client response data."""
@functools.wraps(function)
def decorated_function(*arg, **kwargs):
try:
return function(*arg, **kwargs)
except (ValueError, TypeError, IndexError, AttributeError,
KeyError) as err:
msg = ('Invalid smt response data. Error: %s' %
six.text_type(err))
LOG.error(msg)
raise exception.SDKInternalError(msg=msg)
return decorated_function | Catch exceptions when using zvm client response data. | Below is the instruction that describes the task:
### Input:
Catch exceptions when using zvm client response data.
### Response:
def wrap_invalid_resp_data_error(function):
"""Catch exceptions when using zvm client response data."""
@functools.wraps(function)
def decorated_function(*arg, **kwargs):
try:
return function(*arg, **kwargs)
except (ValueError, TypeError, IndexError, AttributeError,
KeyError) as err:
msg = ('Invalid smt response data. Error: %s' %
six.text_type(err))
LOG.error(msg)
raise exception.SDKInternalError(msg=msg)
return decorated_function |
def write_file(path, data, format=True):
"""
Write JSON data to file.
Arguments:
path (str): Destination.
data (dict or list): JSON serializable data.
format (bool, optional): Pretty-print JSON data.
"""
if format:
fs.write_file(path, format_json(data))
else:
fs.write_file(path, json.dumps(data)) | Write JSON data to file.
Arguments:
path (str): Destination.
data (dict or list): JSON serializable data.
format (bool, optional): Pretty-print JSON data. | Below is the the instruction that describes the task:
### Input:
Write JSON data to file.
Arguments:
path (str): Destination.
data (dict or list): JSON serializable data.
format (bool, optional): Pretty-print JSON data.
### Response:
def write_file(path, data, format=True):
"""
Write JSON data to file.
Arguments:
path (str): Destination.
data (dict or list): JSON serializable data.
format (bool, optional): Pretty-print JSON data.
"""
if format:
fs.write_file(path, format_json(data))
else:
fs.write_file(path, json.dumps(data)) |
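A small usage sketch, assuming the module's own fs.write_file and format_json helpers used in the body above are available; the paths are placeholders.

record = {"name": "example", "values": [1, 2, 3]}
write_file("/tmp/example.json", record)                     # pretty-printed JSON
write_file("/tmp/example.min.json", record, format=False)   # compact json.dumps output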
def get_params(self, *keys):
"""Returns the specified parameters for the current preprocessor.
Parameters:
-----------
keys : variable sized list, containing the names of the requested parameters
Returns:
--------
values : list or dictionary, if any `keys` are specified
those named parameters' values are returned, otherwise
all parameters are returned as a dictionary
"""
if len(keys) == 0:
return vars(self)
else:
return [vars(self)[k] for k in keys] | Returns the specified parameters for the current preprocessor.
Parameters:
-----------
keys : variable sized list, containing the names of the requested parameters
Returns:
--------
values : list or dictionary, if any `keys` are specified
those named parameters' values are returned, otherwise
all parameters are returned as a dictionary | Below is the the instruction that describes the task:
### Input:
Returns the specified parameters for the current preprocessor.
Parameters:
-----------
keys : variable sized list, containing the names of the requested parameters
Returns:
--------
values : list or dictionary, if any `keys` are specified
those named parameters' values are returned, otherwise
all parameters are returned as a dictionary
### Response:
def get_params(self, *keys):
"""Returns the specified parameters for the current preprocessor.
Parameters:
-----------
keys : variable sized list, containing the names of the requested parameters
Returns:
--------
values : list or dictionary, if any `keys` are specified
those named parameters' values are returned, otherwise
all parameters are returned as a dictionary
"""
if len(keys) == 0:
return vars(self)
else:
return [vars(self)[k] for k in keys] |
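A quick illustration with an invented preprocessor class; any object whose configuration lives in instance attributes behaves the same way.

class Scaler:
    """Toy preprocessor used only to demonstrate get_params."""
    def __init__(self, mean=0.0, std=1.0):
        self.mean = mean
        self.std = std
    get_params = get_params   # attach the function above as a method

s = Scaler(mean=2.5)
print(s.get_params())         # {'mean': 2.5, 'std': 1.0}
print(s.get_params('mean'))   # [2.5]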
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers | Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys". | Below is the the instruction that describes the task:
### Input:
Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
### Response:
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers |
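Two small sketches of the normalizer's behaviour, assuming the module's izip_longest and _text_type compatibility aliases used above are in scope.

# dict of columns, with the keys promoted to headers
rows, headers = _normalize_tabular_data({"name": ["a", "b"], "value": [1, 2]}, headers="keys")
# headers -> ['name', 'value']; rows -> [['a', 1], ['b', 2]]
# list of rows, with the first row promoted to headers
rows, headers = _normalize_tabular_data([["name", "value"], ["a", 1], ["b", 2]], headers="firstrow")
# headers -> ['name', 'value']; rows -> [['a', 1], ['b', 2]]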
def _GetActualName(name):
""" Note: Must be holding the _lazyLock """
if _allowCapitalizedNames:
name = UncapitalizeVmodlName(name)
for defMap in _dataDefMap, _managedDefMap, _enumDefMap:
dic = defMap.get(name)
if dic:
return dic[0]
return None | Note: Must be holding the _lazyLock | Below is the the instruction that describes the task:
### Input:
Note: Must be holding the _lazyLock
### Response:
def _GetActualName(name):
""" Note: Must be holding the _lazyLock """
if _allowCapitalizedNames:
name = UncapitalizeVmodlName(name)
for defMap in _dataDefMap, _managedDefMap, _enumDefMap:
dic = defMap.get(name)
if dic:
return dic[0]
return None |
def get_element_desc(element, ar_tree, ns):
# type: (_Element, _DocRoot, str) -> str
"""Get element description from XML."""
desc = get_child(element, "DESC", ar_tree, ns)
txt = get_child(desc, 'L-2[@L="DE"]', ar_tree, ns)
if txt is None:
txt = get_child(desc, 'L-2[@L="EN"]', ar_tree, ns)
if txt is None:
txt = get_child(desc, 'L-2', ar_tree, ns)
if txt is not None:
return txt.text
else:
return "" | Get element description from XML. | Below is the the instruction that describes the task:
### Input:
Get element description from XML.
### Response:
def get_element_desc(element, ar_tree, ns):
# type: (_Element, _DocRoot, str) -> str
"""Get element description from XML."""
desc = get_child(element, "DESC", ar_tree, ns)
txt = get_child(desc, 'L-2[@L="DE"]', ar_tree, ns)
if txt is None:
txt = get_child(desc, 'L-2[@L="EN"]', ar_tree, ns)
if txt is None:
txt = get_child(desc, 'L-2', ar_tree, ns)
if txt is not None:
return txt.text
else:
return "" |
def as_dict(self):
"""
Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object.
"""
ce_list_dict = [{str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]]
for cn in ce_dict} if ce_dict is not None else None for ce_dict in self.ce_list]
nbs_sets_dict = [{str(cn): [nb_set.as_dict() for nb_set in nb_sets]
for cn, nb_sets in site_nbs_sets.items()}
if site_nbs_sets is not None else None
for site_nbs_sets in self.neighbors_sets]
info_dict = {key: val for key, val in self.info.items() if key not in ['sites_info']}
info_dict['sites_info'] = [{'nb_sets_info': {str(cn): {str(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in self.info['sites_info']]
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"voronoi": self.voronoi.as_dict(),
"valences": self.valences,
"sites_map": self.sites_map,
"equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites],
"ce_list": ce_list_dict,
"structure": self.structure.as_dict(),
"neighbors_sets": nbs_sets_dict,
"info": info_dict} | Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object. | Below is the the instruction that describes the task:
### Input:
Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object.
### Response:
def as_dict(self):
"""
Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object.
"""
ce_list_dict = [{str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]]
for cn in ce_dict} if ce_dict is not None else None for ce_dict in self.ce_list]
nbs_sets_dict = [{str(cn): [nb_set.as_dict() for nb_set in nb_sets]
for cn, nb_sets in site_nbs_sets.items()}
if site_nbs_sets is not None else None
for site_nbs_sets in self.neighbors_sets]
info_dict = {key: val for key, val in self.info.items() if key not in ['sites_info']}
info_dict['sites_info'] = [{'nb_sets_info': {str(cn): {str(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in self.info['sites_info']]
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"voronoi": self.voronoi.as_dict(),
"valences": self.valences,
"sites_map": self.sites_map,
"equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites],
"ce_list": ce_list_dict,
"structure": self.structure.as_dict(),
"neighbors_sets": nbs_sets_dict,
"info": info_dict} |
def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs | Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together. | Below is the the instruction that describes the task:
### Input:
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
### Response:
def summed_probabilities(self, choosers, alternatives):
"""
Returns the sum of probabilities for alternatives across all
chooser segments.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
Must have a column matching the .segmentation_col attribute.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
Returns
-------
probs : pandas.Series
Summed probabilities from each segment added together.
"""
if len(alternatives) == 0 or len(choosers) == 0:
return pd.Series()
logger.debug(
'start: calculate summed probabilities in LCM group {}'.format(
self.name))
probs = []
for name, df in self._iter_groups(choosers):
probs.append(
self.models[name].summed_probabilities(df, alternatives))
add = tz.curry(pd.Series.add, fill_value=0)
probs = tz.reduce(add, probs)
logger.debug(
'finish: calculate summed probabilities in LCM group {}'.format(
self.name))
return probs |
def get_used_files():
"""Get files used by processes with name scanpy."""
import psutil
loop_over_scanpy_processes = (proc for proc in psutil.process_iter()
if proc.name() == 'scanpy')
filenames = []
for proc in loop_over_scanpy_processes:
try:
flist = proc.open_files()
for nt in flist:
filenames.append(nt.path)
# This catches a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess as err:
pass
return set(filenames) | Get files used by processes with name scanpy. | Below is the the instruction that describes the task:
### Input:
Get files used by processes with name scanpy.
### Response:
def get_used_files():
"""Get files used by processes with name scanpy."""
import psutil
loop_over_scanpy_processes = (proc for proc in psutil.process_iter()
if proc.name() == 'scanpy')
filenames = []
for proc in loop_over_scanpy_processes:
try:
flist = proc.open_files()
for nt in flist:
filenames.append(nt.path)
# This catches a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess as err:
pass
return set(filenames) |
def spellcheck_results(self):
"""The list of True/False values denoting the correct spelling of words."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return vabamorf.spellcheck(self.word_texts, suggestions=True) | The list of True/False values denoting the correct spelling of words. | Below is the the instruction that describes the task:
### Input:
The list of True/False values denoting the correct spelling of words.
### Response:
def spellcheck_results(self):
"""The list of True/False values denoting the correct spelling of words."""
if not self.is_tagged(WORDS):
self.tokenize_words()
return vabamorf.spellcheck(self.word_texts, suggestions=True) |
def get_ingest_status(self, dataset_id):
"""
Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state
this endpoint will return error/failure. If any files are still processing, will return processing.
:param dataset_id: Dataset identifier
:return: Status of dataset ingestion as a string
"""
failure_message = "Failed to create dataset ingest status for dataset {}".format(dataset_id)
response = self._get_success_json(
self._get('v1/datasets/' + str(dataset_id) + '/ingest-status',
failure_message=failure_message))['data']
if 'status' in response:
return response['status']
return '' | Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state
this endpoint will return error/failure. If any files are still processing, will return processing.
:param dataset_id: Dataset identifier
:return: Status of dataset ingestion as a string | Below is the the instruction that describes the task:
### Input:
Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state
this endpoint will return error/failure. If any files are still processing, will return processing.
:param dataset_id: Dataset identifier
:return: Status of dataset ingestion as a string
### Response:
def get_ingest_status(self, dataset_id):
"""
Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state
this endpoint will return error/failure. If any files are still processing, will return processing.
:param dataset_id: Dataset identifier
:return: Status of dataset ingestion as a string
"""
failure_message = "Failed to create dataset ingest status for dataset {}".format(dataset_id)
response = self._get_success_json(
self._get('v1/datasets/' + str(dataset_id) + '/ingest-status',
failure_message=failure_message))['data']
if 'status' in response:
return response['status']
return '' |
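A hedged polling sketch; `client` stands for whatever object exposes this method in the API wrapper, `dataset_id` is a placeholder, and the sleep interval is arbitrary.

import time

status = client.get_ingest_status(dataset_id)
while status == 'processing':          # keep waiting while files are still being ingested
    time.sleep(30)
    status = client.get_ingest_status(dataset_id)
print("ingestion finished with status:", status)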
def find_sources(self):
"""
Look for Python sources available for the current configuration.
"""
app_configs = apps.get_app_configs()
for app_config in app_configs:
ignore_dirs = []
for root, dirs, files in os.walk(app_config.path):
if [True for idir in ignore_dirs if root.startswith(idir)]:
continue
if '__init__.py' not in files:
ignore_dirs.append(root)
continue
for filename in files:
basename, ext = os.path.splitext(filename)
if ext != '.py':
continue
yield os.path.abspath(os.path.join(root, filename)) | Look for Python sources available for the current configuration. | Below is the the instruction that describes the task:
### Input:
Look for Python sources available for the current configuration.
### Response:
def find_sources(self):
"""
Look for Python sources available for the current configuration.
"""
app_configs = apps.get_app_configs()
for app_config in app_configs:
ignore_dirs = []
for root, dirs, files in os.walk(app_config.path):
if [True for idir in ignore_dirs if root.startswith(idir)]:
continue
if '__init__.py' not in files:
ignore_dirs.append(root)
continue
for filename in files:
basename, ext = os.path.splitext(filename)
if ext != '.py':
continue
yield os.path.abspath(os.path.join(root, filename)) |
def recvRtspReply(self):
"""Receive RTSP reply from the server."""
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply)
# Close the RTSP socket upon requesting Teardown
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break | Receive RTSP reply from the server. | Below is the the instruction that describes the task:
### Input:
Receive RTSP reply from the server.
### Response:
def recvRtspReply(self):
"""Receive RTSP reply from the server."""
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply)
# Close the RTSP socket upon requesting Teardown
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break |
def new(obj, path, value, separator="/"):
"""
Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys
"""
pathlist = __safe_path__(path, separator)
pathobj = dpath.path.path_types(obj, pathlist)
return dpath.path.set(obj, pathobj, value, create_missing=True) | Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys | Below is the the instruction that describes the task:
### Input:
Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys
### Response:
def new(obj, path, value, separator="/"):
"""
Set the element at the terminus of path to value, and create
it if it does not exist (as opposed to 'set' that can only
change existing keys).
path will NOT be treated like a glob. If it has globbing
characters in it, they will become part of the resulting
keys
"""
pathlist = __safe_path__(path, separator)
pathobj = dpath.path.path_types(obj, pathlist)
return dpath.path.set(obj, pathobj, value, create_missing=True) |
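A usage sketch in the style of the dpath API this function belongs to; both calls create the intermediate dictionaries as needed.

obj = {}
new(obj, "a/b/c", 42)                 # obj -> {'a': {'b': {'c': 42}}}
new(obj, "a;b;d", 7, separator=";")   # same idea with a custom separator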
def _path_pair(self, s):
"""Parse two paths separated by a space."""
# TODO: handle a space in the first path
if s.startswith(b'"'):
parts = s[1:].split(b'" ', 1)
else:
parts = s.split(b' ', 1)
if len(parts) != 2:
self.abort(errors.BadFormat, '?', '?', s)
elif parts[1].startswith(b'"') and parts[1].endswith(b'"'):
parts[1] = parts[1][1:-1]
elif parts[1].startswith(b'"') or parts[1].endswith(b'"'):
self.abort(errors.BadFormat, '?', '?', s)
return [_unquote_c_string(s) for s in parts] | Parse two paths separated by a space. | Below is the the instruction that describes the task:
### Input:
Parse two paths separated by a space.
### Response:
def _path_pair(self, s):
"""Parse two paths separated by a space."""
# TODO: handle a space in the first path
if s.startswith(b'"'):
parts = s[1:].split(b'" ', 1)
else:
parts = s.split(b' ', 1)
if len(parts) != 2:
self.abort(errors.BadFormat, '?', '?', s)
elif parts[1].startswith(b'"') and parts[1].endswith(b'"'):
parts[1] = parts[1][1:-1]
elif parts[1].startswith(b'"') or parts[1].endswith(b'"'):
self.abort(errors.BadFormat, '?', '?', s)
return [_unquote_c_string(s) for s in parts] |
def parse_srec(srec):
"""
Extract the data portion of a given S-Record (without checksum)
    Returns: the record type, the length of the data section, the write address, the data itself and the checksum
"""
record_type = srec[0:2]
data_len = srec[2:4]
addr_len = __ADDR_LEN.get(record_type) * 2
addr = srec[4:4 + addr_len]
data = srec[4 + addr_len:len(srec)-2]
checksum = srec[len(srec) - 2:]
return record_type, data_len, addr, data, checksum | Extract the data portion of a given S-Record (without checksum)
Returns: the record type, the length of the data section, the write address, the data itself and the checksum | Below is the the instruction that describes the task:
### Input:
Extract the data portion of a given S-Record (without checksum)
Returns: the record type, the length of the data section, the write address, the data itself and the checksum
### Response:
def parse_srec(srec):
"""
Extract the data portion of a given S-Record (without checksum)
    Returns: the record type, the length of the data section, the write address, the data itself and the checksum
"""
record_type = srec[0:2]
data_len = srec[2:4]
addr_len = __ADDR_LEN.get(record_type) * 2
addr = srec[4:4 + addr_len]
data = srec[4 + addr_len:len(srec)-2]
checksum = srec[len(srec) - 2:]
return record_type, data_len, addr, data, checksum |
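A small decoding sketch using a standard S1 example record; it assumes the module's __ADDR_LEN table maps 'S1' to a two-byte address, and the fields come back as hex substrings rather than integers.

srec = "S1137AF00A0A0D0000000000000000000000000061"
rtype, dlen, addr, data, checksum = parse_srec(srec)
# rtype -> 'S1', dlen -> '13', addr -> '7AF0', checksum -> '61'
# data  -> '0A0A0D00000000000000000000000000'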
def situations(self, *, tz_offset=None):
"""Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
"""
response = self._call(
mc_calls.ListenNowSituations,
tz_offset
)
situation_list = response.body.get('situations', [])
return situation_list | Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds. | Below is the the instruction that describes the task:
### Input:
Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
### Response:
def situations(self, *, tz_offset=None):
"""Get a listing of situations.
Parameters:
tz_offset (int, Optional): A time zone offset from UTC in seconds.
"""
response = self._call(
mc_calls.ListenNowSituations,
tz_offset
)
situation_list = response.body.get('situations', [])
return situation_list |
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self) | Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance. | Below is the the instruction that describes the task:
### Input:
Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
### Response:
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self) |
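A sketch of how this method is typically reached through the libclang Python bindings; the in-memory C source and file name are invented for illustration.

from clang.cindex import Index, CursorKind

src = "int add(int a, double b);"
tu = Index.create().parse("demo.c", args=["-x", "c"], unsaved_files=[("demo.c", src)])
decl = next(c for c in tu.cursor.get_children()
            if c.kind == CursorKind.FUNCTION_DECL and c.spelling == "add")
for param_type in decl.type.argument_types():
    print(param_type.spelling)   # prints 'int', then 'double'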
def num_gpus():
"""Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs.
"""
count = ctypes.c_int()
check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
return count.value | Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs. | Below is the the instruction that describes the task:
### Input:
Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs.
### Response:
def num_gpus():
"""Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs.
"""
count = ctypes.c_int()
check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
return count.value |
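A typical device-selection sketch; it assumes this helper is exposed as mxnet.context.num_gpus, which is where recent MXNet releases place it.

import mxnet as mx

ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
x = mx.nd.ones((2, 3), ctx=ctx)   # allocate on a GPU when one is present, otherwise on CPU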
def gaussian_hmm(pi, P, means, sigmas):
""" Initializes a 1D-Gaussian HMM
Parameters
----------
pi : ndarray(nstates, )
Initial distribution.
P : ndarray(nstates,nstates)
Hidden transition matrix
means : ndarray(nstates, )
Means of Gaussian output distributions
sigmas : ndarray(nstates, )
Standard deviations of Gaussian output distributions
stationary : bool, optional, default=True
If True: initial distribution is equal to stationary distribution of transition matrix
reversible : bool, optional, default=True
If True: transition matrix will fulfill detailed balance constraints.
"""
from bhmm.hmm.gaussian_hmm import GaussianHMM
from bhmm.output_models.gaussian import GaussianOutputModel
# count states
nstates = _np.array(P).shape[0]
# initialize output model
output_model = GaussianOutputModel(nstates, means, sigmas)
# initialize general HMM
from bhmm.hmm.generic_hmm import HMM as _HMM
ghmm = _HMM(pi, P, output_model)
# turn it into a Gaussian HMM
ghmm = GaussianHMM(ghmm)
return ghmm | Initializes a 1D-Gaussian HMM
Parameters
----------
pi : ndarray(nstates, )
Initial distribution.
P : ndarray(nstates,nstates)
Hidden transition matrix
means : ndarray(nstates, )
Means of Gaussian output distributions
sigmas : ndarray(nstates, )
Standard deviations of Gaussian output distributions
stationary : bool, optional, default=True
If True: initial distribution is equal to stationary distribution of transition matrix
reversible : bool, optional, default=True
If True: transition matrix will fulfill detailed balance constraints. | Below is the the instruction that describes the task:
### Input:
Initializes a 1D-Gaussian HMM
Parameters
----------
pi : ndarray(nstates, )
Initial distribution.
P : ndarray(nstates,nstates)
Hidden transition matrix
means : ndarray(nstates, )
Means of Gaussian output distributions
sigmas : ndarray(nstates, )
Standard deviations of Gaussian output distributions
stationary : bool, optional, default=True
If True: initial distribution is equal to stationary distribution of transition matrix
reversible : bool, optional, default=True
If True: transition matrix will fulfill detailed balance constraints.
### Response:
def gaussian_hmm(pi, P, means, sigmas):
""" Initializes a 1D-Gaussian HMM
Parameters
----------
pi : ndarray(nstates, )
Initial distribution.
P : ndarray(nstates,nstates)
Hidden transition matrix
means : ndarray(nstates, )
Means of Gaussian output distributions
sigmas : ndarray(nstates, )
Standard deviations of Gaussian output distributions
stationary : bool, optional, default=True
If True: initial distribution is equal to stationary distribution of transition matrix
reversible : bool, optional, default=True
If True: transition matrix will fulfill detailed balance constraints.
"""
from bhmm.hmm.gaussian_hmm import GaussianHMM
from bhmm.output_models.gaussian import GaussianOutputModel
# count states
nstates = _np.array(P).shape[0]
# initialize output model
output_model = GaussianOutputModel(nstates, means, sigmas)
# initialize general HMM
from bhmm.hmm.generic_hmm import HMM as _HMM
ghmm = _HMM(pi, P, output_model)
# turn it into a Gaussian HMM
ghmm = GaussianHMM(ghmm)
return ghmm |
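A small construction sketch with a two-state model, assuming the surrounding bhmm package is importable; the numbers are arbitrary but keep P row-stochastic.

import numpy as np

pi = np.array([0.5, 0.5])             # initial distribution
P = np.array([[0.9, 0.1],
              [0.1, 0.9]])            # hidden transition matrix
means = np.array([-1.0, 1.0])         # per-state output means
sigmas = np.array([0.5, 0.5])         # per-state output standard deviations
model = gaussian_hmm(pi, P, means, sigmas)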
def nltides_coefs(amplitude, n, m1, m2):
"""Calculate the coefficents needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f)
"""
# Use 100.0 Hz as a reference frequency
f_ref = 100.0
# Calculate chirp mass
mc = mchirp_from_mass1_mass2(m1, m2)
mc *= lal.lal.MSUN_SI
# Calculate constants in phasing
a = (96./5.) * \
(lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.)
b = 6. * amplitude
t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.))
phi_of_f_factor = -2.*b / (a*a * (n-3.))
    return f_ref, t_of_f_factor, phi_of_f_factor | Calculate the coefficients needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f) | Below is the the instruction that describes the task:
### Input:
Calculate the coefficients needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f)
### Response:
def nltides_coefs(amplitude, n, m1, m2):
"""Calculate the coefficents needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f)
"""
# Use 100.0 Hz as a reference frequency
f_ref = 100.0
# Calculate chirp mass
mc = mchirp_from_mass1_mass2(m1, m2)
mc *= lal.lal.MSUN_SI
# Calculate constants in phasing
a = (96./5.) * \
(lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.)
b = 6. * amplitude
t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.))
phi_of_f_factor = -2.*b / (a*a * (n-3.))
return f_ref, t_of_f_factor, phi_of_f_factor |
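A call sketch with illustrative numbers, assuming the surrounding module (with mchirp_from_mass1_mass2 and lal) is importable; the amplitude and growth index are placeholders rather than physically motivated values, and the masses are in solar masses as the lal conversion in the body implies.

# Hypothetical parameter values purely for illustration.
f_ref, t_fac, phi_fac = nltides_coefs(amplitude=1e-6, n=1.0, m1=1.4, m2=1.35)
# f_ref is fixed at 100 Hz; t_fac and phi_fac scale the frequency-dependent shifts in t(f) and phi(f).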
def _grab_concretization_results(cls, state):
"""
Grabs the concretized result so we can add the constraint ourselves.
"""
# only grab ones that match the constrained addrs
if cls._should_add_constraints(state):
addr = state.inspect.address_concretization_expr
result = state.inspect.address_concretization_result
if result is None:
l.warning("addr concretization result is None")
return
state.preconstrainer.address_concretization.append((addr, result)) | Grabs the concretized result so we can add the constraint ourselves. | Below is the the instruction that describes the task:
### Input:
Grabs the concretized result so we can add the constraint ourselves.
### Response:
def _grab_concretization_results(cls, state):
"""
Grabs the concretized result so we can add the constraint ourselves.
"""
# only grab ones that match the constrained addrs
if cls._should_add_constraints(state):
addr = state.inspect.address_concretization_expr
result = state.inspect.address_concretization_result
if result is None:
l.warning("addr concretization result is None")
return
state.preconstrainer.address_concretization.append((addr, result)) |
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys() | Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)] | Below is the the instruction that describes the task:
### Input:
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
### Response:
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys() |
def _vmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
"""Internal function that is the integrand for the vmomentsurface mass integration"""
return vR**n*vT**m*vz**o*df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False)*numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.) | Internal function that is the integrand for the vmomentsurface mass integration | Below is the the instruction that describes the task:
### Input:
Internal function that is the integrand for the vmomentsurface mass integration
### Response:
def _vmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
"""Internal function that is the integrand for the vmomentsurface mass integration"""
return vR**n*vT**m*vz**o*df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False)*numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.) |
def location_query(self):
"""
Return the Location-Query of the response.
:rtype : String
:return: the Location-Query option
"""
value = []
for option in self.options:
if option.number == defines.OptionRegistry.LOCATION_QUERY.number:
value.append(option.value)
return value | Return the Location-Query of the response.
:rtype : String
:return: the Location-Query option | Below is the the instruction that describes the task:
### Input:
Return the Location-Query of the response.
:rtype : String
:return: the Location-Query option
### Response:
def location_query(self):
"""
Return the Location-Query of the response.
:rtype : String
:return: the Location-Query option
"""
value = []
for option in self.options:
if option.number == defines.OptionRegistry.LOCATION_QUERY.number:
value.append(option.value)
return value |
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
        before, and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
            A callable of the form ``after(obj, answer)`` where ``obj`` is
            the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after)) | Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj`` is
the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively. | Below is the the instruction that describes the task:
### Input:
Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj`` is
the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
### Response:
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
        before, and/or after a given method is called - hereafter referred to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
            A callable of the form ``after(obj, answer)`` where ``obj`` is
            the instance which called a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after)) |
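A registration sketch in the spirit of the spectate API; `spectator` stands for an instance of the class this method belongs to, and the watched method names and printing callbacks are invented for illustration.

def log_call(obj, call):
    # runs before the watched method; `call` carries name/args/kwargs
    print("about to run", call.name, "with", call.args)

def log_result(obj, answer):
    # runs after the watched method; `answer` carries name/value/before
    print(answer.name, "returned", answer.value)

spectator.callback("append", before=log_call, after=log_result)
spectator.callback(["append", "extend"], before=log_call)   # several methods at once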
def run_tasks(procs, err_q, out_q, num):
"""
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
"""
# function to terminate processes that are still running.
die = (lambda vals : [val.terminate() for val in vals
if val.exitcode is None])
try:
for proc in procs:
proc.start()
for proc in procs:
proc.join()
except Exception as e:
# kill all slave processes on ctrl-C
try:
die(procs)
finally:
raise e
if not err_q.empty():
# kill all on any exception from any one slave
try:
die(procs)
finally:
raise err_q.get()
# Processes finish in arbitrary order. Process IDs double
# as index in the resultant array.
results=[None]*num;
while not out_q.empty():
idx, result = out_q.get()
results[idx] = result
# Remove extra dimension added by array_split
return list(numpy.concatenate(results)) | A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array | Below is the the instruction that describes the task:
### Input:
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
### Response:
def run_tasks(procs, err_q, out_q, num):
"""
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
"""
# function to terminate processes that are still running.
die = (lambda vals : [val.terminate() for val in vals
if val.exitcode is None])
try:
for proc in procs:
proc.start()
for proc in procs:
proc.join()
except Exception as e:
# kill all slave processes on ctrl-C
try:
die(procs)
finally:
raise e
if not err_q.empty():
# kill all on any exception from any one slave
try:
die(procs)
finally:
raise err_q.get()
# Processes finish in arbitrary order. Process IDs double
# as index in the resultant array.
results=[None]*num;
while not out_q.empty():
idx, result = out_q.get()
results[idx] = result
# Remove extra dimension added by array_split
return list(numpy.concatenate(results)) |
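A minimal end-to-end sketch of the pattern this helper expects: the caller splits work across processes whose results are pushed onto out_q as (index, partial result) pairs. The worker function and chunking below are invented to make the sketch self-contained.

import multiprocessing
import numpy

def worker(idx, chunk, out_q, err_q):
    try:
        out_q.put((idx, numpy.asarray(chunk) ** 2))   # (position, partial result)
    except Exception as exc:                          # forward failures to the parent
        err_q.put(exc)

if __name__ == "__main__":
    chunks = numpy.array_split(numpy.arange(8), 4)
    out_q, err_q = multiprocessing.Queue(), multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker, args=(i, c, out_q, err_q))
             for i, c in enumerate(chunks)]
    squared = run_tasks(procs, err_q, out_q, num=len(chunks))
    # squared -> [0, 1, 4, 9, 16, 25, 36, 49]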
def add_method_callback(self, classname, methodname, method_callback,
namespace=None,):
"""
Register a callback function for a CIM method that will be called when
the CIM method is invoked via `InvokeMethod`.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
Parameters:
classname (:term:`string`):
The CIM class name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
For method invocations on a target instance, this must be the class
name of the creation class of the target instance.
For method invocations on a target class, this must be the class
name of the target class.
methodname (:term:`string`):
The CIM method name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
method_callback (:func:`~pywbem_mock.method_callback_interface`):
The callback function.
namespace (:term:`string`):
The CIM namespace for which the callback function is registered.
If `None`, the callback function is registered for the default
namespace of the connection.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
Raises:
ValueError: Duplicate method specification.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
"""
if namespace is None:
namespace = self.default_namespace
# Validate namespace
method_repo = self._get_method_repo(namespace)
if classname not in method_repo:
method_repo[classname] = NocaseDict()
if methodname in method_repo[classname]:
raise ValueError("Duplicate method specification")
method_repo[classname][methodname] = method_callback | Register a callback function for a CIM method that will be called when
the CIM method is invoked via `InvokeMethod`.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
Parameters:
classname (:term:`string`):
The CIM class name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
For method invocations on a target instance, this must be the class
name of the creation class of the target instance.
For method invocations on a target class, this must be the class
name of the target class.
methodname (:term:`string`):
The CIM method name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
method_callback (:func:`~pywbem_mock.method_callback_interface`):
The callback function.
namespace (:term:`string`):
The CIM namespace for which the callback function is registered.
If `None`, the callback function is registered for the default
namespace of the connection.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
Raises:
ValueError: Duplicate method specification.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist. | Below is the the instruction that describes the task:
### Input:
Register a callback function for a CIM method that will be called when
the CIM method is invoked via `InvokeMethod`.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
Parameters:
classname (:term:`string`):
The CIM class name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
For method invocations on a target instance, this must be the class
name of the creation class of the target instance.
For method invocations on a target class, this must be the class
name of the target class.
methodname (:term:`string`):
The CIM method name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
method_callback (:func:`~pywbem_mock.method_callback_interface`):
The callback function.
namespace (:term:`string`):
The CIM namespace for which the callback function is registered.
If `None`, the callback function is registered for the default
namespace of the connection.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
Raises:
ValueError: Duplicate method specification.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
### Response:
def add_method_callback(self, classname, methodname, method_callback,
namespace=None,):
"""
Register a callback function for a CIM method that will be called when
the CIM method is invoked via `InvokeMethod`.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
Parameters:
classname (:term:`string`):
The CIM class name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
For method invocations on a target instance, this must be the class
name of the creation class of the target instance.
For method invocations on a target class, this must be the class
name of the target class.
methodname (:term:`string`):
The CIM method name for which the callback function is registered.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
method_callback (:func:`~pywbem_mock.method_callback_interface`):
The callback function.
namespace (:term:`string`):
The CIM namespace for which the callback function is registered.
If `None`, the callback function is registered for the default
namespace of the connection.
The faked `InvokeMethod` implementation uses this information to
look up the callback function from its parameters.
Raises:
ValueError: Duplicate method specification.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
"""
if namespace is None:
namespace = self.default_namespace
# Validate namespace
method_repo = self._get_method_repo(namespace)
if classname not in method_repo:
method_repo[classname] = NocaseDict()
if methodname in method_repo[classname]:
raise ValueError("Duplicate method specification")
method_repo[classname][methodname] = method_callback |
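A registration sketch modelled on pywbem_mock's FakedWBEMConnection; the class name, method name, and callback body are illustrative, and the callback's exact signature should be taken from method_callback_interface rather than from this sketch.

def my_method_callback(*args, **kwargs):
    # Hypothetical handler: ignore the inputs and report success with one output parameter.
    return (0, {'OutParam': 'done'})

conn = FakedWBEMConnection(default_namespace='root/cimv2')
conn.add_method_callback('CIM_Foo', 'MyMethod', my_method_callback)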
def limits(self):
"""
Limits to use for the independent variable whenever
creating a linespace, plot, etc.
Defaults to `(-x, x)` where `x` is the largest absolute value
of the data corresponding to the independent variable.
If no such values are negative, defaults to `(0, x)` instead.
"""
if not hasattr(self, '_limits'):
xmax = max(abs(self.data.array[0]))
xmin = min(self.data.array[0])
x_error = self.data.error[0]
if isinstance(x_error, numpy.ndarray):
if x_error.ndim == 0: xmax = xmax + x_error
if xmin < 0:
self._limits = (-xmax, xmax)
else:
self._limits = (0, xmax)
return self._limits | Limits to use for the independent variable whenever
creating a linespace, plot, etc.
Defaults to `(-x, x)` where `x` is the largest absolute value
of the data corresponding to the independent variable.
If no such values are negative, defaults to `(0, x)` instead. | Below is the the instruction that describes the task:
### Input:
Limits to use for the independent variable whenever
creating a linespace, plot, etc.
Defaults to `(-x, x)` where `x` is the largest absolute value
of the data corresponding to the independent variable.
If no such values are negative, defaults to `(0, x)` instead.
### Response:
def limits(self):
"""
Limits to use for the independent variable whenever
creating a linespace, plot, etc.
Defaults to `(-x, x)` where `x` is the largest absolute value
of the data corresponding to the independent variable.
If no such values are negative, defaults to `(0, x)` instead.
"""
if not hasattr(self, '_limits'):
xmax = max(abs(self.data.array[0]))
xmin = min(self.data.array[0])
x_error = self.data.error[0]
if isinstance(x_error, numpy.ndarray):
if x_error.ndim == 0: xmax = xmax + x_error
if xmin < 0:
self._limits = (-xmax, xmax)
else:
self._limits = (0, xmax)
return self._limits |
def _get_override_base(self, override_wrapper):
"""Retrieve the override base class from the
:class:`_OverriddenMethod` wrapper.
"""
base = override_wrapper.modifier.base
if not base:
return None
if is_class(base):
return base
# resolve the (possibly qualified) class name
if '.' in base:
# repeatedly try to import the first N-1, N-2, etc. dot-separated
# parts of the qualified name; this way we can handle all names
# including `package.module.Class.InnerClass`
dot_parts = base.split('.')
for i in xrange(len(dot_parts) - 1, 1, -1): # n-1 -> 1
module_name = '.'.join(dot_parts[:i])
class_name = '.'.join(dot_parts[i:])
try:
module = __import__(module_name, fromlist=[dot_parts[i]])
break
except ImportError:
pass
else:
# couldn't resolve class name, return it verbatim
return base
else:
class_name = base
module_name = override_wrapper.method.__module__
module = sys.modules[module_name]
return getattr(module, class_name) | Retrieve the override base class from the
:class:`_OverriddenMethod` wrapper. | Below is the instruction that describes the task:
### Input:
Retrieve the override base class from the
:class:`_OverriddenMethod` wrapper.
### Response:
def _get_override_base(self, override_wrapper):
"""Retrieve the override base class from the
:class:`_OverriddenMethod` wrapper.
"""
base = override_wrapper.modifier.base
if not base:
return None
if is_class(base):
return base
# resolve the (possibly qualified) class name
if '.' in base:
# repeatedly try to import the first N-1, N-2, etc. dot-separated
# parts of the qualified name; this way we can handle all names
# including `package.module.Class.InnerClass`
dot_parts = base.split('.')
for i in xrange(len(dot_parts) - 1, 1, -1): # n-1 -> 1
module_name = '.'.join(dot_parts[:i])
class_name = '.'.join(dot_parts[i:])
try:
module = __import__(module_name, fromlist=[dot_parts[i]])
break
except ImportError:
pass
else:
# couldn't resolve class name, return it verbatim
return base
else:
class_name = base
module_name = override_wrapper.method.__module__
module = sys.modules[module_name]
return getattr(module, class_name) |
def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p)) | See: https://core.telegram.org/bots/api#sendchataction | Below is the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#sendchataction
### Response:
def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p)) |
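A short usage sketch, assuming the method above sits on a telepot-style Bot object; the token and chat id are placeholders:

import telepot

bot = telepot.Bot('PLACEHOLDER-BOT-TOKEN')
bot.sendChatAction(chat_id=123456789, action='typing')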
def fault_zone(self, zone, simulate_wire_problem=False):
"""
Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool
"""
# Allow ourselves to also be passed an address/channel combination
# for zone expanders.
#
# Format (expander index, channel)
if isinstance(zone, tuple):
expander_idx, channel = zone
zone = self._zonetracker.expander_to_zone(expander_idx, channel)
status = 2 if simulate_wire_problem else 1
self.send("L{0:02}{1}\r".format(zone, status)) | Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool | Below is the instruction that describes the task:
### Input:
Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool
### Response:
def fault_zone(self, zone, simulate_wire_problem=False):
"""
Faults a zone if we are emulating a zone expander.
:param zone: zone to fault
:type zone: int
:param simulate_wire_problem: Whether or not to simulate a wire fault
:type simulate_wire_problem: bool
"""
# Allow ourselves to also be passed an address/channel combination
# for zone expanders.
#
# Format (expander index, channel)
if isinstance(zone, tuple):
expander_idx, channel = zone
zone = self._zonetracker.expander_to_zone(expander_idx, channel)
status = 2 if simulate_wire_problem else 1
self.send("L{0:02}{1}\r".format(zone, status)) |
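A hedged usage sketch; `device` is assumed to be an already-opened AlarmDecoder instance running in zone-expander emulation mode:

device.fault_zone(5)                                    # fault zone 5 directly
device.fault_zone((0, 2), simulate_wire_problem=True)   # expander 0, channel 2, simulated wire fault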
def load_job_from_ref(self):
"""
Loads the job.json into self.job
"""
if not self.job_id:
raise Exception('Job not loaded yet. Use load(id) first.')
if not os.path.exists(self.git.work_tree + '/aetros/job.json'):
raise Exception('Could not load aetros/job.json from git repository. Make sure you have created the job correctly.')
with open(self.git.work_tree + '/aetros/job.json') as f:
self.job = simplejson.loads(f.read(), object_pairs_hook=collections.OrderedDict)
if not self.job:
raise Exception('Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.')
self.logger.debug('job: ' + str(self.job)) | Loads the job.json into self.job | Below is the instruction that describes the task:
### Input:
Loads the job.json into self.job
### Response:
def load_job_from_ref(self):
"""
Loads the job.json into self.job
"""
if not self.job_id:
raise Exception('Job not loaded yet. Use load(id) first.')
if not os.path.exists(self.git.work_tree + '/aetros/job.json'):
raise Exception('Could not load aetros/job.json from git repository. Make sure you have created the job correctly.')
with open(self.git.work_tree + '/aetros/job.json') as f:
self.job = simplejson.loads(f.read(), object_pairs_hook=collections.OrderedDict)
if not self.job:
raise Exception('Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.')
self.logger.debug('job: ' + str(self.job)) |
def get_pip_requirement_set(self, arguments, use_remote_index, use_wheels=False):
"""
Get the unpacked requirement(s) specified by the caller by running pip.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:param use_remote_index: A boolean indicating whether pip is allowed to
connect to the main package index
(http://pypi.python.org by default).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
(:data:`False` by default for backwards compatibility
with callers that use pip-accel as a Python API).
:returns: A :class:`pip.req.RequirementSet` object created by pip.
:raises: Any exceptions raised by pip.
"""
# Compose the pip command line arguments. This is where a lot of the
# core logic of pip-accel is hidden and it uses some esoteric features
# of pip so this method is heavily commented.
command_line = []
# Use `--download' to instruct pip to download requirement(s) into
# pip-accel's local source distribution index directory. This has the
# following documented side effects (see `pip install --help'):
# 1. It disables the installation of requirements (without using the
# `--no-install' option which is deprecated and slated for removal
# in pip 7.x).
# 2. It ignores requirements that are already installed (because
# pip-accel doesn't actually need to re-install requirements that
# are already installed we will have work around this later, but
# that seems fairly simple to do).
command_line.append('--download=%s' % self.config.source_index)
# Use `--find-links' to point pip at pip-accel's local source
# distribution index directory. This ensures that source distribution
# archives are never downloaded more than once (regardless of the HTTP
# cache that was introduced in pip 6.x).
command_line.append('--find-links=%s' % create_file_url(self.config.source_index))
# Use `--no-binary=:all:' to ignore wheel distributions by default in
# order to preserve backwards compatibility with callers that expect a
# requirement set consisting only of source distributions that can be
# converted to `dumb binary distributions'.
if not use_wheels and self.arguments_allow_wheels(arguments):
command_line.append('--no-binary=:all:')
# Use `--no-index' to force pip to only consider source distribution
# archives contained in pip-accel's local source distribution index
# directory. This enables pip-accel to ask pip "Can the local source
# distribution index satisfy all requirements in the given requirement
# set?" which enables pip-accel to keep pip off the internet unless
# absolutely necessary :-).
if not use_remote_index:
command_line.append('--no-index')
# Use `--no-clean' to instruct pip to unpack the source distribution
# archives and *not* clean up the unpacked source distributions
# afterwards. This enables pip-accel to replace pip's installation
# logic with cached binary distribution archives.
command_line.append('--no-clean')
# Use `--build-directory' to instruct pip to unpack the source
# distribution archives to a temporary directory managed by pip-accel.
# We will clean up the build directory when we're done using the
# unpacked source distributions.
command_line.append('--build-directory=%s' % self.build_directory)
# Append the user's `pip install ...' arguments to the command line
# that we just assembled.
command_line.extend(arguments)
logger.info("Executing command: pip install %s", ' '.join(command_line))
# Clear the build directory to prevent PreviousBuildDirError exceptions.
self.clear_build_directory()
# During the pip 6.x upgrade pip-accel switched to using `pip install
# --download' which can produce an interactive prompt as described in
# issue 51 [1]. The documented way [2] to get rid of this interactive
# prompt is pip's --exists-action option, but due to what is most
# likely a bug in pip this doesn't actually work. The environment
# variable $PIP_EXISTS_ACTION does work however, so if the user didn't
# set it we will set a reasonable default for them.
# [1] https://github.com/paylogic/pip-accel/issues/51
# [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option
os.environ.setdefault('PIP_EXISTS_ACTION', 'w')
# Initialize and run the `pip install' command.
command = InstallCommand()
opts, args = command.parse_args(command_line)
if not opts.ignore_installed:
# If the user didn't supply the -I, --ignore-installed option we
# will forcefully disable the option. Refer to the documentation of
# the AttributeOverrides class for further details.
opts = AttributeOverrides(opts, ignore_installed=False)
requirement_set = command.run(opts, args)
# Make sure the output of pip and pip-accel are not intermingled.
sys.stdout.flush()
if requirement_set is None:
raise NothingToDoError("""
pip didn't generate a requirement set, most likely you
specified an empty requirements file?
""")
else:
return self.transform_pip_requirement_set(requirement_set) | Get the unpacked requirement(s) specified by the caller by running pip.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:param use_remote_index: A boolean indicating whether pip is allowed to
connect to the main package index
(http://pypi.python.org by default).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
(:data:`False` by default for backwards compatibility
with callers that use pip-accel as a Python API).
:returns: A :class:`pip.req.RequirementSet` object created by pip.
:raises: Any exceptions raised by pip. | Below is the instruction that describes the task:
### Input:
Get the unpacked requirement(s) specified by the caller by running pip.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:param use_remote_index: A boolean indicating whether pip is allowed to
connect to the main package index
(http://pypi.python.org by default).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
(:data:`False` by default for backwards compatibility
with callers that use pip-accel as a Python API).
:returns: A :class:`pip.req.RequirementSet` object created by pip.
:raises: Any exceptions raised by pip.
### Response:
def get_pip_requirement_set(self, arguments, use_remote_index, use_wheels=False):
"""
Get the unpacked requirement(s) specified by the caller by running pip.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:param use_remote_index: A boolean indicating whether pip is allowed to
connect to the main package index
(http://pypi.python.org by default).
:param use_wheels: Whether pip and pip-accel are allowed to use wheels_
(:data:`False` by default for backwards compatibility
with callers that use pip-accel as a Python API).
:returns: A :class:`pip.req.RequirementSet` object created by pip.
:raises: Any exceptions raised by pip.
"""
# Compose the pip command line arguments. This is where a lot of the
# core logic of pip-accel is hidden and it uses some esoteric features
# of pip so this method is heavily commented.
command_line = []
# Use `--download' to instruct pip to download requirement(s) into
# pip-accel's local source distribution index directory. This has the
# following documented side effects (see `pip install --help'):
# 1. It disables the installation of requirements (without using the
# `--no-install' option which is deprecated and slated for removal
# in pip 7.x).
# 2. It ignores requirements that are already installed (because
# pip-accel doesn't actually need to re-install requirements that
# are already installed we will have work around this later, but
# that seems fairly simple to do).
command_line.append('--download=%s' % self.config.source_index)
# Use `--find-links' to point pip at pip-accel's local source
# distribution index directory. This ensures that source distribution
# archives are never downloaded more than once (regardless of the HTTP
# cache that was introduced in pip 6.x).
command_line.append('--find-links=%s' % create_file_url(self.config.source_index))
# Use `--no-binary=:all:' to ignore wheel distributions by default in
# order to preserve backwards compatibility with callers that expect a
# requirement set consisting only of source distributions that can be
# converted to `dumb binary distributions'.
if not use_wheels and self.arguments_allow_wheels(arguments):
command_line.append('--no-binary=:all:')
# Use `--no-index' to force pip to only consider source distribution
# archives contained in pip-accel's local source distribution index
# directory. This enables pip-accel to ask pip "Can the local source
# distribution index satisfy all requirements in the given requirement
# set?" which enables pip-accel to keep pip off the internet unless
# absolutely necessary :-).
if not use_remote_index:
command_line.append('--no-index')
# Use `--no-clean' to instruct pip to unpack the source distribution
# archives and *not* clean up the unpacked source distributions
# afterwards. This enables pip-accel to replace pip's installation
# logic with cached binary distribution archives.
command_line.append('--no-clean')
# Use `--build-directory' to instruct pip to unpack the source
# distribution archives to a temporary directory managed by pip-accel.
# We will clean up the build directory when we're done using the
# unpacked source distributions.
command_line.append('--build-directory=%s' % self.build_directory)
# Append the user's `pip install ...' arguments to the command line
# that we just assembled.
command_line.extend(arguments)
logger.info("Executing command: pip install %s", ' '.join(command_line))
# Clear the build directory to prevent PreviousBuildDirError exceptions.
self.clear_build_directory()
# During the pip 6.x upgrade pip-accel switched to using `pip install
# --download' which can produce an interactive prompt as described in
# issue 51 [1]. The documented way [2] to get rid of this interactive
# prompt is pip's --exists-action option, but due to what is most
# likely a bug in pip this doesn't actually work. The environment
# variable $PIP_EXISTS_ACTION does work however, so if the user didn't
# set it we will set a reasonable default for them.
# [1] https://github.com/paylogic/pip-accel/issues/51
# [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option
os.environ.setdefault('PIP_EXISTS_ACTION', 'w')
# Initialize and run the `pip install' command.
command = InstallCommand()
opts, args = command.parse_args(command_line)
if not opts.ignore_installed:
# If the user didn't supply the -I, --ignore-installed option we
# will forcefully disable the option. Refer to the documentation of
# the AttributeOverrides class for further details.
opts = AttributeOverrides(opts, ignore_installed=False)
requirement_set = command.run(opts, args)
# Make sure the output of pip and pip-accel are not intermingled.
sys.stdout.flush()
if requirement_set is None:
raise NothingToDoError("""
pip didn't generate a requirement set, most likely you
specified an empty requirements file?
""")
else:
return self.transform_pip_requirement_set(requirement_set) |
def done_tasks(self):
"""Return tasks in loop which its state is not pending."""
tasks = [task for task in self.all_tasks if task._state != NewTask._PENDING]
return tasks | Return tasks in loop which its state is not pending. | Below is the instruction that describes the task:
### Input:
Return tasks in loop which its state is not pending.
### Response:
def done_tasks(self):
"""Return tasks in loop which its state is not pending."""
tasks = [task for task in self.all_tasks if task._state != NewTask._PENDING]
return tasks |
def prohibit(self, data):
"""Checks for prohibited characters."""
for char in data:
for lookup in self.prohibited:
if lookup(char):
raise StringprepError("Prohibited character: {0!r}"
.format(char))
return data | Checks for prohibited characters. | Below is the instruction that describes the task:
### Input:
Checks for prohibited characters.
### Response:
def prohibit(self, data):
"""Checks for prohibited characters."""
for char in data:
for lookup in self.prohibited:
if lookup(char):
raise StringprepError("Prohibited character: {0!r}"
.format(char))
return data |
def _CallFlowRelational(self,
flow_name=None,
args=None,
runner_args=None,
client_id=None,
**kwargs):
"""Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
args: Flow arguments.
runner_args: Flow runner arguments.
client_id: If given, the flow is started for this client.
**kwargs: Arguments for the child flow.
Returns:
The URN of the child flow which was created.
Raises:
RuntimeError: In case of no cpu quota left to start more clients.
"""
if isinstance(client_id, rdfvalue.RDFURN):
client_id = client_id.Basename()
if flow_name is None and runner_args is not None:
flow_name = runner_args.flow_name
flow_cls = registry.FlowRegistry.FlowClassByName(flow_name)
flow_id = flow.StartFlow(
client_id=client_id,
creator=self.hunt_obj.creator,
cpu_limit=self._GetSubFlowCPULimit(),
network_bytes_limit=self._GetSubFlowNetworkLimit(),
flow_cls=flow_cls,
flow_args=args,
parent_hunt_id=self.hunt_obj.urn.Basename(),
**kwargs)
return rdfvalue.RDFURN(client_id).Add("flows").Add(flow_id) | Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
args: Flow arguments.
runner_args: Flow runner arguments.
client_id: If given, the flow is started for this client.
**kwargs: Arguments for the child flow.
Returns:
The URN of the child flow which was created.
Raises:
RuntimeError: In case of no cpu quota left to start more clients. | Below is the instruction that describes the task:
### Input:
Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
args: Flow arguments.
runner_args: Flow runner arguments.
client_id: If given, the flow is started for this client.
**kwargs: Arguments for the child flow.
Returns:
The URN of the child flow which was created.
Raises:
RuntimeError: In case of no cpu quota left to start more clients.
### Response:
def _CallFlowRelational(self,
flow_name=None,
args=None,
runner_args=None,
client_id=None,
**kwargs):
"""Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
args: Flow arguments.
runner_args: Flow runner arguments.
client_id: If given, the flow is started for this client.
**kwargs: Arguments for the child flow.
Returns:
The URN of the child flow which was created.
Raises:
RuntimeError: In case of no cpu quota left to start more clients.
"""
if isinstance(client_id, rdfvalue.RDFURN):
client_id = client_id.Basename()
if flow_name is None and runner_args is not None:
flow_name = runner_args.flow_name
flow_cls = registry.FlowRegistry.FlowClassByName(flow_name)
flow_id = flow.StartFlow(
client_id=client_id,
creator=self.hunt_obj.creator,
cpu_limit=self._GetSubFlowCPULimit(),
network_bytes_limit=self._GetSubFlowNetworkLimit(),
flow_cls=flow_cls,
flow_args=args,
parent_hunt_id=self.hunt_obj.urn.Basename(),
**kwargs)
return rdfvalue.RDFURN(client_id).Add("flows").Add(flow_id) |
def cctop_save_xml(jobid, outpath):
"""Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
"""
status = cctop_check_status(jobid=jobid)
if status == 'Finished':
result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status)) | Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename | Below is the instruction that describes the task:
### Input:
Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
### Response:
def cctop_save_xml(jobid, outpath):
"""Save the CCTOP results file in XML format.
Args:
jobid (str): Job ID obtained when job was submitted
outpath (str): Path to output filename
Returns:
str: Path to output filename
"""
status = cctop_check_status(jobid=jobid)
if status == 'Finished':
result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid)
result_text = requests.post(result)
with open(outpath, 'w') as f:
f.write(result_text.text)
return outpath
else:
raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status)) |
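A small usage sketch; the job id is a placeholder obtained from an earlier CCTOP submission step:

jobid = 'abc123'    # hypothetical job id returned by the submission call
try:
    xml_path = cctop_save_xml(jobid, 'cctop_result.xml')
    print('Saved results to', xml_path)
except ConnectionRefusedError as err:
    print('Job not finished yet:', err)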
def fmt_addr_raw(addr, reverse=True):
"""Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.
Args:
addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.
reverse (bool): True if the byte ordering should be reversed in the output.
Returns:
A bytearray containing the converted address.
"""
addr = addr.replace(':', '')
raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)]
if reverse:
raw_addr.reverse()
# for Python 2, this needs to be a string instead of a bytearray
if sys.version_info[0] == 2:
return str(bytearray(raw_addr))
return bytearray(raw_addr) | Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.
Args:
addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.
reverse (bool): True if the byte ordering should be reversed in the output.
Returns:
A bytearray containing the converted address. | Below is the instruction that describes the task:
### Input:
Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.
Args:
addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.
reverse (bool): True if the byte ordering should be reversed in the output.
Returns:
A bytearray containing the converted address.
### Response:
def fmt_addr_raw(addr, reverse=True):
"""Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.
Args:
addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.
reverse (bool): True if the byte ordering should be reversed in the output.
Returns:
A bytearray containing the converted address.
"""
addr = addr.replace(':', '')
raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)]
if reverse:
raw_addr.reverse()
# for Python 2, this needs to be a string instead of a bytearray
if sys.version_info[0] == 2:
return str(bytearray(raw_addr))
return bytearray(raw_addr) |
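A quick sanity check of the conversion above (Python 3 shown; under Python 2 a str is returned instead of a bytearray):

raw = fmt_addr_raw('00:11:22:33:44:55')
# With the default reverse=True the bytes come out in reversed (little-endian) order:
assert raw == bytearray([0x55, 0x44, 0x33, 0x22, 0x11, 0x00])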
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass | Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail | Below is the instruction that describes the task:
### Input:
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
### Response:
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass |
def availability_zone_list(request):
"""Utility method to retrieve a list of availability zones."""
try:
return api.nova.availability_zone_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve Nova availability zones.'))
return [] | Utility method to retrieve a list of availability zones. | Below is the instruction that describes the task:
### Input:
Utility method to retrieve a list of availability zones.
### Response:
def availability_zone_list(request):
"""Utility method to retrieve a list of availability zones."""
try:
return api.nova.availability_zone_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve Nova availability zones.'))
return [] |
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset):
"""Wrapper for mmap2"""
return self.sys_mmap2(address, size, prot, flags, fd, offset) | Wrapper for mmap2 | Below is the instruction that describes the task:
### Input:
Wrapper for mmap2
### Response:
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset):
"""Wrapper for mmap2"""
return self.sys_mmap2(address, size, prot, flags, fd, offset) |
def patch_pyzmq():
"""backport a few patches from newer pyzmq
These can be removed as we bump our minimum pyzmq version
"""
import zmq
# ioloop.install, introduced in pyzmq 2.1.7
from zmq.eventloop import ioloop
def install():
import tornado.ioloop
tornado.ioloop.IOLoop = ioloop.IOLoop
if not hasattr(ioloop, 'install'):
ioloop.install = install
# fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9
if not hasattr(zmq, 'DEALER'):
zmq.DEALER = zmq.XREQ
if not hasattr(zmq, 'ROUTER'):
zmq.ROUTER = zmq.XREP
# fallback on stdlib json if jsonlib is selected, because jsonlib breaks things.
# jsonlib support is removed from pyzmq >= 2.2.0
from zmq.utils import jsonapi
if jsonapi.jsonmod.__name__ == 'jsonlib':
import json
jsonapi.jsonmod = json | backport a few patches from newer pyzmq
These can be removed as we bump our minimum pyzmq version | Below is the instruction that describes the task:
### Input:
backport a few patches from newer pyzmq
These can be removed as we bump our minimum pyzmq version
### Response:
def patch_pyzmq():
"""backport a few patches from newer pyzmq
These can be removed as we bump our minimum pyzmq version
"""
import zmq
# ioloop.install, introduced in pyzmq 2.1.7
from zmq.eventloop import ioloop
def install():
import tornado.ioloop
tornado.ioloop.IOLoop = ioloop.IOLoop
if not hasattr(ioloop, 'install'):
ioloop.install = install
# fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9
if not hasattr(zmq, 'DEALER'):
zmq.DEALER = zmq.XREQ
if not hasattr(zmq, 'ROUTER'):
zmq.ROUTER = zmq.XREP
# fallback on stdlib json if jsonlib is selected, because jsonlib breaks things.
# jsonlib support is removed from pyzmq >= 2.2.0
from zmq.utils import jsonapi
if jsonapi.jsonmod.__name__ == 'jsonlib':
import json
jsonapi.jsonmod = json |
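Usage is simply to run the patch once at start-up, before anything else imports the patched names; a sketch:

patch_pyzmq()                    # apply the backported pyzmq fixes first

from zmq.eventloop import ioloop
ioloop.install()                 # guaranteed to exist even on older pyzmq after patching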
def process_app_config_section(config, app_config):
""" Processes the app section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param app_config: App section from a config data dict.
"""
if 'addresses' in app_config:
config.app['addresses'] = app_config['addresses']
if 'component' in app_config:
config.app['component'] = app_config['component']
if 'data' in app_config:
if 'sources' in app_config['data']:
config.app['data']['sources'] = app_config['data']['sources']
if 'id' in app_config:
config.app['id'] = app_config['id']
if 'login' in app_config:
if 'urls' in app_config['login']:
for url in app_config['login']['urls']:
config.app['login']['urls'][url['name']] = url['value']
if 'pythonpath' in app_config:
config.app['pythonpath'] = app_config['pythonpath']
if 'port' in app_config:
config.app['port'] = app_config['port']
if 'process' in app_config:
if 'num_processes' in app_config['process']:
config.app['process']['num_processes'] = app_config[
'process']['num_processes']
if 'url_root_path' in app_config:
root_url = app_config['url_root_path'].strip()
if root_url[0] == "/":
root_url = root_url[1:]
if root_url == "":
root_url = None
config.app['url_root_path'] = root_url
if 'settings' in app_config:
config.app['settings'] = app_config['settings']
if 'socket' in app_config:
config.app['socket'] = app_config['socket']
if 'static_path' in app_config:
config.app['static_path'] = app_config['static_path']
if 'static_url_prefix' in app_config:
config.app['static_url_prefix'] = app_config['static_url_prefix']
if 'type' in app_config:
config.app['type'] = app_config['type']
if 'types' in app_config:
for app_type in app_config['types']:
app_type['launcher'] = get_config_from_package(
app_type['launcher'])
config.app['types'][app_type['name']] = app_type
if 'wait_before_shutdown' in app_config:
config.app['wait_before_shutdown'] = app_config['wait_before_shutdown'] | Processes the app section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param app_config: App section from a config data dict. | Below is the instruction that describes the task:
### Input:
Processes the app section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param app_config: App section from a config data dict.
### Response:
def process_app_config_section(config, app_config):
""" Processes the app section from a configuration data dict.
:param config: The config reference of the object that will hold the
configuration data from the config_data.
:param app_config: App section from a config data dict.
"""
if 'addresses' in app_config:
config.app['addresses'] = app_config['addresses']
if 'component' in app_config:
config.app['component'] = app_config['component']
if 'data' in app_config:
if 'sources' in app_config['data']:
config.app['data']['sources'] = app_config['data']['sources']
if 'id' in app_config:
config.app['id'] = app_config['id']
if 'login' in app_config:
if 'urls' in app_config['login']:
for url in app_config['login']['urls']:
config.app['login']['urls'][url['name']] = url['value']
if 'pythonpath' in app_config:
config.app['pythonpath'] = app_config['pythonpath']
if 'port' in app_config:
config.app['port'] = app_config['port']
if 'process' in app_config:
if 'num_processes' in app_config['process']:
config.app['process']['num_processes'] = app_config[
'process']['num_processes']
if 'url_root_path' in app_config:
root_url = app_config['url_root_path'].strip()
if root_url[0] == "/":
root_url = root_url[1:]
if root_url == "":
root_url = None
config.app['url_root_path'] = root_url
if 'settings' in app_config:
config.app['settings'] = app_config['settings']
if 'socket' in app_config:
config.app['socket'] = app_config['socket']
if 'static_path' in app_config:
config.app['static_path'] = app_config['static_path']
if 'static_url_prefix' in app_config:
config.app['static_url_prefix'] = app_config['static_url_prefix']
if 'type' in app_config:
config.app['type'] = app_config['type']
if 'types' in app_config:
for app_type in app_config['types']:
app_type['launcher'] = get_config_from_package(
app_type['launcher'])
config.app['types'][app_type['name']] = app_type
if 'wait_before_shutdown' in app_config:
config.app['wait_before_shutdown'] = app_config['wait_before_shutdown'] |
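A hedged sketch of driving the section processor above; the `config` object is assumed to expose the nested `app` dict the function writes into, and the section dict is illustrative:

app_section = {
    'port': 8888,
    'url_root_path': '/api/',          # stored with the leading slash stripped ('api/')
    'process': {'num_processes': 2},
}
process_app_config_section(config, app_section)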
def filter(self, mask):
"""
Create a SiteCollection with only a subset of sites.
:param mask:
Numpy array of boolean values of the same length as the site
collection. ``True`` values should indicate that site with that
index should be included into the filtered collection.
:returns:
A new :class:`SiteCollection` instance, unless all the
values in ``mask`` are ``True``, in which case this site collection
is returned, or if all the values in ``mask`` are ``False``,
in which case method returns ``None``. New collection has data
of only those sites that were marked for inclusion in the mask.
"""
assert len(mask) == len(self), (len(mask), len(self))
if mask.all():
# all sites satisfy the filter, return
# this collection unchanged
return self
if not mask.any():
# no sites pass the filter, return None
return None
# extract indices of Trues from the mask
indices, = mask.nonzero()
return self.filtered(indices) | Create a SiteCollection with only a subset of sites.
:param mask:
Numpy array of boolean values of the same length as the site
collection. ``True`` values should indicate that site with that
index should be included into the filtered collection.
:returns:
A new :class:`SiteCollection` instance, unless all the
values in ``mask`` are ``True``, in which case this site collection
is returned, or if all the values in ``mask`` are ``False``,
in which case method returns ``None``. New collection has data
of only those sites that were marked for inclusion in the mask. | Below is the instruction that describes the task:
### Input:
Create a SiteCollection with only a subset of sites.
:param mask:
Numpy array of boolean values of the same length as the site
collection. ``True`` values should indicate that site with that
index should be included into the filtered collection.
:returns:
A new :class:`SiteCollection` instance, unless all the
values in ``mask`` are ``True``, in which case this site collection
is returned, or if all the values in ``mask`` are ``False``,
in which case method returns ``None``. New collection has data
of only those sites that were marked for inclusion in the mask.
### Response:
def filter(self, mask):
"""
Create a SiteCollection with only a subset of sites.
:param mask:
Numpy array of boolean values of the same length as the site
collection. ``True`` values should indicate that site with that
index should be included into the filtered collection.
:returns:
A new :class:`SiteCollection` instance, unless all the
values in ``mask`` are ``True``, in which case this site collection
is returned, or if all the values in ``mask`` are ``False``,
in which case method returns ``None``. New collection has data
of only those sites that were marked for inclusion in the mask.
"""
assert len(mask) == len(self), (len(mask), len(self))
if mask.all():
# all sites satisfy the filter, return
# this collection unchanged
return self
if not mask.any():
# no sites pass the filter, return None
return None
# extract indices of Trues from the mask
indices, = mask.nonzero()
return self.filtered(indices) |
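A minimal sketch of the masking pattern described above; `sites` is assumed to be an existing SiteCollection and the mask must have the same length:

import numpy

mask = numpy.zeros(len(sites), dtype=bool)
mask[:10] = True                   # keep only the first ten sites
subset = sites.filter(mask)        # `sites` itself if all True, None if all False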
def delete(self, version_name):
"""Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
response = self._api.projects().models().versions().delete(name=name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1". | Below is the instruction that describes the task:
### Input:
Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
### Response:
def delete(self, version_name):
"""Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
response = self._api.projects().models().versions().delete(name=name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) |
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
rootNode= parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PcGts'
rootClass = PcGts
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2018-07-15"')
return rootObj | Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree. | Below is the instruction that describes the task:
### Input:
Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
### Response:
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
rootNode= parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PcGts'
rootClass = PcGts
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2018-07-15"')
return rootObj |
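A hedged usage sketch; the file name is a placeholder for a real PAGE-XML document, which must not start with an XML declaration that carries an encoding, as the docstring notes:

with open('page.xml') as f:        # hypothetical PAGE-XML file
    xml_fragment = f.read()
root = parseString(xml_fragment, silence=True)   # silence=True builds the tree without re-exporting it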
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
'''
spf_re = re.compile(r'(?:\+|~)?(ip[46]|include):(.+)')
cmd = ['dig', '+short', six.text_type(domain), record]
if nameserver is not None:
cmd.append('@{0}'.format(nameserver))
result = __salt__['cmd.run_all'](cmd, python_shell=False)
# In this case, 0 is not the same as False
if result['retcode'] != 0:
log.warning(
'dig returned exit code \'%s\'. Returning empty list as fallback.',
result['retcode']
)
return []
if result['stdout'] == '' and record == 'SPF':
# empty string is successful query, but nothing to return. So, try TXT
# record.
return SPF(domain, 'TXT', nameserver)
sections = re.sub('"', '', result['stdout']).split()
if not sections or sections[0] != 'v=spf1':
return []
if sections[1].startswith('redirect='):
# Run a lookup on the part after 'redirect=' (9 chars)
return SPF(sections[1][9:], 'SPF', nameserver)
ret = []
for section in sections[1:]:
try:
mechanism, address = spf_re.match(section).groups()
except AttributeError:
# Regex was not matched
continue
if mechanism == 'include':
ret.extend(SPF(address, 'SPF', nameserver))
elif mechanism in ('ip4', 'ip6') and check_ip(address):
ret.append(address)
return ret | Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com | Below is the instruction that describes the task:
### Input:
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
### Response:
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
'''
spf_re = re.compile(r'(?:\+|~)?(ip[46]|include):(.+)')
cmd = ['dig', '+short', six.text_type(domain), record]
if nameserver is not None:
cmd.append('@{0}'.format(nameserver))
result = __salt__['cmd.run_all'](cmd, python_shell=False)
# In this case, 0 is not the same as False
if result['retcode'] != 0:
log.warning(
'dig returned exit code \'%s\'. Returning empty list as fallback.',
result['retcode']
)
return []
if result['stdout'] == '' and record == 'SPF':
# empty string is successful query, but nothing to return. So, try TXT
# record.
return SPF(domain, 'TXT', nameserver)
sections = re.sub('"', '', result['stdout']).split()
if not sections or sections[0] != 'v=spf1':
return []
if sections[1].startswith('redirect='):
# Run a lookup on the part after 'redirect=' (9 chars)
return SPF(sections[1][9:], 'SPF', nameserver)
ret = []
for section in sections[1:]:
try:
mechanism, address = spf_re.match(section).groups()
except AttributeError:
# Regex was not matched
continue
if mechanism == 'include':
ret.extend(SPF(address, 'SPF', nameserver))
elif mechanism in ('ip4', 'ip6') and check_ip(address):
ret.append(address)
return ret |
def model_to_tree(model, title=None, lucent_id=TRANSLUCENT_BINDER_ID):
"""Given an model, build the tree::
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
"""
id = model.ident_hash
if id is None and isinstance(model, TranslucentBinder):
id = lucent_id
md = model.metadata
shortid = md.get('shortId', md.get('cnx-archive-shortid'))
title = title is not None and title or md.get('title')
tree = {'id': id, 'title': title, 'shortId': shortid}
if hasattr(model, '__iter__'):
contents = tree['contents'] = []
for node in model:
item = model_to_tree(node, model.get_title_for_node(node),
lucent_id=lucent_id)
contents.append(item)
return tree | Given an model, build the tree::
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]} | Below is the instruction that describes the task:
### Input:
Given an model, build the tree::
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
### Response:
def model_to_tree(model, title=None, lucent_id=TRANSLUCENT_BINDER_ID):
"""Given an model, build the tree::
{'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
"""
id = model.ident_hash
if id is None and isinstance(model, TranslucentBinder):
id = lucent_id
md = model.metadata
shortid = md.get('shortId', md.get('cnx-archive-shortid'))
title = title is not None and title or md.get('title')
tree = {'id': id, 'title': title, 'shortId': shortid}
if hasattr(model, '__iter__'):
contents = tree['contents'] = []
for node in model:
item = model_to_tree(node, model.get_title_for_node(node),
lucent_id=lucent_id)
contents.append(item)
return tree |
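A short sketch of the call and the shape of the result; `binder` is assumed to be an existing Binder or TranslucentBinder instance:

tree = model_to_tree(binder)
# Expected shape (ids and titles depend on the model):
# {'id': ..., 'shortId': ..., 'title': ..., 'contents': [{...}, ...]}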
def decode_network_values(ptype, plen, buf):
"""Decodes a list of DS values in collectd network format
"""
nvalues = short.unpack_from(buf, header.size)[0]
off = header.size + short.size + nvalues
valskip = double.size
# Check whether our expected packet size is the reported one
assert ((valskip + 1) * nvalues + short.size + header.size) == plen
assert double.size == number.size
result = []
for dstype in [ord(x) for x in buf[header.size + short.size:off]]:
if dstype == DS_TYPE_COUNTER:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_GAUGE:
result.append((dstype, double.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_DERIVE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_ABSOLUTE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
else:
raise ValueError("DS type %i unsupported" % dstype)
return result | Decodes a list of DS values in collectd network format | Below is the instruction that describes the task:
### Input:
Decodes a list of DS values in collectd network format
### Response:
def decode_network_values(ptype, plen, buf):
"""Decodes a list of DS values in collectd network format
"""
nvalues = short.unpack_from(buf, header.size)[0]
off = header.size + short.size + nvalues
valskip = double.size
# Check whether our expected packet size is the reported one
assert ((valskip + 1) * nvalues + short.size + header.size) == plen
assert double.size == number.size
result = []
for dstype in [ord(x) for x in buf[header.size + short.size:off]]:
if dstype == DS_TYPE_COUNTER:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_GAUGE:
result.append((dstype, double.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_DERIVE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_ABSOLUTE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
else:
raise ValueError("DS type %i unsupported" % dstype)
return result |
def new(params, event_size, num_components,
dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',
[params, event_size, num_components]):
dist = MixtureSameFamily.new(
params,
num_components,
OneHotCategorical(
event_size,
validate_args=False, # So we can eval on simplex interior.
name=name),
validate_args=validate_args,
name=name)
# pylint: disable=protected-access
dist._mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.prob, dist)
dist.log_mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.log_prob, dist)
# pylint: enable=protected-access
return dist | Create the distribution instance from a `params` vector. | Below is the instruction that describes the task:
### Input:
Create the distribution instance from a `params` vector.
### Response:
def new(params, event_size, num_components,
dtype=None, validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',
[params, event_size, num_components]):
dist = MixtureSameFamily.new(
params,
num_components,
OneHotCategorical(
event_size,
validate_args=False, # So we can eval on simplex interior.
name=name),
validate_args=validate_args,
name=name)
# pylint: disable=protected-access
dist._mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.prob, dist)
dist.log_mean = functools.partial(
_eval_all_one_hot, tfd.Distribution.log_prob, dist)
# pylint: enable=protected-access
return dist |
def fetch(self, url):
'''
Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched
'''
opener = self._urllib.build_opener()
opener.addheaders = self._requestHeaders.items()
response = opener.open(url)
headers = response.info()
raw = response.read()
raw = raw.decode('utf8')
if not 'Content-Type' in headers:
raise OEmbedError('Missing mime-type in response')
if headers['Content-Type'].find('application/xml') != -1 or \
headers['Content-Type'].find('text/xml') != -1:
response = OEmbedResponse.newFromXML(raw)
elif headers['Content-Type'].find('application/json') != -1 or \
headers['Content-Type'].find('text/javascript') != -1 or \
headers['Content-Type'].find('text/json') != -1:
response = OEmbedResponse.newFromJSON(raw)
else:
raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type'])
return response | Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched | Below is the instruction that describes the task:
### Input:
Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched
### Response:
def fetch(self, url):
'''
Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched
'''
opener = self._urllib.build_opener()
opener.addheaders = self._requestHeaders.items()
response = opener.open(url)
headers = response.info()
raw = response.read()
raw = raw.decode('utf8')
if not 'Content-Type' in headers:
raise OEmbedError('Missing mime-type in response')
if headers['Content-Type'].find('application/xml') != -1 or \
headers['Content-Type'].find('text/xml') != -1:
response = OEmbedResponse.newFromXML(raw)
elif headers['Content-Type'].find('application/json') != -1 or \
headers['Content-Type'].find('text/javascript') != -1 or \
headers['Content-Type'].find('text/json') != -1:
response = OEmbedResponse.newFromJSON(raw)
else:
raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type'])
return response |
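A hedged usage sketch; `consumer` is assumed to be an instance of the class that defines fetch() above, and the oEmbed endpoint URL is a placeholder:

url = 'https://www.youtube.com/oembed?url=https%3A//youtu.be/VIDEO_ID&format=json'
response = consumer.fetch(url)     # OEmbedResponse parsed from the JSON payload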
def _bindDomain(self, domain_name, create=False, block=True):
"""
Return the Boto Domain object representing the SDB domain of the given name. If the
domain does not exist and `create` is True, it will be created.
:param str domain_name: the name of the domain to bind to
:param bool create: True if domain should be created if it doesn't exist
:param bool block: If False, return None if the domain doesn't exist. If True, wait until
domain appears. This parameter is ignored if create is True.
:rtype: Domain|None
:raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
retry timeout expires.
"""
log.debug("Binding to job store domain '%s'.", domain_name)
retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e))
if not block:
retryargs['timeout'] = 15
for attempt in retry_sdb(**retryargs):
with attempt:
try:
return self.db.get_domain(domain_name)
except SDBResponseError as e:
if no_such_sdb_domain(e):
if create:
return self.db.create_domain(domain_name)
elif block:
raise
else:
return None
else:
raise | Return the Boto Domain object representing the SDB domain of the given name. If the
domain does not exist and `create` is True, it will be created.
:param str domain_name: the name of the domain to bind to
:param bool create: True if domain should be created if it doesn't exist
:param bool block: If False, return None if the domain doesn't exist. If True, wait until
domain appears. This parameter is ignored if create is True.
:rtype: Domain|None
:raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
retry timeout expires. | Below is the instruction that describes the task:
### Input:
Return the Boto Domain object representing the SDB domain of the given name. If the
domain does not exist and `create` is True, it will be created.
:param str domain_name: the name of the domain to bind to
:param bool create: True if domain should be created if it doesn't exist
:param bool block: If False, return None if the domain doesn't exist. If True, wait until
domain appears. This parameter is ignored if create is True.
:rtype: Domain|None
:raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
retry timeout expires.
### Response:
def _bindDomain(self, domain_name, create=False, block=True):
"""
Return the Boto Domain object representing the SDB domain of the given name. If the
domain does not exist and `create` is True, it will be created.
:param str domain_name: the name of the domain to bind to
:param bool create: True if domain should be created if it doesn't exist
:param bool block: If False, return None if the domain doesn't exist. If True, wait until
domain appears. This parameter is ignored if create is True.
:rtype: Domain|None
:raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
retry timeout expires.
"""
log.debug("Binding to job store domain '%s'.", domain_name)
retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e))
if not block:
retryargs['timeout'] = 15
for attempt in retry_sdb(**retryargs):
with attempt:
try:
return self.db.get_domain(domain_name)
except SDBResponseError as e:
if no_such_sdb_domain(e):
if create:
return self.db.create_domain(domain_name)
elif block:
raise
else:
return None
else:
raise |
def validate_geotweet(self, record):
""" check that stream record is actual tweet with coordinates """
if record and self._validate('user', record) \
and self._validate('coordinates', record):
return True
return False | check that stream record is actual tweet with coordinates | Below is the instruction that describes the task:
### Input:
check that stream record is actual tweet with coordinates
### Response:
def validate_geotweet(self, record):
""" check that stream record is actual tweet with coordinates """
if record and self._validate('user', record) \
and self._validate('coordinates', record):
return True
return False |
def function(script, x_func='x', y_func='y', z_func='z'):
"""Geometric function using muparser lib to generate new Coordinates
You can change x, y, z for every vertex according to the function specified.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
x_func (str): function to generate new coordinates for x
y_func (str): function to generate new coordinates for y
z_func (str): function to generate new coordinates for z
Layer stack:
No impacts
MeshLab versions:
1.3.4BETA
"""
filter_xml = ''.join([
' <filter name="Geometric Function">\n',
' <Param name="x" ',
'value="{}" '.format(str(x_func).replace('&', '&amp;').replace('<', '&lt;')),
'description="func x = " ',
'type="RichString" ',
'/>\n',
' <Param name="y" ',
'value="{}" '.format(str(y_func).replace('&', '&amp;').replace('<', '&lt;')),
'description="func y = " ',
'type="RichString" ',
'/>\n',
' <Param name="z" ',
'value="{}" '.format(str(z_func).replace('&', '&').replace('<', '<')),
'description="func z = " ',
'type="RichString" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Geometric function using muparser lib to generate new Coordinates
You can change x, y, z for every vertex according to the function specified.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
x_func (str): function to generate new coordinates for x
y_func (str): function to generate new coordinates for y
z_func (str): function to generate new coordinates for z
Layer stack:
No impacts
MeshLab versions:
1.3.4BETA | Below is the instruction that describes the task:
### Input:
Geometric function using muparser lib to generate new Coordinates
You can change x, y, z for every vertex according to the function specified.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
x_func (str): function to generate new coordinates for x
y_func (str): function to generate new coordinates for y
z_func (str): function to generate new coordinates for z
Layer stack:
No impacts
MeshLab versions:
1.3.4BETA
### Response:
def function(script, x_func='x', y_func='y', z_func='z'):
"""Geometric function using muparser lib to generate new Coordinates
You can change x, y, z for every vertex according to the function specified.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
x_func (str): function to generate new coordinates for x
y_func (str): function to generate new coordinates for y
z_func (str): function to generate new coordinates for z
Layer stack:
No impacts
MeshLab versions:
1.3.4BETA
"""
filter_xml = ''.join([
' <filter name="Geometric Function">\n',
' <Param name="x" ',
'value="{}" '.format(str(x_func).replace('&', '&').replace('<', '<')),
'description="func x = " ',
'type="RichString" ',
'/>\n',
' <Param name="y" ',
'value="{}" '.format(str(y_func).replace('&', '&').replace('<', '<')),
'description="func y = " ',
'type="RichString" ',
'/>\n',
' <Param name="z" ',
'value="{}" '.format(str(z_func).replace('&', '&').replace('<', '<')),
'description="func z = " ',
'type="RichString" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
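A possible call for this filter, written as a sketch; it assumes `script` is a filter script object accepted by util.write_filter, and the ripple expression below is made up for illustration:
# displace every vertex in z by a ripple that depends on its x coordinate
function(script, x_func='x', y_func='y', z_func='z + 0.05*sin(10*x)')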
def cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda):
"""
Rank-1 operation on real general matrix.
"""
status = _libcublas.cublasDger_v2(handle,
m, n,
ctypes.byref(ctypes.c_double(alpha)),
int(x), incx,
int(y), incy, int(A), lda)
cublasCheckStatus(status) | Rank-1 operation on real general matrix. | Below is the instruction that describes the task:
### Input:
Rank-1 operation on real general matrix.
### Response:
def cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda):
"""
Rank-1 operation on real general matrix.
"""
status = _libcublas.cublasDger_v2(handle,
m, n,
ctypes.byref(ctypes.c_double(alpha)),
int(x), incx,
int(y), incy, int(A), lda)
cublasCheckStatus(status) |
def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self
x = self.sum(axis=1) * 0.5
y = x * 0.5
return AudioSamples(y, self.samplerate) | Return this instance summed to mono. If the instance is already mono,
this is a no-op. | Below is the instruction that describes the task:
### Input:
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
### Response:
def mono(self):
"""
Return this instance summed to mono. If the instance is already mono,
this is a no-op.
"""
if self.channels == 1:
return self
x = self.sum(axis=1) * 0.5
y = x * 0.5
return AudioSamples(y, self.samplerate) |
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {
"dot": "",
"twopi": "",
"neato": "",
"circo": "",
"fdp": "",
"sfdp": "",
}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if not os.path.isdir(path):
return None
for prg in progs:
if progs[prg]:
continue
prg_path = os.path.join(path, prg)
prg_exe_path = prg_path + ".exe"
if os.path.exists(prg_path):
if was_quoted:
prg_path = "\"{}\"".format(prg_path)
progs[prg] = prg_path
success = True
elif os.path.exists(prg_exe_path):
if was_quoted:
prg_exe_path = "\"{}\"".format(prg_exe_path)
progs[prg] = prg_exe_path
success = True
if success:
return progs
return None | Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None | Below is the instruction that describes the task:
### Input:
Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
### Response:
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {
"dot": "",
"twopi": "",
"neato": "",
"circo": "",
"fdp": "",
"sfdp": "",
}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if not os.path.isdir(path):
return None
for prg in progs:
if progs[prg]:
continue
prg_path = os.path.join(path, prg)
prg_exe_path = prg_path + ".exe"
if os.path.exists(prg_path):
if was_quoted:
prg_path = "\"{}\"".format(prg_path)
progs[prg] = prg_path
success = True
elif os.path.exists(prg_exe_path):
if was_quoted:
prg_exe_path = "\"{}\"".format(prg_exe_path)
progs[prg] = prg_exe_path
success = True
if success:
return progs
return None |
def _set_path_to_configs(cls, path_to_config):
"""
Set the paths to the configuration files.
:param path_to_config: The possible path to the config to load.
:type path_to_config: str
:return:
The path to the config to read (0), the path to the default
configuration to read as fallback.(1)
:rtype: tuple
"""
if not path_to_config.endswith(PyFunceble.directory_separator):
# The path to the config does not end with the directory separator.
# We initiate the default and the parsed variable with the directory separator.
default = parsed = path_to_config + PyFunceble.directory_separator
else:
# The path to the config does end with the directory separator.
# We initiate the default and the parsed variable.
default = parsed = path_to_config
# We append the `CONFIGURATION_FILENAME` to the parsed variable.
parsed += PyFunceble.CONFIGURATION_FILENAME
# And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable.
default += PyFunceble.DEFAULT_CONFIGURATION_FILENAME
# We finally return a tuple which contains both pieces of information.
return (parsed, default) | Set the paths to the configuration files.
:param path_to_config: The possible path to the config to load.
:type path_to_config: str
:return:
The path to the config to read (0), the path to the default
configuration to read as fallback.(1)
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Set the paths to the configuration files.
:param path_to_config: The possible path to the config to load.
:type path_to_config: str
:return:
The path to the config to read (0), the path to the default
configuration to read as fallback.(1)
:rtype: tuple
### Response:
def _set_path_to_configs(cls, path_to_config):
"""
Set the paths to the configuration files.
:param path_to_config: The possible path to the config to load.
:type path_to_config: str
:return:
The path to the config to read (0), the path to the default
configuration to read as fallback.(1)
:rtype: tuple
"""
if not path_to_config.endswith(PyFunceble.directory_separator):
# The path to the config does not end with the directory separator.
# We initiate the default and the parsed variable with the directory separator.
default = parsed = path_to_config + PyFunceble.directory_separator
else:
# The path to the config does end with the directory separator.
# We initiate the default and the parsed variable.
default = parsed = path_to_config
# We append the `CONFIGURATION_FILENAME` to the parsed variable.
parsed += PyFunceble.CONFIGURATION_FILENAME
# And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable.
default += PyFunceble.DEFAULT_CONFIGURATION_FILENAME
# We finally return a tuple which contains both pieces of information.
return (parsed, default) |
def auth_interactive(self, username, handler, submethods=""):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
list of auth types permissible for the next stage of
authentication (normally empty).
:raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
:raises: `.AuthenticationException` -- if the authentication failed
:raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(
username, handler, my_event, submethods
)
return self.auth_handler.wait_for_response(my_event) | Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
list of auth types permissible for the next stage of
authentication (normally empty).
:raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
:raises: `.AuthenticationException` -- if the authentication failed
:raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5 | Below is the instruction that describes the task:
### Input:
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
list of auth types permissible for the next stage of
authentication (normally empty).
:raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
:raises: `.AuthenticationException` -- if the authentication failed
:raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5
### Response:
def auth_interactive(self, username, handler, submethods=""):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: ``handler(title, instructions, prompt_list)``. The ``title`` is
meant to be a dialog-window title, and the ``instructions`` are user
instructions (both are strings). ``prompt_list`` will be a list of
prompts, each prompt being a tuple of ``(str, bool)``. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
``handler('title', 'instructions', [('Password:', False)])``.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
:param str username: the username to authenticate as
:param callable handler: a handler for responding to server questions
:param str submethods: a string list of desired submethods (optional)
:return:
list of auth types permissible for the next stage of
authentication (normally empty).
:raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
:raises: `.AuthenticationException` -- if the authentication failed
:raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException("No existing session")
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(
username, handler, my_event, submethods
)
return self.auth_handler.wait_for_response(my_event) |
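A sketch of a handler matching the interface described above; the transport object and username are assumed to already exist elsewhere:
import getpass
def console_handler(title, instructions, prompt_list):
    # Print any context the server sent, then answer each (prompt, echo) pair.
    if title:
        print(title)
    if instructions:
        print(instructions)
    answers = []
    for prompt, echo in prompt_list:
        answers.append(input(prompt) if echo else getpass.getpass(prompt))
    return answers
# assuming `transport` is an already-connected, key-exchanged Transport:
# transport.auth_interactive('alice', console_handler)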
def remove(self, rev, permanent=False):
"""Removes a revision from this changelist
:param rev: Revision to remove
:type rev: :class:`.Revision`
:param permanent: Whether or not we need to set the changelist to default
:type permanent: bool
"""
if not isinstance(rev, Revision):
raise TypeError('argument needs to be an instance of Revision')
if rev not in self:
raise ValueError('{} not in changelist'.format(rev))
self._files.remove(rev)
if not permanent:
rev.changelist = self._connection.default | Removes a revision from this changelist
:param rev: Revision to remove
:type rev: :class:`.Revision`
:param permanent: Whether or not we need to set the changelist to default
:type permanent: bool | Below is the instruction that describes the task:
### Input:
Removes a revision from this changelist
:param rev: Revision to remove
:type rev: :class:`.Revision`
:param permanent: Whether or not we need to set the changelist to default
:type permanent: bool
### Response:
def remove(self, rev, permanent=False):
"""Removes a revision from this changelist
:param rev: Revision to remove
:type rev: :class:`.Revision`
:param permanent: Whether or not we need to set the changelist to default
:type permanent: bool
"""
if not isinstance(rev, Revision):
raise TypeError('argument needs to be an instance of Revision')
if rev not in self:
raise ValueError('{} not in changelist'.format(rev))
self._files.remove(rev)
if not permanent:
rev.changelist = self._connection.default |
def url_to_parts(url):
""" Split url urlsplit style, but return path as a list and query as a dict """
if not url:
return None
scheme, netloc, path, query, fragment = _urlsplit(url)
if not path or path == '/':
path = []
else:
path = path.strip('/').split('/')
if not query:
query = {}
else:
query = _parse_qs(query)
return _urllib_parse.SplitResult(scheme, netloc, path, query, fragment) | Split url urlsplit style, but return path as a list and query as a dict | Below is the instruction that describes the task:
### Input:
Split url urlsplit style, but return path as a list and query as a dict
### Response:
def url_to_parts(url):
""" Split url urlsplit style, but return path as a list and query as a dict """
if not url:
return None
scheme, netloc, path, query, fragment = _urlsplit(url)
if not path or path == '/':
path = []
else:
path = path.strip('/').split('/')
if not query:
query = {}
else:
query = _parse_qs(query)
return _urllib_parse.SplitResult(scheme, netloc, path, query, fragment) |
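For illustration, a call with a made-up URL and the values it would produce, assuming _urlsplit and _parse_qs are the usual urllib.parse helpers:
parts = url_to_parts('https://example.com/api/v1/items?page=2&sort=name')
# parts.scheme -> 'https', parts.netloc -> 'example.com'
# parts.path -> ['api', 'v1', 'items']
# parts.query -> {'page': ['2'], 'sort': ['name']} (parse_qs maps each key to a list of values)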
def avail(locale):
'''
Check if a locale is available.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' locale.avail 'en_US.UTF-8'
'''
try:
normalized_locale = salt.utils.locales.normalize_locale(locale)
except IndexError:
log.error('Unable to validate locale "%s"', locale)
return False
avail_locales = __salt__['locale.list_avail']()
locale_exists = next((True for x in avail_locales
if salt.utils.locales.normalize_locale(x.strip()) == normalized_locale), False)
return locale_exists | Check if a locale is available.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' locale.avail 'en_US.UTF-8' | Below is the instruction that describes the task:
### Input:
Check if a locale is available.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' locale.avail 'en_US.UTF-8'
### Response:
def avail(locale):
'''
Check if a locale is available.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' locale.avail 'en_US.UTF-8'
'''
try:
normalized_locale = salt.utils.locales.normalize_locale(locale)
except IndexError:
log.error('Unable to validate locale "%s"', locale)
return False
avail_locales = __salt__['locale.list_avail']()
locale_exists = next((True for x in avail_locales
if salt.utils.locales.normalize_locale(x.strip()) == normalized_locale), False)
return locale_exists |
def update_input(filelist, ivmlist=None, removed_files=None):
"""
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
"""
newfilelist = []
if removed_files == []:
return filelist, ivmlist
else:
sci_ivm = list(zip(filelist, ivmlist))
for f in removed_files:
result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ]
ivmlist = [el[1] for el in sci_ivm]
newfilelist = [el[0] for el in sci_ivm]
return newfilelist, ivmlist | Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present. | Below is the instruction that describes the task:
### Input:
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
### Response:
def update_input(filelist, ivmlist=None, removed_files=None):
"""
Removes files flagged to be removed from the input filelist.
Removes the corresponding ivm files if present.
"""
newfilelist = []
if removed_files == []:
return filelist, ivmlist
else:
sci_ivm = list(zip(filelist, ivmlist))
for f in removed_files:
result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ]
ivmlist = [el[1] for el in sci_ivm]
newfilelist = [el[0] for el in sci_ivm]
return newfilelist, ivmlist |
def send_email_confirmation_instructions(self, user):
"""
Sends the confirmation instructions email for the specified user.
Sends signal `confirm_instructions_sent`.
:param user: The user to send the instructions to.
"""
token = self.security_utils_service.generate_confirmation_token(user)
confirmation_link = url_for('security_controller.confirm_email',
token=token, _external=True)
self.send_mail(
_('flask_unchained.bundles.security:email_subject.email_confirmation_instructions'),
to=user.email,
template='security/email/email_confirmation_instructions.html',
user=user,
confirmation_link=confirmation_link)
confirm_instructions_sent.send(app._get_current_object(), user=user,
token=token) | Sends the confirmation instructions email for the specified user.
Sends signal `confirm_instructions_sent`.
:param user: The user to send the instructions to. | Below is the instruction that describes the task:
### Input:
Sends the confirmation instructions email for the specified user.
Sends signal `confirm_instructions_sent`.
:param user: The user to send the instructions to.
### Response:
def send_email_confirmation_instructions(self, user):
"""
Sends the confirmation instructions email for the specified user.
Sends signal `confirm_instructions_sent`.
:param user: The user to send the instructions to.
"""
token = self.security_utils_service.generate_confirmation_token(user)
confirmation_link = url_for('security_controller.confirm_email',
token=token, _external=True)
self.send_mail(
_('flask_unchained.bundles.security:email_subject.email_confirmation_instructions'),
to=user.email,
template='security/email/email_confirmation_instructions.html',
user=user,
confirmation_link=confirmation_link)
confirm_instructions_sent.send(app._get_current_object(), user=user,
token=token) |
def walk(value, walker, path=None, seen=None):
"""Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
"""
seen = seen or set()
path = path or []
# Recursion
if id(value) in seen:
walker.visitRecursion(path)
return
# Error
if isinstance(value, Exception):
walker.visitError(path, value)
return
# List
if isinstance(value, list):
# Not actually a tuple, but okay
recurse = walker.enterList(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
for i, x in enumerate(value):
walk(x, next_walker, path=path + ['[%d]' % i], seen=seen)
walker.leaveList(value, path)
return
# Scalar
if not isinstance(value, framework.TupleLike):
walker.visitScalar(path, value)
return
# Tuple
recurse = walker.enterTuple(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
keys = sorted(value.keys())
for key in keys:
key_path = path + [key]
elm = get_or_error(value, key)
walk(elm, next_walker, path=key_path, seen=seen)
walker.leaveTuple(value, path) | Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree. | Below is the instruction that describes the task:
### Input:
Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
### Response:
def walk(value, walker, path=None, seen=None):
"""Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
"""
seen = seen or set()
path = path or []
# Recursion
if id(value) in seen:
walker.visitRecursion(path)
return
# Error
if isinstance(value, Exception):
walker.visitError(path, value)
return
# List
if isinstance(value, list):
# Not actually a tuple, but okay
recurse = walker.enterList(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
for i, x in enumerate(value):
walk(x, next_walker, path=path + ['[%d]' % i], seen=seen)
walker.leaveList(value, path)
return
# Scalar
if not isinstance(value, framework.TupleLike):
walker.visitScalar(path, value)
return
# Tuple
recurse = walker.enterTuple(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
keys = sorted(value.keys())
for key in keys:
key_path = path + [key]
elm = get_or_error(value, key)
walk(elm, next_walker, path=key_path, seen=seen)
walker.leaveTuple(value, path) |
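A minimal walker sketch; the method names come from how walk() invokes them above, everything else is illustrative:
class PrintWalker(object):
    def enterTuple(self, value, path):
        return True  # True means: keep descending with this same walker
    def leaveTuple(self, value, path):
        pass
    def enterList(self, value, path):
        return True
    def leaveList(self, value, path):
        pass
    def visitScalar(self, path, value):
        print('.'.join(path) or '<root>', '=', value)
    def visitError(self, path, error):
        print('.'.join(path), 'could not be evaluated:', error)
    def visitRecursion(self, path):
        print('.'.join(path), 'is recursive')
# walk(evaluated_gcl_tuple, PrintWalker())  # evaluated_gcl_tuple is assumed to exist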
def shift_multi(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
results = []
for data in x:
results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results) | Shift images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images. | Below is the instruction that describes the task:
### Input:
Shift images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
### Response:
def shift_multi(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
results = []
for data in x:
results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results) |
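A quick sketch of the matched image/mask use case from the docstring; random arrays stand in for real data:
import numpy as np
image = np.random.rand(64, 64, 1)                           # X, a grayscale image
mask = (np.random.rand(64, 64, 1) > 0.5).astype(np.float32)  # Y, its segmentation mask
shifted = shift_multi([image, mask], wrg=0.1, hrg=0.1, is_random=True)
image_shifted, mask_shifted = shifted[0], shifted[1]  # both moved by the same offset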
def bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
"""Decorator function for making bash apps.
Parameters
----------
function : function
Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,
for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the
decorator is used alone, function will be the actual function being decorated, whereas if it
is called with arguments, function will be None. Default is None.
data_flow_kernel : DataFlowKernel
The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can
be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.
walltime : int
Walltime for app in seconds. Default is 60.
executors : string or list
Labels of the executors that this app can execute over. Default is 'all'.
cache : bool
Enable caching of the app call. Default is False.
"""
from parsl.app.bash import BashApp
def decorator(func):
def wrapper(f):
return BashApp(f,
data_flow_kernel=data_flow_kernel,
walltime=walltime,
cache=cache,
executors=executors)
return wrapper(func)
if function is not None:
return decorator(function)
return decorator | Decorator function for making bash apps.
Parameters
----------
function : function
Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,
for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the
decorator is used alone, function will be the actual function being decorated, whereas if it
is called with arguments, function will be None. Default is None.
data_flow_kernel : DataFlowKernel
The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can
be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.
walltime : int
Walltime for app in seconds. Default is 60.
executors : string or list
Labels of the executors that this app can execute over. Default is 'all'.
cache : bool
Enable caching of the app call. Default is False. | Below is the instruction that describes the task:
### Input:
Decorator function for making bash apps.
Parameters
----------
function : function
Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,
for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the
decorator is used alone, function will be the actual function being decorated, whereas if it
is called with arguments, function will be None. Default is None.
data_flow_kernel : DataFlowKernel
The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can
be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.
walltime : int
Walltime for app in seconds. Default is 60.
executors : string or list
Labels of the executors that this app can execute over. Default is 'all'.
cache : bool
Enable caching of the app call. Default is False.
### Response:
def bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
"""Decorator function for making bash apps.
Parameters
----------
function : function
Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,
for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the
decorator is used alone, function will be the actual function being decorated, whereas if it
is called with arguments, function will be None. Default is None.
data_flow_kernel : DataFlowKernel
The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can
be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.
walltime : int
Walltime for app in seconds. Default is 60.
executors : string or list
Labels of the executors that this app can execute over. Default is 'all'.
cache : bool
Enable caching of the app call. Default is False.
"""
from parsl.app.bash import BashApp
def decorator(func):
def wrapper(f):
return BashApp(f,
data_flow_kernel=data_flow_kernel,
walltime=walltime,
cache=cache,
executors=executors)
return wrapper(func)
if function is not None:
return decorator(function)
return decorator |
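A usage sketch, assuming a Parsl configuration has been loaded; the local-threads config shown here is the customary example and is an assumption:
import parsl
from parsl.configs.local_threads import config  # assumed to be available in this Parsl install
parsl.load(config)
@bash_app
def echo_hello(stdout='hello.txt'):
    # the decorated function returns the shell command to run
    return 'echo "Hello World!"'
echo_hello().result()  # blocks until the bash command has finished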
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False):
"""
Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False.
"""
if table_names is None:
table_names = list_tables()
tables = (get_table(t) for t in table_names)
key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'
# set compression options to zlib level-1 if compress arg is True
complib = compress and 'zlib' or None
complevel = compress and 1 or 0
with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store:
for t in tables:
# if local arg is True, store only local columns
columns = None
if local is True:
columns = t.local_columns
store[key_template.format(t.name)] = t.to_frame(columns=columns) | Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False. | Below is the instruction that describes the task:
### Input:
Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False.
### Response:
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False):
"""
Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False.
"""
if table_names is None:
table_names = list_tables()
tables = (get_table(t) for t in table_names)
key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'
# set compression options to zlib level-1 if compress arg is True
complib = compress and 'zlib' or None
complevel = compress and 1 or 0
with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store:
for t in tables:
# if local arg is True, store only local columns
columns = None
if local is True:
columns = t.local_columns
store[key_template.format(t.name)] = t.to_frame(columns=columns) |
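An illustrative call with hypothetical table names, assuming those tables were registered earlier in the same session:
# write two registered tables, keeping only locally-defined columns,
# under a per-iteration prefix and with zlib level-1 compression
write_tables('simulation_output.h5',
             table_names=['households', 'buildings'],
             prefix='year_2020',
             compress=True,
             local=True)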
async def run(websession: ClientSession):
"""Run."""
try:
# Create a client:
client = Client(
'<API_KEY>',
39.7974509,
-104.8887227,
websession,
altitude=1609.3)
# Get current UV info:
print('CURRENT UV DATA:')
print(await client.uv_index())
# Get forecasted UV info:
print()
print('FORECASTED UV DATA:')
print(await client.uv_forecast())
# Get UV protection window:
print()
print('UV PROTECTION WINDOW:')
print(await client.uv_protection_window())
except OpenUvError as err:
print(err) | Run. | Below is the instruction that describes the task:
### Input:
Run.
### Response:
async def run(websession: ClientSession):
"""Run."""
try:
# Create a client:
client = Client(
'<API_KEY>',
39.7974509,
-104.8887227,
websession,
altitude=1609.3)
# Get current UV info:
print('CURRENT UV DATA:')
print(await client.uv_index())
# Get forecasted UV info:
print()
print('FORECASTED UV DATA:')
print(await client.uv_forecast())
# Get UV protection window:
print()
print('UV PROTECTION WINDOW:')
print(await client.uv_protection_window())
except OpenUvError as err:
print(err) |
def get_exitstatus(self):
"""Get the exit status of the program execution.
Returns:
int: Exit status as reported by the operating system,
or None if it is not available.
"""
logger.debug("Exit status is {0}".format(self._spawn.exitstatus))
return self._spawn.exitstatus | Get the exit status of the program execution.
Returns:
int: Exit status as reported by the operating system,
or None if it is not available. | Below is the instruction that describes the task:
### Input:
Get the exit status of the program execution.
Returns:
int: Exit status as reported by the operating system,
or None if it is not available.
### Response:
def get_exitstatus(self):
"""Get the exit status of the program execution.
Returns:
int: Exit status as reported by the operating system,
or None if it is not available.
"""
logger.debug("Exit status is {0}".format(self._spawn.exitstatus))
return self._spawn.exitstatus |
def _run_paired(paired):
"""Run somatic variant calling pipeline.
"""
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out | Run somatic variant calling pipeline. | Below is the instruction that describes the task:
### Input:
Run somatic variant calling pipeline.
### Response:
def _run_paired(paired):
"""Run somatic variant calling pipeline.
"""
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out |
def list_manga_series(self, filter=None, content_type='jp_manga'):
"""Get a list of manga series
"""
result = self._manga_api.list_series(filter, content_type)
return result | Get a list of manga series | Below is the instruction that describes the task:
### Input:
Get a list of manga series
### Response:
def list_manga_series(self, filter=None, content_type='jp_manga'):
"""Get a list of manga series
"""
result = self._manga_api.list_series(filter, content_type)
return result |
def make_table (dt):
"""
Build the HTML needed for a MultiQC table.
:param data: MultiQC datatable object
"""
table_id = dt.pconfig.get('id', 'table_{}'.format(''.join(random.sample(letters, 4))) )
table_id = report.save_htmlid(table_id)
t_headers = OrderedDict()
t_modal_headers = OrderedDict()
t_rows = OrderedDict()
dt.raw_vals = defaultdict(lambda: dict())
empty_cells = dict()
hidden_cols = 1
table_title = dt.pconfig.get('table_title')
if table_title is None:
table_title = table_id.replace("_", " ").title()
for idx, k, header in dt.get_headers_in_order():
rid = header['rid']
# Build the table header cell
shared_key = ''
if header.get('shared_key', None) is not None:
shared_key = ' data-shared-key={}'.format(header['shared_key'])
hide = ''
muted = ''
checked = ' checked="checked"'
if header.get('hidden', False) is True:
hide = 'hidden'
muted = ' text-muted'
checked = ''
hidden_cols += 1
data_attr = 'data-dmax="{}" data-dmin="{}" data-namespace="{}" {}' \
.format(header['dmax'], header['dmin'], header['namespace'], shared_key)
cell_contents = '<span class="mqc_table_tooltip" title="{}: {}">{}</span>' \
.format(header['namespace'], header['description'], header['title'])
t_headers[rid] = '<th id="header_{rid}" class="{rid} {h}" {da}>{c}</th>' \
.format(rid=rid, h=hide, da=data_attr, c=cell_contents)
empty_cells[rid] = '<td class="data-coloured {rid} {h}"></td>'.format(rid=rid, h=hide)
# Build the modal table row
t_modal_headers[rid] = """
<tr class="{rid}{muted}" style="background-color: rgba({col}, 0.15);">
<td class="sorthandle ui-sortable-handle">||</span></td>
<td style="text-align:center;">
<input class="mqc_table_col_visible" type="checkbox" {checked} value="{rid}" data-target="#{tid}">
</td>
<td>{name}</td>
<td>{title}</td>
<td>{desc}</td>
<td>{col_id}</td>
<td>{sk}</td>
</tr>""".format(
rid = rid,
muted = muted,
checked = checked,
tid = table_id,
col = header['colour'],
name = header['namespace'],
title = header['title'],
desc = header['description'],
col_id = '<code>{}</code>'.format(k),
sk = header.get('shared_key', '')
)
# Make a colour scale
if header['scale'] == False:
c_scale = None
else:
c_scale = mqc_colour.mqc_colour_scale(header['scale'], header['dmin'], header['dmax'])
# Add the data table cells
for (s_name, samp) in dt.data[idx].items():
if k in samp:
val = samp[k]
kname = '{}_{}'.format(header['namespace'], rid)
dt.raw_vals[s_name][kname] = val
if 'modify' in header and callable(header['modify']):
val = header['modify'](val)
try:
dmin = header['dmin']
dmax = header['dmax']
percentage = ((float(val) - dmin) / (dmax - dmin)) * 100
percentage = min(percentage, 100)
percentage = max(percentage, 0)
except (ZeroDivisionError,ValueError):
percentage = 0
try:
valstring = str(header['format'].format(val))
except ValueError:
try:
valstring = str(header['format'].format(float(val)))
except ValueError:
valstring = str(val)
except:
valstring = str(val)
# This is horrible, but Python locale settings are worse
if config.thousandsSep_format is None:
config.thousandsSep_format = '<span class="mqc_thousandSep"></span>'
if config.decimalPoint_format is None:
config.decimalPoint_format = '.'
valstring = valstring.replace('.', 'DECIMAL').replace(',', 'THOUSAND')
valstring = valstring.replace('DECIMAL', config.decimalPoint_format).replace('THOUSAND', config.thousandsSep_format)
# Percentage suffixes etc
valstring += header.get('suffix', '')
# Conditional formatting
cmatches = { cfck: False for cfc in config.table_cond_formatting_colours for cfck in cfc }
# Find general rules followed by column-specific rules
for cfk in ['all_columns', rid]:
if cfk in config.table_cond_formatting_rules:
# Loop through match types
for ftype in cmatches.keys():
# Loop through array of comparison types
for cmp in config.table_cond_formatting_rules[cfk].get(ftype, []):
try:
# Each comparison should be a dict with single key: val
if 's_eq' in cmp and str(cmp['s_eq']).lower() == str(val).lower():
cmatches[ftype] = True
if 's_contains' in cmp and str(cmp['s_contains']).lower() in str(val).lower():
cmatches[ftype] = True
if 's_ne' in cmp and str(cmp['s_ne']).lower() != str(val).lower():
cmatches[ftype] = True
if 'eq' in cmp and float(cmp['eq']) == float(val):
cmatches[ftype] = True
if 'ne' in cmp and float(cmp['ne']) != float(val):
cmatches[ftype] = True
if 'gt' in cmp and float(cmp['gt']) < float(val):
cmatches[ftype] = True
if 'lt' in cmp and float(cmp['lt']) > float(val):
cmatches[ftype] = True
except:
logger.warn("Not able to apply table conditional formatting to '{}' ({})".format(val, cmp))
# Apply HTML in order of config keys
bgcol = None
for cfc in config.table_cond_formatting_colours:
for cfck in cfc: # should always be one, but you never know
if cmatches[cfck]:
bgcol = cfc[cfck]
if bgcol is not None:
valstring = '<span class="badge" style="background-color:{}">{}</span>'.format(bgcol, valstring)
# Build HTML
if not header['scale']:
if s_name not in t_rows:
t_rows[s_name] = dict()
t_rows[s_name][rid] = '<td class="{rid} {h}">{v}</td>'.format(rid=rid, h=hide, v=valstring)
else:
if c_scale is not None:
col = ' background-color:{};'.format(c_scale.get_colour(val))
else:
col = ''
bar_html = '<span class="bar" style="width:{}%;{}"></span>'.format(percentage, col)
val_html = '<span class="val">{}</span>'.format(valstring)
wrapper_html = '<div class="wrapper">{}{}</div>'.format(bar_html, val_html)
if s_name not in t_rows:
t_rows[s_name] = dict()
t_rows[s_name][rid] = '<td class="data-coloured {rid} {h}">{c}</td>'.format(rid=rid, h=hide, c=wrapper_html)
# Remove header if we don't have any filled cells for it
if sum([len(rows) for rows in t_rows.values()]) == 0:
t_headers.pop(rid, None)
t_modal_headers.pop(rid, None)
logger.debug('Removing header {} from general stats table, as no data'.format(k))
#
# Put everything together
#
# Buttons above the table
html = ''
if not config.simple_output:
# Copy Table Button
html += """
<button type="button" class="mqc_table_copy_btn btn btn-default btn-sm" data-clipboard-target="#{tid}">
<span class="glyphicon glyphicon-copy"></span> Copy table
</button>
""".format(tid=table_id)
# Configure Columns Button
if len(t_headers) > 1:
html += """
<button type="button" class="mqc_table_configModal_btn btn btn-default btn-sm" data-toggle="modal" data-target="#{tid}_configModal">
<span class="glyphicon glyphicon-th"></span> Configure Columns
</button>
""".format(tid=table_id)
# Sort By Highlight button
html += """
<button type="button" class="mqc_table_sortHighlight btn btn-default btn-sm" data-target="#{tid}" data-direction="desc" style="display:none;">
<span class="glyphicon glyphicon-sort-by-attributes-alt"></span> Sort by highlight
</button>
""".format(tid=table_id)
# Scatter Plot Button
if len(t_headers) > 1:
html += """
<button type="button" class="mqc_table_makeScatter btn btn-default btn-sm" data-toggle="modal" data-target="#tableScatterModal" data-table="#{tid}">
<span class="glyphicon glyphicon glyphicon-stats"></span> Plot
</button>
""".format(tid=table_id)
# "Showing x of y columns" text
html += """
<small id="{tid}_numrows_text" class="mqc_table_numrows_text">Showing <sup id="{tid}_numrows" class="mqc_table_numrows">{nrows}</sup>/<sub>{nrows}</sub> rows and <sup id="{tid}_numcols" class="mqc_table_numcols">{ncols_vis}</sup>/<sub>{ncols}</sub> columns.</small>
""".format(tid=table_id, nrows=len(t_rows), ncols_vis = (len(t_headers)+1)-hidden_cols, ncols=len(t_headers))
# Build the table itself
collapse_class = 'mqc-table-collapse' if len(t_rows) > 10 and config.collapse_tables else ''
html += """
<div id="{tid}_container" class="mqc_table_container">
<div class="table-responsive mqc-table-responsive {cc}">
<table id="{tid}" class="table table-condensed mqc_table" data-title="{title}">
""".format( tid=table_id, title=table_title, cc=collapse_class)
# Build the header row
col1_header = dt.pconfig.get('col1_header', 'Sample Name')
html += '<thead><tr><th class="rowheader">{}</th>{}</tr></thead>'.format(col1_header, ''.join(t_headers.values()))
# Build the table body
html += '<tbody>'
t_row_keys = t_rows.keys()
if dt.pconfig.get('sortRows') is not False:
t_row_keys = sorted(t_row_keys)
for s_name in t_row_keys:
html += '<tr>'
# Sample name row header
html += '<th class="rowheader" data-original-sn="{sn}">{sn}</th>'.format(sn=s_name)
for k in t_headers:
html += t_rows[s_name].get(k, empty_cells[k])
html += '</tr>'
html += '</tbody></table></div>'
if len(t_rows) > 10 and config.collapse_tables:
html += '<div class="mqc-table-expand"><span class="glyphicon glyphicon-chevron-down" aria-hidden="true"></span></div>'
html += '</div>'
# Build the bootstrap modal to customise columns and order
if not config.simple_output:
html += """
<!-- MultiQC Table Columns Modal -->
<div class="modal fade" id="{tid}_configModal" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title">{title}: Columns</h4>
</div>
<div class="modal-body">
<p>Uncheck the tick box to hide columns. Click and drag the handle on the left to change order.</p>
<p>
<button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showAll">Show All</button>
<button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showNone">Show None</button>
</p>
<table class="table mqc_table mqc_sortable mqc_configModal_table" id="{tid}_configModal_table" data-title="{title}">
<thead>
<tr>
<th class="sorthandle" style="text-align:center;">Sort</th>
<th style="text-align:center;">Visible</th>
<th>Group</th>
<th>Column</th>
<th>Description</th>
<th>ID</th>
<th>Scale</th>
</tr>
</thead>
<tbody>
{trows}
</tbody>
</table>
</div>
<div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal">Close</button> </div>
</div> </div> </div>""".format( tid=table_id, title=table_title, trows=''.join(t_modal_headers.values()) )
# Save the raw values to a file if requested
if dt.pconfig.get('save_file') is True:
fn = dt.pconfig.get('raw_data_fn', 'multiqc_{}'.format(table_id) )
util_functions.write_data_file(dt.raw_vals, fn )
report.saved_raw_data[fn] = dt.raw_vals
return html | Build the HTML needed for a MultiQC table.
:param data: MultiQC datatable object | Below is the instruction that describes the task:
### Input:
Build the HTML needed for a MultiQC table.
:param data: MultiQC datatable object
### Response:
def make_table (dt):
"""
Build the HTML needed for a MultiQC table.
:param data: MultiQC datatable object
"""
table_id = dt.pconfig.get('id', 'table_{}'.format(''.join(random.sample(letters, 4))) )
table_id = report.save_htmlid(table_id)
t_headers = OrderedDict()
t_modal_headers = OrderedDict()
t_rows = OrderedDict()
dt.raw_vals = defaultdict(lambda: dict())
empty_cells = dict()
hidden_cols = 1
table_title = dt.pconfig.get('table_title')
if table_title is None:
table_title = table_id.replace("_", " ").title()
for idx, k, header in dt.get_headers_in_order():
rid = header['rid']
# Build the table header cell
shared_key = ''
if header.get('shared_key', None) is not None:
shared_key = ' data-shared-key={}'.format(header['shared_key'])
hide = ''
muted = ''
checked = ' checked="checked"'
if header.get('hidden', False) is True:
hide = 'hidden'
muted = ' text-muted'
checked = ''
hidden_cols += 1
data_attr = 'data-dmax="{}" data-dmin="{}" data-namespace="{}" {}' \
.format(header['dmax'], header['dmin'], header['namespace'], shared_key)
cell_contents = '<span class="mqc_table_tooltip" title="{}: {}">{}</span>' \
.format(header['namespace'], header['description'], header['title'])
t_headers[rid] = '<th id="header_{rid}" class="{rid} {h}" {da}>{c}</th>' \
.format(rid=rid, h=hide, da=data_attr, c=cell_contents)
empty_cells[rid] = '<td class="data-coloured {rid} {h}"></td>'.format(rid=rid, h=hide)
# Build the modal table row
t_modal_headers[rid] = """
<tr class="{rid}{muted}" style="background-color: rgba({col}, 0.15);">
<td class="sorthandle ui-sortable-handle">||</span></td>
<td style="text-align:center;">
<input class="mqc_table_col_visible" type="checkbox" {checked} value="{rid}" data-target="#{tid}">
</td>
<td>{name}</td>
<td>{title}</td>
<td>{desc}</td>
<td>{col_id}</td>
<td>{sk}</td>
</tr>""".format(
rid = rid,
muted = muted,
checked = checked,
tid = table_id,
col = header['colour'],
name = header['namespace'],
title = header['title'],
desc = header['description'],
col_id = '<code>{}</code>'.format(k),
sk = header.get('shared_key', '')
)
# Make a colour scale
if header['scale'] == False:
c_scale = None
else:
c_scale = mqc_colour.mqc_colour_scale(header['scale'], header['dmin'], header['dmax'])
# Add the data table cells
for (s_name, samp) in dt.data[idx].items():
if k in samp:
val = samp[k]
kname = '{}_{}'.format(header['namespace'], rid)
dt.raw_vals[s_name][kname] = val
if 'modify' in header and callable(header['modify']):
val = header['modify'](val)
try:
dmin = header['dmin']
dmax = header['dmax']
percentage = ((float(val) - dmin) / (dmax - dmin)) * 100
percentage = min(percentage, 100)
percentage = max(percentage, 0)
except (ZeroDivisionError,ValueError):
percentage = 0
try:
valstring = str(header['format'].format(val))
except ValueError:
try:
valstring = str(header['format'].format(float(val)))
except ValueError:
valstring = str(val)
except:
valstring = str(val)
# This is horrible, but Python locale settings are worse
if config.thousandsSep_format is None:
config.thousandsSep_format = '<span class="mqc_thousandSep"></span>'
if config.decimalPoint_format is None:
config.decimalPoint_format = '.'
valstring = valstring.replace('.', 'DECIMAL').replace(',', 'THOUSAND')
valstring = valstring.replace('DECIMAL', config.decimalPoint_format).replace('THOUSAND', config.thousandsSep_format)
# Percentage suffixes etc
valstring += header.get('suffix', '')
# Conditional formatting
cmatches = { cfck: False for cfc in config.table_cond_formatting_colours for cfck in cfc }
# Find general rules followed by column-specific rules
for cfk in ['all_columns', rid]:
if cfk in config.table_cond_formatting_rules:
# Loop through match types
for ftype in cmatches.keys():
# Loop through array of comparison types
for cmp in config.table_cond_formatting_rules[cfk].get(ftype, []):
try:
# Each comparison should be a dict with single key: val
if 's_eq' in cmp and str(cmp['s_eq']).lower() == str(val).lower():
cmatches[ftype] = True
if 's_contains' in cmp and str(cmp['s_contains']).lower() in str(val).lower():
cmatches[ftype] = True
if 's_ne' in cmp and str(cmp['s_ne']).lower() != str(val).lower():
cmatches[ftype] = True
if 'eq' in cmp and float(cmp['eq']) == float(val):
cmatches[ftype] = True
if 'ne' in cmp and float(cmp['ne']) != float(val):
cmatches[ftype] = True
if 'gt' in cmp and float(cmp['gt']) < float(val):
cmatches[ftype] = True
if 'lt' in cmp and float(cmp['lt']) > float(val):
cmatches[ftype] = True
except:
logger.warn("Not able to apply table conditional formatting to '{}' ({})".format(val, cmp))
# Apply HTML in order of config keys
bgcol = None
for cfc in config.table_cond_formatting_colours:
for cfck in cfc: # should always be one, but you never know
if cmatches[cfck]:
bgcol = cfc[cfck]
if bgcol is not None:
valstring = '<span class="badge" style="background-color:{}">{}</span>'.format(bgcol, valstring)
# Build HTML
if not header['scale']:
if s_name not in t_rows:
t_rows[s_name] = dict()
t_rows[s_name][rid] = '<td class="{rid} {h}">{v}</td>'.format(rid=rid, h=hide, v=valstring)
else:
if c_scale is not None:
col = ' background-color:{};'.format(c_scale.get_colour(val))
else:
col = ''
bar_html = '<span class="bar" style="width:{}%;{}"></span>'.format(percentage, col)
val_html = '<span class="val">{}</span>'.format(valstring)
wrapper_html = '<div class="wrapper">{}{}</div>'.format(bar_html, val_html)
if s_name not in t_rows:
t_rows[s_name] = dict()
t_rows[s_name][rid] = '<td class="data-coloured {rid} {h}">{c}</td>'.format(rid=rid, h=hide, c=wrapper_html)
# Remove header if we don't have any filled cells for it
if sum([len(rows) for rows in t_rows.values()]) == 0:
t_headers.pop(rid, None)
t_modal_headers.pop(rid, None)
logger.debug('Removing header {} from general stats table, as no data'.format(k))
#
# Put everything together
#
# Buttons above the table
html = ''
if not config.simple_output:
# Copy Table Button
html += """
<button type="button" class="mqc_table_copy_btn btn btn-default btn-sm" data-clipboard-target="#{tid}">
<span class="glyphicon glyphicon-copy"></span> Copy table
</button>
""".format(tid=table_id)
# Configure Columns Button
if len(t_headers) > 1:
html += """
<button type="button" class="mqc_table_configModal_btn btn btn-default btn-sm" data-toggle="modal" data-target="#{tid}_configModal">
<span class="glyphicon glyphicon-th"></span> Configure Columns
</button>
""".format(tid=table_id)
# Sort By Highlight button
html += """
<button type="button" class="mqc_table_sortHighlight btn btn-default btn-sm" data-target="#{tid}" data-direction="desc" style="display:none;">
<span class="glyphicon glyphicon-sort-by-attributes-alt"></span> Sort by highlight
</button>
""".format(tid=table_id)
# Scatter Plot Button
if len(t_headers) > 1:
html += """
<button type="button" class="mqc_table_makeScatter btn btn-default btn-sm" data-toggle="modal" data-target="#tableScatterModal" data-table="#{tid}">
<span class="glyphicon glyphicon glyphicon-stats"></span> Plot
</button>
""".format(tid=table_id)
# "Showing x of y columns" text
html += """
<small id="{tid}_numrows_text" class="mqc_table_numrows_text">Showing <sup id="{tid}_numrows" class="mqc_table_numrows">{nrows}</sup>/<sub>{nrows}</sub> rows and <sup id="{tid}_numcols" class="mqc_table_numcols">{ncols_vis}</sup>/<sub>{ncols}</sub> columns.</small>
""".format(tid=table_id, nrows=len(t_rows), ncols_vis = (len(t_headers)+1)-hidden_cols, ncols=len(t_headers))
# Build the table itself
collapse_class = 'mqc-table-collapse' if len(t_rows) > 10 and config.collapse_tables else ''
html += """
<div id="{tid}_container" class="mqc_table_container">
<div class="table-responsive mqc-table-responsive {cc}">
<table id="{tid}" class="table table-condensed mqc_table" data-title="{title}">
""".format( tid=table_id, title=table_title, cc=collapse_class)
# Build the header row
col1_header = dt.pconfig.get('col1_header', 'Sample Name')
html += '<thead><tr><th class="rowheader">{}</th>{}</tr></thead>'.format(col1_header, ''.join(t_headers.values()))
# Build the table body
html += '<tbody>'
t_row_keys = t_rows.keys()
if dt.pconfig.get('sortRows') is not False:
t_row_keys = sorted(t_row_keys)
for s_name in t_row_keys:
html += '<tr>'
# Sample name row header
html += '<th class="rowheader" data-original-sn="{sn}">{sn}</th>'.format(sn=s_name)
for k in t_headers:
html += t_rows[s_name].get(k, empty_cells[k])
html += '</tr>'
html += '</tbody></table></div>'
if len(t_rows) > 10 and config.collapse_tables:
html += '<div class="mqc-table-expand"><span class="glyphicon glyphicon-chevron-down" aria-hidden="true"></span></div>'
html += '</div>'
# Build the bootstrap modal to customise columns and order
if not config.simple_output:
html += """
<!-- MultiQC Table Columns Modal -->
<div class="modal fade" id="{tid}_configModal" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title">{title}: Columns</h4>
</div>
<div class="modal-body">
<p>Uncheck the tick box to hide columns. Click and drag the handle on the left to change order.</p>
<p>
<button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showAll">Show All</button>
<button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showNone">Show None</button>
</p>
<table class="table mqc_table mqc_sortable mqc_configModal_table" id="{tid}_configModal_table" data-title="{title}">
<thead>
<tr>
<th class="sorthandle" style="text-align:center;">Sort</th>
<th style="text-align:center;">Visible</th>
<th>Group</th>
<th>Column</th>
<th>Description</th>
<th>ID</th>
<th>Scale</th>
</tr>
</thead>
<tbody>
{trows}
</tbody>
</table>
</div>
<div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal">Close</button> </div>
</div> </div> </div>""".format( tid=table_id, title=table_title, trows=''.join(t_modal_headers.values()) )
# Save the raw values to a file if requested
if dt.pconfig.get('save_file') is True:
fn = dt.pconfig.get('raw_data_fn', 'multiqc_{}'.format(table_id) )
util_functions.write_data_file(dt.raw_vals, fn )
report.saved_raw_data[fn] = dt.raw_vals
return html |
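
A minimal sketch of the configuration shapes the conditional-formatting loop above expects — the rule names, column id and colours below are made up for illustration, not MultiQC defaults:

# Illustrative config shapes consumed by the conditional-formatting loop above.
table_cond_formatting_colours = [
    {'pass': '#5cb85c'},   # list order matters: later matching keys override earlier ones
    {'warn': '#f0ad4e'},
    {'fail': '#d9534f'},
]
table_cond_formatting_rules = {
    'all_columns': {
        'fail': [{'s_eq': 'error'}],   # case-insensitive string equality
    },
    'percent_duplicates': {            # hypothetical column id (rid)
        'warn': [{'gt': 30}],          # numeric comparisons: gt, lt, eq, ne
        'fail': [{'gt': 50}],
    },
}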
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop) | The minimal amount of setup done by both setup() and no_setup(). | Below is the the instruction that describes the task:
### Input:
The minimal amount of setup done by both setup() and no_setup().
### Response:
def _common_setup(self):
"""
The minimal amount of setup done by both setup() and no_setup().
"""
self._started = True
self._reactor = self._reactorFactory()
self._registry = ResultRegistry()
# We want to unblock EventualResult regardless of how the reactor is
# run, so we always register this:
self._reactor.addSystemEventTrigger(
"before", "shutdown", self._registry.stop) |
def read_char_lengths(self, filename, electrode_filename):
"""Read characteristic lengths from the given file.
The file is expected to have either 1 or 4 entries/lines with
characteristic lengths > 0 (floats). If only one value is encountered,
it is used for all four entities. If four values are encountered, they
are assigned, in order, to:
1) electrode nodes
2) boundary nodes
3) nodes from extra lines
4) nodes from extra nodes
Note that in case one node belongs to multiple entities, the smallest
characteristic length will be used.
If four values are used and the electrode length is negative, then the
electrode positions will be read in (todo: we open the electrode.dat
file two times here...) and the minimal distance between all electrodes
will be multiplied by the absolute value of the imported value, and
used as the characteristic length:
.. math::
l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from
file}|
The function scipy.spatial.distance.pdist is used to compute the global
minimal distance between any two electrodes.
It is advisable to only used values in the range [-1, 0) for the
automatic char length option.
"""
if os.path.isfile(filename):
data = np.atleast_1d(np.loadtxt(filename))
if data.size == 4:
characteristic_length = data
# check sign of first (electrode) length value
if characteristic_length[0] < 0:
try:
elec_positions = np.loadtxt(electrode_filename)
except:
raise IOError(
                        'There was an error opening the electrode file')
import scipy.spatial.distance
distances = scipy.spatial.distance.pdist(elec_positions)
characteristic_length[0] = min(distances) * np.abs(
characteristic_length[0])
if characteristic_length[0] == 0:
raise Exception(
'Error computing electrode ' +
                        'distances (got a minimal distance of zero)')
else:
characteristic_length = np.ones(4) * data[0]
else:
characteristic_length = np.ones(4)
if np.any(characteristic_length <= 0):
raise Exception('No negative characteristic lengths allowed ' +
                            '(except for electrode length)')
self.char_lengths = {}
for key, item in zip(('electrode',
'boundary',
'extra_line',
'extra_node'),
characteristic_length):
self.char_lengths[key] = item | Read characteristic lengths from the given file.
The file is expected to have either 1 or 4 entries/lines with
characteristic lengths > 0 (floats). If only one value is encountered,
it is used for all four entities. If four values are encountered, they
are assigned, in order, to:
1) electrode nodes
2) boundary nodes
3) nodes from extra lines
4) nodes from extra nodes
Note that in case one node belongs to multiple entities, the smallest
characteristic length will be used.
If four values are used and the electrode length is negative, then the
electrode positions will be read in (todo: we open the electrode.dat
file two times here...) and the minimal distance between all electrodes
will be multiplied by the absolute value of the imported value, and
used as the characteristic length:
.. math::
l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from
file}|
The function scipy.spatial.distance.pdist is used to compute the global
minimal distance between any two electrodes.
It is advisable to only used values in the range [-1, 0) for the
automatic char length option. | Below is the the instruction that describes the task:
### Input:
Read characteristic lengths from the given file.
The file is expected to have either 1 or 4 entries/lines with
characteristic lengths > 0 (floats). If only one value is encountered,
it is used for all four entities. If four values are encountered, they
are assigned, in order, to:
1) electrode nodes
2) boundary nodes
3) nodes from extra lines
4) nodes from extra nodes
Note that in case one node belongs to multiple entities, the smallest
characteristic length will be used.
If four values are used and the electrode length is negative, then the
electrode positions will be read in (todo: we open the electrode.dat
file two times here...) and the minimal distance between all electrodes
will be multiplied by the absolute value of the imported value, and
used as the characteristic length:
.. math::
l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from
file}|
The function scipy.spatial.distance.pdist is used to compute the global
minimal distance between any two electrodes.
It is advisable to only used values in the range [-1, 0) for the
automatic char length option.
### Response:
def read_char_lengths(self, filename, electrode_filename):
"""Read characteristic lengths from the given file.
The file is expected to have either 1 or 4 entries/lines with
characteristic lengths > 0 (floats). If only one value is encountered,
it is used for all four entities. If four values are encountered, they
are assigned, in order, to:
1) electrode nodes
2) boundary nodes
3) nodes from extra lines
4) nodes from extra nodes
Note that in case one node belongs to multiple entities, the smallest
characteristic length will be used.
If four values are used and the electrode length is negative, then the
electrode positions will be read in (todo: we open the electrode.dat
file two times here...) and the minimal distance between all electrodes
will be multiplied by the absolute value of the imported value, and
used as the characteristic length:
.. math::
l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from
file}|
The function scipy.spatial.distance.pdist is used to compute the global
minimal distance between any two electrodes.
It is advisable to only used values in the range [-1, 0) for the
automatic char length option.
"""
if os.path.isfile(filename):
data = np.atleast_1d(np.loadtxt(filename))
if data.size == 4:
characteristic_length = data
# check sign of first (electrode) length value
if characteristic_length[0] < 0:
try:
elec_positions = np.loadtxt(electrode_filename)
except:
raise IOError(
                        'There was an error opening the electrode file')
import scipy.spatial.distance
distances = scipy.spatial.distance.pdist(elec_positions)
characteristic_length[0] = min(distances) * np.abs(
characteristic_length[0])
if characteristic_length[0] == 0:
raise Exception(
'Error computing electrode ' +
                        'distances (got a minimal distance of zero)')
else:
characteristic_length = np.ones(4) * data[0]
else:
characteristic_length = np.ones(4)
if np.any(characteristic_length <= 0):
raise Exception('No negative characteristic lengths allowed ' +
                            '(except for electrode length)')
self.char_lengths = {}
for key, item in zip(('electrode',
'boundary',
'extra_line',
'extra_node'),
characteristic_length):
self.char_lengths[key] = item |
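
A self-contained sketch of the negative-electrode-length convention described in the docstring above: with four values whose first entry is -0.5, the effective electrode length becomes half of the smallest pairwise electrode distance (file handling stripped out, arrays made up):

import numpy as np
import scipy.spatial.distance

# Hypothetical inputs: four characteristic lengths and three electrode positions.
char_lengths = np.array([-0.5, 10.0, 5.0, 5.0])
elec_positions = np.array([[0.0, 0.0], [1.0, 0.0], [2.5, 0.0]])

if char_lengths[0] < 0:
    # smallest pairwise distance is 1.0 here, so the electrode length becomes 0.5
    min_dist = scipy.spatial.distance.pdist(elec_positions).min()
    char_lengths[0] = min_dist * np.abs(char_lengths[0])

print(char_lengths)  # electrode entry is now 0.5 (= 1.0 * |-0.5|)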
def get_activities(self, count=10, since=None, style='summary',
limit=None):
"""Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
"""
params = {}
if since:
params.update(fromDate=to_timestamp(since))
parts = ['my', 'activities', 'search']
if style != 'summary':
parts.append(style)
url = self._build_url(*parts)
# TODO: return an Activity (or ActivitySummary?) class that can do
# things like convert date and time fields to proper datetime objects
return islice(self._iter(url, count, **params), limit) | Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query. | Below is the the instruction that describes the task:
### Input:
Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
### Response:
def get_activities(self, count=10, since=None, style='summary',
limit=None):
"""Iterate over all activities, from newest to oldest.
:param count: The number of results to retrieve per page. If set to
``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either
a timestamp or a datetime object.
:param style: The type of records to return. May be one of
'summary', 'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
query.
"""
params = {}
if since:
params.update(fromDate=to_timestamp(since))
parts = ['my', 'activities', 'search']
if style != 'summary':
parts.append(style)
url = self._build_url(*parts)
# TODO: return an Activity (or ActivitySummary?) class that can do
# things like convert date and time fields to proper datetime objects
return islice(self._iter(url, count, **params), limit) |
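
A usage sketch for the method above; the client class and constructor are assumptions, only the keyword arguments come from the signature and docstring:

from datetime import datetime, timedelta

client = SomeActivityClient(username="alice", password="secret")  # hypothetical client

recent = client.get_activities(
    count=25,                                   # results per page
    since=datetime.now() - timedelta(days=30),  # datetime or raw timestamp
    style="briefs",                             # 'summary', 'briefs', 'ids' or 'extended'
    limit=100,                                  # stop after 100 activities in total
)
for activity in recent:
    print(activity)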
def drawDisplay(self, painter, option, rect, text):
"""
Overloads the drawDisplay method to render HTML if the rich text \
information is set to true.
:param painter | <QtGui.QPainter>
option | <QtGui.QStyleOptionItem>
rect | <QtCore.QRect>
text | <str>
"""
if self.showRichText():
# create the document
doc = QtGui.QTextDocument()
doc.setTextWidth(float(rect.width()))
doc.setHtml(text)
# draw the contents
painter.translate(rect.x(), rect.y())
doc.drawContents(painter, QtCore.QRectF(0,
0,
float(rect.width()),
float(rect.height())))
painter.translate(-rect.x(), -rect.y())
else:
if type(text).__name__ not in ('str', 'unicode', 'QString'):
text = nativestring(text)
metrics = QtGui.QFontMetrics(option.font)
text = metrics.elidedText(text,
QtCore.Qt.TextElideMode(option.textElideMode),
rect.width())
painter.setFont(option.font)
painter.drawText(rect, int(option.displayAlignment), text) | Overloads the drawDisplay method to render HTML if the rich text \
information is set to true.
:param painter | <QtGui.QPainter>
option | <QtGui.QStyleOptionItem>
rect | <QtCore.QRect>
text | <str> | Below is the the instruction that describes the task:
### Input:
Overloads the drawDisplay method to render HTML if the rich text \
information is set to true.
:param painter | <QtGui.QPainter>
option | <QtGui.QStyleOptionItem>
rect | <QtCore.QRect>
text | <str>
### Response:
def drawDisplay(self, painter, option, rect, text):
"""
Overloads the drawDisplay method to render HTML if the rich text \
information is set to true.
:param painter | <QtGui.QPainter>
option | <QtGui.QStyleOptionItem>
rect | <QtCore.QRect>
text | <str>
"""
if self.showRichText():
# create the document
doc = QtGui.QTextDocument()
doc.setTextWidth(float(rect.width()))
doc.setHtml(text)
# draw the contents
painter.translate(rect.x(), rect.y())
doc.drawContents(painter, QtCore.QRectF(0,
0,
float(rect.width()),
float(rect.height())))
painter.translate(-rect.x(), -rect.y())
else:
if type(text).__name__ not in ('str', 'unicode', 'QString'):
text = nativestring(text)
metrics = QtGui.QFontMetrics(option.font)
text = metrics.elidedText(text,
QtCore.Qt.TextElideMode(option.textElideMode),
rect.width())
painter.setFont(option.font)
painter.drawText(rect, int(option.displayAlignment), text) |
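
A short standalone sketch of the plain-text fallback above: QFontMetrics.elidedText truncates a string with an ellipsis so it fits a given pixel width, which is what the delegate relies on when rich text is disabled (the PyQt5 import is an assumption; the original code targets a different Qt wrapper):

from PyQt5 import QtCore, QtGui, QtWidgets

app = QtWidgets.QApplication([])  # QFontMetrics needs an application instance
metrics = QtGui.QFontMetrics(QtGui.QFont("Arial", 10))

text = "A fairly long cell value that will not fit in a narrow column"
elided = metrics.elidedText(text, QtCore.Qt.ElideRight, 120)  # 120 px available
print(elided)  # e.g. "A fairly long..."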
def get_subject_version_ids(self, subject):
"""
Return the list of schema version ids which have been registered
under the given subject.
"""
res = requests.get(self._url('/subjects/{}/versions', subject))
raise_if_failed(res)
return res.json() | Return the list of schema version ids which have been registered
under the given subject. | Below is the the instruction that describes the task:
### Input:
Return the list of schema version ids which have been registered
under the given subject.
### Response:
def get_subject_version_ids(self, subject):
"""
Return the list of schema version ids which have been registered
under the given subject.
"""
res = requests.get(self._url('/subjects/{}/versions', subject))
raise_if_failed(res)
return res.json() |
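
A usage sketch for the call above; the client class and registry URL are assumptions, only the method name and its JSON-list return value come from the code:

client = SchemaRegistryClient("http://localhost:8081")  # hypothetical wrapper class

version_ids = client.get_subject_version_ids("my-topic-value")
print(version_ids)  # e.g. [1, 2, 3]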