def reply(self, connection, reply, orig_req):
    """Send an asynchronous reply to an earlier request.

    Parameters
    ----------
    connection : ClientConnection object
        The client to send the reply to.
    reply : Message object
        The reply message to send.
    orig_req : Message object
        The request message being replied to. The reply message's
        id is overridden with the id from orig_req before the
        reply is sent.
    """
    if isinstance(connection, ClientRequestConnection):
        self._logger.warn(
            'Deprecation warning: do not use self.reply() '
            'within a reply handler context -- use req.reply(*msg_args)\n'
            'or req.reply_with_message(msg) Traceback:\n %s',
            "".join(traceback.format_stack()))
        # Get the underlying ClientConnection instance
        connection = connection.client_connection
    connection.reply(reply, orig_req)
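A brief usage sketch; the server object, client connection, and katcp-style Message class below are assumptions for illustration, not part of the snippet above:

# `server` and `client_conn` are placeholders for a running device server
# and one of its client connections; Message follows the katcp API shape.
orig_req = Message.request('watchdog')
server.reply(client_conn, Message.reply('watchdog', 'ok'), orig_req)
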
def route(obj, rule, *args, **kwargs):
    """Decorator for the View classes."""
    def decorator(cls):
        endpoint = kwargs.get('endpoint', camel_to_snake(cls.__name__))
        kwargs['view_func'] = cls.as_view(endpoint)
        obj.add_url_rule(rule, *args, **kwargs)
        return cls
    return decorator
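A minimal sketch of how such a decorator is typically applied, assuming a Flask app; camel_to_snake is re-implemented here as a stand-in for the module's own helper so the demo is self-contained:

import re

from flask import Flask
from flask.views import MethodView

def camel_to_snake(name):
    # stand-in for the module's helper
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

app = Flask(__name__)

@route(app, '/users')
class UserList(MethodView):
    def get(self):
        return 'all users'

# GET /users now dispatches to UserList.get via the 'user_list' endpoint
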
def scan_url(self, this_url):
    """ Submit a URL to be scanned by VirusTotal.

    :param this_url: The URL that should be scanned. This parameter accepts a list of URLs (up to 4 with the
                     standard request rate) so as to perform a batch scanning request with one single call. The
                     URLs must be separated by a new line character.
    :return: JSON response that contains scan_id and permalink.
    """
    params = {'apikey': self.api_key, 'url': this_url}
    try:
        response = requests.post(self.base + 'url/scan', params=params, proxies=self.proxies)
    except requests.RequestException as e:
        # str(e) rather than e.message: the .message attribute is Python 2 only
        return dict(error=str(e))
    return _return_response_and_status_code(response)
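A short usage sketch; the wrapper class name is hypothetical, and the two newline-separated URLs show the batch form described in the docstring:

vt = VirusTotalPublicApi('YOUR_API_KEY')  # hypothetical client class exposing scan_url()
result = vt.scan_url('http://www.example.com/\nhttp://www.example.net/')
if 'error' not in result:
    print(result)  # scan_id and permalink for each submitted URL
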
def get_institute_graph_url(start, end):
    """ Pie chart comparing institutes' usage. """
    filename = get_institute_graph_filename(start, end)
    urls = {
        'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
        'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
    }
    return urls
def sendGame(self, chat_id, game_short_name,
             disable_notification=None,
             reply_to_message_id=None,
             reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendgame """
    p = _strip(locals())
    return self._api_request('sendGame', _rectify(p))
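A one-line usage sketch, assuming the method lives on a telepot-style Bot object (token, chat id, and game name are placeholders):

import telepot

bot = telepot.Bot('123456:PLACEHOLDER-TOKEN')
bot.sendGame(chat_id=987654321, game_short_name='my_game')  # a game registered via BotFather
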
def status(self):
    """Friendly name for the ``ESTADO_OPERACAO`` field, according to the
    "Tabela de Informações do Status do SAT".
    """
    for valor, rotulo in ESTADOS_OPERACAO:
        if self.ESTADO_OPERACAO == valor:
            return rotulo
    return u'(desconhecido: {})'.format(self.ESTADO_OPERACAO)
def _arc(self, prom, sig):
    """ Computes the in-zodiaco and in-mundo arcs
    between a promissor and a significator.
    """
    arcm = arc(prom['ra'], prom['decl'],
               sig['ra'], sig['decl'],
               self.mcRA, self.lat)
    arcz = arc(prom['raZ'], prom['declZ'],
               sig['raZ'], sig['declZ'],
               self.mcRA, self.lat)
    return {
        'arcm': arcm,
        'arcz': arcz
    }
def to_python(self):
    '''A :class:`datetime.datetime` object is returned.'''
    if self.data is None:
        return None
    # don't parse data that is already native
    if isinstance(self.data, datetime.datetime):
        return self.data
    elif self.use_int:
        return datetime.datetime.utcfromtimestamp(self.data / 1000)
    elif self.format is None:
        # parse as iso8601
        return PySO8601.parse(self.data)
    else:
        return datetime.datetime.strptime(self.data, self.format)
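A quick illustration of three of the parsing paths (the iso8601 branch additionally needs the PySO8601 package), using a minimal stub that borrows the function above as a method; the attribute layout is an assumption about the surrounding field class:

import datetime

class StubField:
    to_python = to_python  # reuse the function above as a method

f = StubField()
f.use_int, f.format = False, None

f.data = datetime.datetime(2020, 1, 1)   # native value passes through
print(f.to_python())

f.use_int, f.data = True, 1577836800000  # epoch milliseconds
print(f.to_python())                     # 2020-01-01 00:00:00

f.use_int, f.format, f.data = False, '%Y-%m-%d', '2020-01-01'
print(f.to_python())                     # parsed via strptime
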
def replace_refs_factory(references, use_cleveref_default, use_eqref,
                         plusname, starname, target):
    """Returns replace_refs(key, value, fmt, meta) action that replaces
    references with format-specific content. The content is determined using
    the 'references' dict, which associates reference labels with numbers or
    string tags (e.g., { 'fig:1':1, 'fig:2':2, ...}). If 'use_cleveref_default'
    is True, or if "modifier" in the reference's attributes is "+" or "*", then
    clever referencing is used; i.e., a name is placed in front of the number
    or string tag. The 'plusname' and 'starname' lists give the singular
    and plural names for "+" and "*" clever references, respectively. The
    'target' is the LaTeX type for clever referencing (e.g., "figure",
    "equation", "table", ...)."""

    global _cleveref_tex_flag  # pylint: disable=global-statement

    # Update global if clever referencing is required by default
    _cleveref_tex_flag = _cleveref_tex_flag or use_cleveref_default

    def _insert_cleveref_fakery(key, value, meta):
        r"""Inserts TeX to support clever referencing in LaTeX documents
        if the key isn't a RawBlock. If the key is a RawBlock, then check
        the value to see if the TeX was already inserted.

        The \providecommand macro is used to fake the cleveref package's
        behaviour if it is not provided in the template via
        \usepackage{cleveref}.

        TeX is inserted into the value. Replacement elements are returned.
        """
        global _cleveref_tex_flag  # pylint: disable=global-statement

        comment1 = '% pandoc-xnos: cleveref formatting'
        tex1 = [comment1,
                r'\crefformat{%s}{%s~#2#1#3}' % (target, plusname[0]),
                r'\Crefformat{%s}{%s~#2#1#3}' % (target, starname[0])]

        if key == 'RawBlock':  # Check for existing cleveref TeX
            if value[1].startswith(comment1):
                # Append the new portion
                value[1] = value[1] + '\n' + '\n'.join(tex1[1:])
                _cleveref_tex_flag = False  # Cleveref fakery already installed
        elif key != 'RawBlock':  # Write the cleveref TeX
            _cleveref_tex_flag = False  # Cancels further attempts
            ret = []
            # Check first to see if fakery is turned off
            if 'xnos-cleveref-fake' not in meta or \
                    check_bool(get_meta(meta, 'xnos-cleveref-fake')):
                # Cleveref fakery
                tex2 = [
                    r'% pandoc-xnos: cleveref fakery',
                    r'\newcommand{\plusnamesingular}{}',
                    r'\newcommand{\starnamesingular}{}',
                    r'\newcommand{\xrefname}[1]{'
                    r'\protect\renewcommand{\plusnamesingular}{#1}}',
                    r'\newcommand{\Xrefname}[1]{'
                    r'\protect\renewcommand{\starnamesingular}{#1}}',
                    r'\providecommand{\cref}{\plusnamesingular~\ref}',
                    r'\providecommand{\Cref}{\starnamesingular~\ref}',
                    r'\providecommand{\crefformat}[2]{}',
                    r'\providecommand{\Crefformat}[2]{}']
                ret.append(RawBlock('tex', '\n'.join(tex2)))
            ret.append(RawBlock('tex', '\n'.join(tex1)))
            return ret
        return None

    def _cite_replacement(key, value, fmt, meta):
        """Returns context-dependent content to replace a Cite element."""
        assert key == 'Cite'
        attrs, label = value[0], _get_label(key, value)
        attrs = PandocAttributes(attrs, 'pandoc')
        assert label in references

        # Get the replacement value
        text = str(references[label])

        # Choose between \Cref, \cref and \ref
        use_cleveref = attrs['modifier'] in ['*', '+'] \
            if 'modifier' in attrs.kvs else use_cleveref_default
        plus = attrs['modifier'] == '+' if 'modifier' in attrs.kvs \
            else use_cleveref_default
        name = plusname[0] if plus else starname[0]  # Name used by cref

        # The replacement depends on the output format
        if fmt == 'latex':
            if use_cleveref:
                # Renew commands needed for cleveref fakery
                if 'xnos-cleveref-fake' not in meta or \
                        check_bool(get_meta(meta, 'xnos-cleveref-fake')):
                    faketex = (r'\xrefname' if plus else r'\Xrefname') + \
                        '{%s}' % name
                else:
                    faketex = ''
                macro = r'\cref' if plus else r'\Cref'
                ret = RawInline('tex', r'%s%s{%s}' % (faketex, macro, label))
            elif use_eqref:
                ret = RawInline('tex', r'\eqref{%s}' % label)
            else:
                ret = RawInline('tex', r'\ref{%s}' % label)
        else:
            if use_eqref:
                text = '(' + text + ')'
            linktext = [Math({"t": "InlineMath", "c": []}, text[1:-1])
                        if text.startswith('$') and text.endswith('$')
                        else Str(text)]
            link = elt('Link', 2)(linktext, ['#%s' % label, '']) \
                if _PANDOCVERSION < '1.16' else \
                Link(['', [], []], linktext, ['#%s' % label, ''])
            ret = ([Str(name), Space()] if use_cleveref else []) + [link]
        return ret

    def replace_refs(key, value, fmt, meta):  # pylint: disable=unused-argument
        """Replaces references with format-specific content."""
        if fmt == 'latex' and _cleveref_tex_flag:
            # Put the cleveref TeX fakery in front of the first block element
            # that isn't a RawBlock.
            if key not in ['Plain', 'Para', 'CodeBlock', 'RawBlock',
                           'BlockQuote', 'OrderedList', 'BulletList',
                           'DefinitionList', 'Header', 'HorizontalRule',
                           'Table', 'Div', 'Null']:
                return None

            # Reconstruct the block element
            el = _getel(key, value)

            # Insert cleveref TeX in front of the block element
            tex = _insert_cleveref_fakery(key, value, meta)
            if tex:
                return tex + [el]
        elif key == 'Cite' and len(value) == 3:  # Replace the reference
            return _cite_replacement(key, value, fmt, meta)
        return None

    return replace_refs
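A rough sketch of wiring a factory-produced action into a pandoc JSON filter via the pandocfilters package; the argument values below are purely illustrative:

from pandocfilters import toJSONFilter

replace_refs = replace_refs_factory(
    references={'fig:plot': 1, 'eq:model': 2},  # illustrative label -> tag map
    use_cleveref_default=True,
    use_eqref=False,
    plusname=['fig.', 'figs.'],
    starname=['Figure', 'Figures'],
    target='figure')

if __name__ == '__main__':
    # pandocfilters invokes the action as action(key, value, format, meta)
    toJSONFilter(replace_refs)
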
def socket_read(fp):
    """Buffered read from socket. Reads all data available from socket.

    @fp:     File pointer for socket.
    @return: String of characters read from buffer.
    """
    response = ''
    oldlen = 0
    newlen = 0
    while True:
        response += fp.read(buffSize)
        newlen = len(response)
        if newlen - oldlen == 0:
            break
        else:
            oldlen = newlen
    return response
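A minimal same-module usage sketch; buffSize is assumed to be a module-level constant in the original source, so it is defined here for the demo:

import socket

buffSize = 4096  # assumption: defined at module level in the original code

sock = socket.create_connection(('example.com', 80))
sock.sendall(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
fp = sock.makefile('r')  # file-like wrapper so fp.read() behaves as expected
print(socket_read(fp))
sock.close()
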
def get_client(host, userid, password, port=443, auth_method='basic',
               client_timeout=60, **kwargs):
    """get SCCI command partial function

    This function returns a SCCI command partial function.

    :param host: hostname or IP of iRMC
    :param userid: userid for iRMC with administrator privileges
    :param password: password for userid
    :param port: port number of iRMC
    :param auth_method: authentication method for iRMC ('basic' or 'digest')
    :param client_timeout: timeout for SCCI operations
    :returns: scci_cmd partial function which takes a SCCI command param
    """
    return functools.partial(scci_cmd, host, userid, password,
                             port=port, auth_method=auth_method,
                             client_timeout=client_timeout, **kwargs)
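A brief usage sketch; the host, credentials, and command payload are placeholders, and per the docstring the returned partial takes the SCCI command parameter:

client = get_client('192.0.2.10', 'admin', 'secret', port=443)
result = client('<hypothetical SCCI XML command>')  # forwarded to scci_cmd
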
def _update_pipeline_status(self):
    """Parses the .nextflow.log file for signatures of pipeline status.
    It sets the :attr:`status_info` attribute.
    """
    with open(self.log_file) as fh:
        try:
            first_line = next(fh)
        except StopIteration:  # bare `except:` narrowed to the error next() raises
            raise eh.InspectionError(
                "Could not read .nextflow.log file. Is file empty?")
        time_str = " ".join(first_line.split()[:2])
        self.time_start = time_str

        if not self.execution_command:
            try:
                self.execution_command = re.match(
                    ".*nextflow run (.*)", first_line).group(1)
            except AttributeError:
                self.execution_command = "Unknown"

        for line in fh:
            if "DEBUG nextflow.cli.CmdRun" in line:
                if not self.nextflow_version:
                    try:
                        vline = next(fh)
                        self.nextflow_version = re.match(
                            ".*Version: (.*)", vline).group(1)
                    except AttributeError:
                        self.nextflow_version = "Unknown"

            if "Session aborted" in line:
                self.run_status = "aborted"
                # Get abort cause
                try:
                    self.abort_cause = re.match(
                        ".*Cause: (.*)", line).group(1)
                except AttributeError:
                    self.abort_cause = "Unknown"
                # Get time of pipeline stop
                time_str = " ".join(line.split()[:2])
                self.time_stop = time_str
                self.send = True
                return

            if "Execution complete -- Goodbye" in line:
                self.run_status = "complete"
                # Get time of pipeline stop
                time_str = " ".join(line.split()[:2])
                self.time_stop = time_str
                self.send = True
                return

    if self.run_status not in ["running", ""]:
        self._clear_inspect()
        # Take a break to allow nextflow to restart before refreshing
        # pipeline processes
        sleep(5)
        self._get_pipeline_processes()

    self.run_status = "running"
def device_query_list(self, **kwargs):  # noqa: E501
    """List device queries. # noqa: E501

    List all device queries. The result will be paged into pages of 100. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_query_list(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: How many objects to retrieve in the page.
    :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.
    :param str after: The ID of the item after which to retrieve the next page.
    :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.
    :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> </tr> </thead> <tbody> <tr> <td>created_at</td> <td>&#10003;</td> <td>&#10003;</td> <td>&#10003;</td> </tr> <tr> <td>etag</td> <td>&#10003;</td> <td>&#10003;</td> <td>&#10003;</td> </tr> <tr> <td>id</td> <td>&#10003;</td> <td>&#10003;</td> <td>&nbsp;</td> </tr> <tr> <td>name</td> <td>&#10003;</td> <td>&#10003;</td> <td>&nbsp;</td> </tr> <tr> <td>query</td> <td>&#10003;</td> <td>&#10003;</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>&#10003;</td> <td>&#10003;</td> <td>&#10003;</td> </tr> </tbody> </table> The examples below show the queries in *unencoded* form. ###### By device query properties (all properties are filterable): For example: ```description={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to – field name suffixed with ```__gte``` * less than or equal to – field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```query_id=0158d38771f70000000000010010038c&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z``` Encoded: ```filter=query_id%3D0158d38771f70000000000010010038c%26created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z%26created_at__lte%3D2016-11-30T00%3A00%3A00Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__nin=query1,query2`
    :return: DeviceQueryPage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.device_query_list_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.device_query_list_with_http_info(**kwargs)  # noqa: E501
        return data
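The filter encoding described in the docstring is easy to reproduce with the standard library; a small sketch with illustrative field values:

from urllib.parse import quote

raw = 'created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z'
print(quote(raw, safe=''))
# created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z%26created_at__lte%3D2016-12-30T00%3A00%3A00Z
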
def _is_dir(self, f):
    '''Check if the given in-dap file is a directory'''
    return self._tar.getmember(f).type == tarfile.DIRTYPE
def from_lines(lines: Iterable[str], **kwargs) -> BELGraph:
    """Load a BEL graph from an iterable over the lines of a BEL script.

    :param lines: An iterable of strings (the lines in a BEL script)

    The remaining keyword arguments are passed to :func:`pybel.io.line_utils.parse_lines`.
    """
    graph = BELGraph()
    parse_lines(graph=graph, lines=lines, **kwargs)
    return graph
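A minimal usage sketch, assuming the surrounding pybel package provides BELGraph and parse_lines (the file path is a placeholder):

with open('statements.bel') as file:  # hypothetical BEL script
    graph = from_lines(file)          # a file object iterates over its lines
print(graph.number_of_nodes(), graph.number_of_edges())
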
def managed(name, users=None, defaults=None):
    '''
    Manages the configuration of the users on the device, as specified in the
    state SLS file. Users not defined in that file will be removed, whilst
    users not configured on the device will be added.

    SLS Example:

    .. code-block:: yaml

        netusers_example:
            netusers.managed:
                - users:
                    admin:
                        level: 15
                        password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
                        sshkeys: []
                    restricted:
                        level: 1
                        password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7
                    martin:
                        level: 15
                        password: ''
                        sshkeys:
                            - ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
                              JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
                              X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
                              trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
                              pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
                              7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
                              bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg
                    jonathan:
                        level: 15
                        password: ''
                        sshkeys:
                            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
                              ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
                              Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
                              Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
                              edb+BAc3aww0naeWpogjSt+We7y2N

    CLI Example:

    .. code-block:: bash

        salt 'edge01.kix01' state.sls router.users

    Output example (raw python - can be reused in other modules):

    .. code-block:: python

        {
            'netusers_|-netusers_example_|-netusers_example_|-managed': {
                'comment': 'Configuration updated!',
                'name': 'netusers_example',
                'start_time': '10:57:08.678811',
                '__id__': 'netusers_example',
                'duration': 1620.982,
                '__run_num__': 0,
                'changes': {
                    'updated': {
                        'admin': {
                            'level': 15
                        },
                        'restricted': {
                            'level': 1
                        },
                        'martin': {
                            'sshkeys': [
                                'ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
                                JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
                                X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
                                trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
                                pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
                                7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
                                bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg'
                            ]
                        }
                    },
                    'added': {
                        'jonathan': {
                            'password': '',
                            'sshkeys': [
                                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
                                ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
                                Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
                                Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
                                edb+BAc3aww0naeWpogjSt+We7y2N'
                            ],
                            'level': 15
                        }
                    },
                    'removed': {
                    }
                },
                'result': True
            }
        }

    CLI Output:

    .. code-block:: bash

        edge01.kix01:
        ----------
                  ID: netusers_example
            Function: netusers.managed
              Result: True
             Comment: Configuration updated!
             Started: 11:03:31.957725
            Duration: 1220.435 ms
             Changes:
                      ----------
                      added:
                          ----------
                          jonathan:
                              ----------
                              level:
                                  15
                              password:
                              sshkeys:
                                  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
                                    R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
                                    qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
                                    xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
                                    xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
                      removed:
                          ----------
                      updated:
                          ----------
                          martin:
                              ----------
                              sshkeys:
                                  - ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4
                                    vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
                                    KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
                                    Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
                                    M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
                                    +wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
                                    81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
                                    +zvMmv8KvQgHg
                          admin:
                              ----------
                              level:
                                  15
                          restricted:
                              ----------
                              level:
                                  1

        Summary for edge01.kix01
        ------------
        Succeeded: 1 (changed=1)
        Failed:    0
        ------------
        Total states run:     1
        Total run time:   1.220 s
    '''
    result = False
    comment = ''
    changes = {}
    ret = {
        'name': name,
        'changes': changes,
        'result': result,
        'comment': comment
    }
    users = _ordered_dict_to_dict(users)
    defaults = _ordered_dict_to_dict(defaults)
    expected_users = _expand_users(users, defaults)
    valid, message = _check_users(expected_users)
    if not valid:  # check and clean
        ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
        return ret
    # ----- Retrieve existing users configuration and determine differences ------------------------------------------->
    users_output = _retrieve_users()
    if not users_output.get('result'):
        ret['comment'] = 'Cannot retrieve users from the device: {reason}'.format(
            reason=users_output.get('comment')
        )
        return ret
    configured_users = users_output.get('out', {})
    if configured_users == expected_users:
        ret.update({
            'comment': 'Users already configured as needed.',
            'result': True
        })
        return ret
    diff = _compute_diff(configured_users, expected_users)
    users_to_add = diff.get('add', {})
    users_to_update = diff.get('update', {})
    users_to_remove = diff.get('remove', {})
    changes = {
        'added': users_to_add,
        'updated': users_to_update,
        'removed': users_to_remove
    }
    ret.update({
        'changes': changes
    })
    if __opts__['test'] is True:
        ret.update({
            'result': None,
            'comment': 'Testing mode: configuration was not changed!'
        })
        return ret
    # <---- Retrieve existing users configuration and determine users to be added/removed ------------------------------
    # ----- Call _set_users and _delete_users as needed --------------------------------------------------------------->
    expected_config_change = False
    successfully_changed = True
    if users_to_add:
        _set = _set_users(users_to_add)
        if _set.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot configure new users: {reason}'.format(
                reason=_set.get('comment')
            )
    if users_to_update:
        _update = _update_users(users_to_update)
        if _update.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot update the users configuration: {reason}'.format(
                reason=_update.get('comment')
            )
    if users_to_remove:
        _delete = _delete_users(users_to_remove)
        if _delete.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot remove users: {reason}'.format(
                reason=_delete.get('comment')
            )
    # <---- Call _set_users and _delete_users as needed ----------------------------------------------------------------
    # ----- Try to commit changes ------------------------------------------------------------------------------------->
    if expected_config_change and successfully_changed:
        config_result, config_comment = __salt__['net.config_control']()
        result = config_result
        comment += config_comment
    # <---- Try to commit changes --------------------------------------------------------------------------------------
    if expected_config_change and result and not comment:
        comment = 'Configuration updated!'
    ret.update({
        'result': result,
        'comment': comment
    })
    return ret
file will be remove whilst users not configured on the device, will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- users:
admin:
level: 15
password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
sshkeys: []
restricted:
level: 1
password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7
martin:
level: 15
password: ''
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg
jonathan:
level: 15
password: ''
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N
CLI Example:
salt 'edge01.kix01' state.sls router.users
Output example (raw python - can be reused in other modules):
.. code-block:: python
{
'netusers_|-netusers_example_|-netusers_example_|-managed': {
'comment': 'Configuration updated!',
'name': 'netusers_example',
'start_time': '10:57:08.678811',
'__id__': 'netusers_example',
'duration': 1620.982,
'__run_num__': 0,
'changes': {
'updated': {
'admin': {
'level': 15
},
'restricted': {
'level': 1
},
'martin': {
'sshkeys': [
'ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg'
]
}
},
'added': {
'jonathan': {
'password': '',
'sshkeys': [
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N'
],
'level': 15
}
},
'removed': {
}
},
'result': True
}
}
CLI Output:
.. code-block:: bash
edge01.kix01:
----------
ID: netusers_example
Function: netusers.managed
Result: True
Comment: Configuration updated!
Started: 11:03:31.957725
Duration: 1220.435 ms
Changes:
----------
added:
----------
jonathan:
----------
level:
15
password:
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
removed:
----------
updated:
----------
martin:
----------
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4
vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
+wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
+zvMmv8KvQgHg
admin:
----------
level:
15
restricted:
----------
level:
1
Summary for edge01.kix01
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 1.220 s | Below is the the instruction that describes the task:
### Input:
Manages the configuration of the users on the device, as specified in the state SLS file. Users not defined in that
file will be remove whilst users not configured on the device, will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- users:
admin:
level: 15
password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
sshkeys: []
restricted:
level: 1
password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7
martin:
level: 15
password: ''
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg
jonathan:
level: 15
password: ''
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N
CLI Example:
salt 'edge01.kix01' state.sls router.users
Output example (raw python - can be reused in other modules):
.. code-block:: python
{
'netusers_|-netusers_example_|-netusers_example_|-managed': {
'comment': 'Configuration updated!',
'name': 'netusers_example',
'start_time': '10:57:08.678811',
'__id__': 'netusers_example',
'duration': 1620.982,
'__run_num__': 0,
'changes': {
'updated': {
'admin': {
'level': 15
},
'restricted': {
'level': 1
},
'martin': {
'sshkeys': [
'ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg'
]
}
},
'added': {
'jonathan': {
'password': '',
'sshkeys': [
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N'
],
'level': 15
}
},
'removed': {
}
},
'result': True
}
}
CLI Output:
.. code-block:: bash
edge01.kix01:
----------
ID: netusers_example
Function: netusers.managed
Result: True
Comment: Configuration updated!
Started: 11:03:31.957725
Duration: 1220.435 ms
Changes:
----------
added:
----------
jonathan:
----------
level:
15
password:
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
removed:
----------
updated:
----------
martin:
----------
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4
vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
+wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
+zvMmv8KvQgHg
admin:
----------
level:
15
restricted:
----------
level:
1
Summary for edge01.kix01
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 1.220 s
### Response:
def managed(name, users=None, defaults=None):
'''
Manages the configuration of the users on the device, as specified in the state SLS file. Users not defined in that
file will be removed, whilst users not configured on the device will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- users:
admin:
level: 15
password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
sshkeys: []
restricted:
level: 1
password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7
martin:
level: 15
password: ''
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg
jonathan:
level: 15
password: ''
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N
CLI Example:
salt 'edge01.kix01' state.sls router.users
Output example (raw python - can be reused in other modules):
.. code-block:: python
{
'netusers_|-netusers_example_|-netusers_example_|-managed': {
'comment': 'Configuration updated!',
'name': 'netusers_example',
'start_time': '10:57:08.678811',
'__id__': 'netusers_example',
'duration': 1620.982,
'__run_num__': 0,
'changes': {
'updated': {
'admin': {
'level': 15
},
'restricted': {
'level': 1
},
'martin': {
'sshkeys': [
'ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg'
]
}
},
'added': {
'jonathan': {
'password': '',
'sshkeys': [
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N'
],
'level': 15
}
},
'removed': {
}
},
'result': True
}
}
CLI Output:
.. code-block:: bash
edge01.kix01:
----------
ID: netusers_example
Function: netusers.managed
Result: True
Comment: Configuration updated!
Started: 11:03:31.957725
Duration: 1220.435 ms
Changes:
----------
added:
----------
jonathan:
----------
level:
15
password:
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
removed:
----------
updated:
----------
martin:
----------
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4
vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
+wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
+zvMmv8KvQgHg
admin:
----------
level:
15
restricted:
----------
level:
1
Summary for edge01.kix01
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 1.220 s
'''
result = False
comment = ''
changes = {}
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
users = _ordered_dict_to_dict(users)
defaults = _ordered_dict_to_dict(defaults)
expected_users = _expand_users(users, defaults)
valid, message = _check_users(expected_users)
if not valid: # check and clean
ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
return ret
# ----- Retrieve existing users configuration and determine differences ------------------------------------------->
users_output = _retrieve_users()
if not users_output.get('result'):
ret['comment'] = 'Cannot retrieve users from the device: {reason}'.format(
reason=users_output.get('comment')
)
return ret
configured_users = users_output.get('out', {})
if configured_users == expected_users:
ret.update({
'comment': 'Users already configured as needed.',
'result': True
})
return ret
diff = _compute_diff(configured_users, expected_users)
users_to_add = diff.get('add', {})
users_to_update = diff.get('update', {})
users_to_remove = diff.get('remove', {})
changes = {
'added': users_to_add,
'updated': users_to_update,
'removed': users_to_remove
}
ret.update({
'changes': changes
})
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return ret
# <---- Retrieve existing users configuration and determine differences --------------------------------------------
# ----- Call _set_users and _delete_users as needed --------------------------------------------------------------->
expected_config_change = False
successfully_changed = True
if users_to_add:
_set = _set_users(users_to_add)
if _set.get('result'):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += 'Cannot configure new users: {reason}'.format(
reason=_set.get('comment')
)
if users_to_update:
_update = _update_users(users_to_update)
if _update.get('result'):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += 'Cannot update the users configuration: {reason}'.format(
reason=_update.get('comment')
)
if users_to_remove:
_delete = _delete_users(users_to_remove)
if _delete.get('result'):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += 'Cannot remove users: {reason}'.format(
reason=_delete.get('comment')
)
# <---- Call _set_users and _delete_users as needed ----------------------------------------------------------------
# ----- Try to commit changes ------------------------------------------------------------------------------------->
if expected_config_change and successfully_changed:
config_result, config_comment = __salt__['net.config_control']()
result = config_result
comment += config_comment
# <---- Try to commit changes --------------------------------------------------------------------------------------
if expected_config_change and result and not comment:
comment = 'Configuration updated!'
ret.update({
'result': result,
'comment': comment
})
return ret |
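The add/update/remove split above is produced by the internal `_compute_diff` helper, whose body is not part of this record. A minimal sketch of such a diff over plain user dicts (the function body and the 'add'/'update'/'remove' keys are assumptions inferred from how the state consumes the result):
.. code-block:: python

    # Minimal sketch of a user-map diff; illustrative, not the Salt source.
    def compute_users_diff(configured, expected):
        add = {u: cfg for u, cfg in expected.items() if u not in configured}
        remove = {u: cfg for u, cfg in configured.items() if u not in expected}
        update = {}
        for user, cfg in expected.items():
            if user in configured and configured[user] != cfg:
                # Carry only the fields that actually differ.
                update[user] = {k: v for k, v in cfg.items()
                                if configured[user].get(k) != v}
        return {'add': add, 'update': update, 'remove': remove}

    configured = {'admin': {'level': 10}, 'old': {'level': 1}}
    expected = {'admin': {'level': 15}, 'new': {'level': 5}}
    print(compute_users_diff(configured, expected))
    # {'add': {'new': {'level': 5}}, 'update': {'admin': {'level': 15}},
    #  'remove': {'old': {'level': 1}}}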
def register_incoming_conn(self, conn):
"""Add incoming connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | Add incoming connection into the heap. | Below is the instruction that describes the task:
### Input:
Add incoming connection into the heap.
### Response:
def register_incoming_conn(self, conn):
"""Add incoming connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() |
def status(name=None, id=None):
'''
List VMs running on the host, or only the VM specified by ``id``. When
both a name and id are provided, the id is ignored.
name:
Name of the defined VM.
id:
VM id.
CLI Example:
.. code-block:: bash
salt '*' vmctl.status # to list all VMs
salt '*' vmctl.status name=web1 # to get a single VM
'''
ret = {}
cmd = ['vmctl', 'status']
result = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered running vmctl',
info={'error': [result['stderr']], 'changes': ret}
)
# Grab the header and save it with the lowercase names.
header = result['stdout'].splitlines()[0].split()
header = [x.lower() for x in header]
# A VM can be in one of the following states (from vmm.c:vcpu_state_decode())
# - stopped
# - running
# - requesting termination
# - terminated
# - unknown
for line in result['stdout'].splitlines()[1:]:
data = line.split()
vm = dict(list(zip(header, data)))
vmname = vm.pop('name')
if vm['pid'] == '-':
# If the VM has no PID it's not running.
vm['state'] = 'stopped'
elif vmname and data[-2] == '-':
# When a VM does have a PID and the second to last field is a '-', it's
# transitioning to another state. A VM name itself cannot contain a
# '-' so it's safe to split on '-'.
vm['state'] = data[-1]
else:
vm['state'] = 'running'
# When the status is requested of a single VM (by name) which is stopping,
# vmctl doesn't print the status line. So we'll parse the full list and
# return when we've found the requested VM.
if id and int(vm['id']) == id:
return {vmname: vm}
elif name and vmname == name:
return {vmname: vm}
else:
ret[vmname] = vm
# If an id or name was provided and we got this far, the requested VM
# does not exist.
if id or name:
return {}
return ret | List VMs running on the host, or only the VM specified by ``id``. When
both a name and id are provided, the id is ignored.
name:
Name of the defined VM.
id:
VM id.
CLI Example:
.. code-block:: bash
salt '*' vmctl.status # to list all VMs
salt '*' vmctl.status name=web1 # to get a single VM | Below is the instruction that describes the task:
### Input:
List VMs running on the host, or only the VM specified by ``id``. When
both a name and id are provided, the id is ignored.
name:
Name of the defined VM.
id:
VM id.
CLI Example:
.. code-block:: bash
salt '*' vmctl.status # to list all VMs
salt '*' vmctl.status name=web1 # to get a single VM
### Response:
def status(name=None, id=None):
'''
List VMs running on the host, or only the VM specified by ``id``. When
both a name and id are provided, the id is ignored.
name:
Name of the defined VM.
id:
VM id.
CLI Example:
.. code-block:: bash
salt '*' vmctl.status # to list all VMs
salt '*' vmctl.status name=web1 # to get a single VM
'''
ret = {}
cmd = ['vmctl', 'status']
result = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered running vmctl',
info={'error': [result['stderr']], 'changes': ret}
)
# Grab the header and save it with the lowercase names.
header = result['stdout'].splitlines()[0].split()
header = [x.lower() for x in header]
# A VM can be in one of the following states (from vmm.c:vcpu_state_decode())
# - stopped
# - running
# - requesting termination
# - terminated
# - unknown
for line in result['stdout'].splitlines()[1:]:
data = line.split()
vm = dict(list(zip(header, data)))
vmname = vm.pop('name')
if vm['pid'] == '-':
# If the VM has no PID it's not running.
vm['state'] = 'stopped'
elif vmname and data[-2] == '-':
# When a VM does have a PID and the second to last field is a '-', it's
# transitioning to another state. A VM name itself cannot contain a
# '-' so it's safe to split on '-'.
vm['state'] = data[-1]
else:
vm['state'] = 'running'
# When the status is requested of a single VM (by name) which is stopping,
# vmctl doesn't print the status line. So we'll parse the full list and
# return when we've found the requested VM.
if id and int(vm['id']) == id:
return {vmname: vm}
elif name and vmname == name:
return {vmname: vm}
else:
ret[vmname] = vm
# If an id or name was provided and we got this far, the requested VM
# does not exist.
if id or name:
return {}
return ret |
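The header-zip parsing above can be exercised in isolation. A short sketch against fabricated `vmctl status` output (the column set is illustrative; the real layout depends on the OpenBSD release):
.. code-block:: python

    # Fabricated vmctl status output; a PID of '-' marks a stopped VM.
    sample = (
        "ID PID VCPUS MAXMEM CURMEM TTY OWNER NAME\n"
        "1 70712 1 512M 128M ttyp8 root web1\n"
        "2 - 1 512M - - root web2"
    )
    lines = sample.splitlines()
    header = [col.lower() for col in lines[0].split()]
    for line in lines[1:]:
        vm = dict(zip(header, line.split()))
        vm['state'] = 'stopped' if vm['pid'] == '-' else 'running'
        print(vm['name'], vm['state'])
    # web1 running
    # web2 stopped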
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsapfs_file_entry.get_modification_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) | dfdatetime.DateTimeValues: modification time or None if not available. | Below is the instruction that describes the task:
### Input:
dfdatetime.DateTimeValues: modification time or None if not available.
### Response:
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsapfs_file_entry.get_modification_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp) |
def add_plugin(self, plugin, call):
"""Add plugin to list of plugins.
Will be added if it has the attribute I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
self.plugins.append((plugin, meth)) | Add plugin to list of plugins.
Will be added if it has the attribute I'm bound to. | Below is the instruction that describes the task:
### Input:
Add plugin to list of plugins.
Will be added if it has the attribute I'm bound to.
### Response:
def add_plugin(self, plugin, call):
"""Add plugin to list of plugins.
Will be added if it has the attribute I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
self.plugins.append((plugin, meth)) |
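Because registration goes through `getattr`, a plugin takes part only in the hooks it actually defines. A self-contained sketch of the same dispatch pattern (class and hook names are invented for illustration):
.. code-block:: python

    # Attribute-based plugin dispatch, mirroring add_plugin above:
    # only plugins that define the hook method are registered for it.
    class HookProxy:
        def __init__(self, call):
            self.call = call
            self.plugins = []

        def add_plugin(self, plugin):
            meth = getattr(plugin, self.call, None)
            if meth is not None:
                self.plugins.append((plugin, meth))

        def __call__(self, *args, **kwargs):
            for _plugin, meth in self.plugins:
                meth(*args, **kwargs)

    class Reporter:
        def on_start(self):
            print('reporter: started')

    class Silent:
        pass  # defines no hooks, so it is never registered

    proxy = HookProxy('on_start')
    proxy.add_plugin(Reporter())
    proxy.add_plugin(Silent())
    proxy()  # prints only 'reporter: started'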
def delete_file(self, file_id):
'''Delete a file.
Args:
file_id (str): The UUID of the file to delete.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(file_id):
raise StorageArgumentException(
'Invalid UUID for file_id: {0}'.format(file_id))
self._authenticated_request \
.to_endpoint('file/{}/'.format(file_id)) \
.delete() | Delete a file.
Args:
file_id (str): The UUID of the file to delete.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | Below is the instruction that describes the task:
### Input:
Delete a file.
Args:
file_id (str): The UUID of the file to delete.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
### Response:
def delete_file(self, file_id):
'''Delete a file.
Args:
file_id (str): The UUID of the file to delete.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(file_id):
raise StorageArgumentException(
'Invalid UUID for file_id: {0}'.format(file_id))
self._authenticated_request \
.to_endpoint('file/{}/'.format(file_id)) \
.delete() |
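`is_valid_uuid` is an internal helper whose body is not shown in this record. A plausible stdlib-only implementation (an assumption, not the client's actual code):
.. code-block:: python

    import uuid

    # Plausible is_valid_uuid: accept anything uuid.UUID can parse.
    def is_valid_uuid(value):
        try:
            uuid.UUID(str(value))
            return True
        except ValueError:
            return False

    assert is_valid_uuid('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
    assert not is_valid_uuid('not-a-uuid')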
def key_gen(self, key_name, type, size=2048, **kwargs):
"""Adds a new public key that can be used for name_publish.
.. code-block:: python
>>> c.key_gen('example_key_name')
{'Name': 'example_key_name',
'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'}
Parameters
----------
key_name : str
Name of the new Key to be generated. Used to reference the Keys.
type : str
Type of key to generate. The current possible keys types are:
* ``"rsa"``
* ``"ed25519"``
size : int
Bitsize of key to generate
Returns
-------
dict : Key name and Key Id
"""
opts = {"type": type, "size": size}
kwargs.setdefault("opts", opts)
args = (key_name,)
return self._client.request('/key/gen', args,
decoder='json', **kwargs) | Adds a new public key that can be used for name_publish.
.. code-block:: python
>>> c.key_gen('example_key_name')
{'Name': 'example_key_name',
'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'}
Parameters
----------
key_name : str
Name of the new Key to be generated. Used to reference the Keys.
type : str
Type of key to generate. The current possible keys types are:
* ``"rsa"``
* ``"ed25519"``
size : int
Bitsize of key to generate
Returns
-------
dict : Key name and Key Id | Below is the instruction that describes the task:
### Input:
Adds a new public key that can be used for name_publish.
.. code-block:: python
>>> c.key_gen('example_key_name')
{'Name': 'example_key_name',
'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'}
Parameters
----------
key_name : str
Name of the new Key to be generated. Used to reference the Keys.
type : str
Type of key to generate. The current possible keys types are:
* ``"rsa"``
* ``"ed25519"``
size : int
Bitsize of key to generate
Returns
-------
dict : Key name and Key Id
### Response:
def key_gen(self, key_name, type, size=2048, **kwargs):
"""Adds a new public key that can be used for name_publish.
.. code-block:: python
>>> c.key_gen('example_key_name')
{'Name': 'example_key_name',
'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'}
Parameters
----------
key_name : str
Name of the new Key to be generated. Used to reference the Keys.
type : str
Type of key to generate. The current possible keys types are:
* ``"rsa"``
* ``"ed25519"``
size : int
Bitsize of key to generate
Returns
-------
dict : Key name and Key Id
"""
opts = {"type": type, "size": size}
kwargs.setdefault("opts", opts)
args = (key_name,)
return self._client.request('/key/gen', args,
decoder='json', **kwargs) |
def get_custom_value(self, field_name):
""" Get a value for a specified custom field
field_name - Name of the custom field you want.
"""
custom_field = self.get_custom_field(field_name)
return CustomFieldValue.objects.get_or_create(
field=custom_field, object_id=self.id)[0].value | Get a value for a specified custom field
field_name - Name of the custom field you want. | Below is the instruction that describes the task:
### Input:
Get a value for a specified custom field
field_name - Name of the custom field you want.
### Response:
def get_custom_value(self, field_name):
""" Get a value for a specified custom field
field_name - Name of the custom field you want.
"""
custom_field = self.get_custom_field(field_name)
return CustomFieldValue.objects.get_or_create(
field=custom_field, object_id=self.id)[0].value |
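The trailing `[0]` works because `get_or_create` returns an `(object, created)` pair. A tiny pure-Python mimic of that contract (illustrative; the real method is Django's ORM):
.. code-block:: python

    # Mimic of Django's get_or_create contract: (object, created) tuple,
    # which is why the code above indexes the result with [0].
    _rows = {}

    def get_or_create(**lookup):
        key = tuple(sorted(lookup.items()))
        if key in _rows:
            return _rows[key], False
        _rows[key] = dict(lookup, value=None)  # new rows start empty
        return _rows[key], True

    row, created = get_or_create(field='colour', object_id=7)
    assert created is True
    assert get_or_create(field='colour', object_id=7)[1] is False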
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas."""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise BasicAerError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
for experiment in qobj.experiments:
name = experiment.header.name
if experiment.config.memory_slots == 0:
logger.warning('No classical registers in circuit "%s", '
'counts will be empty.', name)
elif 'measure' not in [op.name for op in experiment.instructions]:
logger.warning('No measurements in circuit "%s", '
'classical register will remain all zeros.', name) | Semantic validations of the qobj which cannot be done via schemas. | Below is the instruction that describes the task:
### Input:
Semantic validations of the qobj which cannot be done via schemas.
### Response:
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas."""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise BasicAerError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
for experiment in qobj.experiments:
name = experiment.header.name
if experiment.config.memory_slots == 0:
logger.warning('No classical registers in circuit "%s", '
'counts will be empty.', name)
elif 'measure' not in [op.name for op in experiment.instructions]:
logger.warning('No measurements in circuit "%s", '
'classical register will remain all zeros.', name) |
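Both warnings above flag circuits that cannot yield useful counts. A minimal circuit that passes both checks (a sketch assuming Qiskit's standard `QuantumCircuit` API):
.. code-block:: python

    from qiskit import QuantumCircuit

    # Two qubits, two classical bits (memory slots), and an explicit
    # measure instruction, so neither warning above fires.
    qc = QuantumCircuit(2, 2, name='bell')
    qc.h(0)
    qc.cx(0, 1)
    qc.measure([0, 1], [0, 1])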
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, "NOTIFICATION_LANGUAGE_MODULE", False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(".")
model = models.get_model(app_label, model_name)
# pylint: disable-msg=W0212
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, "language"):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable | Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications. | Below is the instruction that describes the task:
### Input:
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
### Response:
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, "NOTIFICATION_LANGUAGE_MODULE", False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(".")
model = models.get_model(app_label, model_name)
# pylint: disable-msg=W0212
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, "language"):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable |
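`NOTIFICATION_LANGUAGE_MODULE` is expected to hold a dotted "app_label.ModelName" string; the split-and-lookup step can be shown without a Django project (a plain dict stands in for the app registry, and all names are hypothetical):
.. code-block:: python

    # Dict stand-in for Django's app registry; model names are stored
    # lower-cased, roughly as in the real registry.
    REGISTRY = {('profiles', 'languagepreference'): type('LanguagePreference', (), {})}

    def get_model(app_label, model_name):
        return REGISTRY.get((app_label, model_name.lower()))

    setting = 'profiles.LanguagePreference'  # hypothetical setting value
    app_label, model_name = setting.split('.')
    assert get_model(app_label, model_name) is not None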
def pretty_template_plot(template, size=(10.5, 7.5), background=False,
picks=False, **kwargs):
"""
Plot of a single template, possibly within background data.
:type template: obspy.core.stream.Stream
:param template: Template stream to plot
:type size: tuple
:param size: tuple of plot size
:type background: obspy.core.stream.stream
:param background: Stream to plot the template within.
:type picks: list
:param picks: List of :class:`obspy.core.event.origin.Pick` picks.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import pretty_template_plot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA', 'TEST_',
... '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> pretty_template_plot(template, background=st, # doctest: +SKIP
... picks=event.picks) # doctest: +SKIP
.. plot::
from obspy import read, read_events
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import pretty_template_plot
import os
# Get the path to the test data
import eqcorrscan
import os
TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
test_file = os.path.join(
TEST_PATH, 'REA', 'TEST_', '01-0411-15L.S201309')
test_wavefile = os.path.join(
TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
pretty_template_plot(template, background=st,
picks=event.picks)
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(len(template), 1, sharex=True, figsize=size)
if len(template) > 1:
axes = axes.ravel()
if not background:
mintime = template.sort(['starttime'])[0].stats.starttime
else:
mintime = background.sort(['starttime'])[0].stats.starttime
template.sort(['network', 'station', 'starttime'])
lengths = []
lines = []
labels = []
for i, tr in enumerate(template):
# Cope with a single channel template case.
if len(template) > 1:
axis = axes[i]
else:
axis = axes
delay = tr.stats.starttime - mintime
y = tr.data
x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y))
x += delay
if background:
btr = background.select(station=tr.stats.station,
channel=tr.stats.channel)[0]
bdelay = btr.stats.starttime - mintime
by = btr.data
bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by))
bx += bdelay
axis.plot(bx, by, 'k', linewidth=1)
template_line, = axis.plot(x, y, 'r', linewidth=1.1,
label='Template')
if i == 0:
lines.append(template_line)
labels.append('Template')
lengths.append(max(bx[-1], x[-1]))
else:
template_line, = axis.plot(x, y, 'k', linewidth=1.1,
label='Template')
if i == 0:
lines.append(template_line)
labels.append('Template')
lengths.append(x[-1])
# print(' '.join([tr.stats.station, str(len(x)), str(len(y))]))
axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]),
rotation=0, horizontalalignment='right')
axis.yaxis.set_ticks([])
# Plot the picks if they are given
if picks:
tr_picks = [pick for pick in picks if
pick.waveform_id.station_code == tr.stats.station and
pick.waveform_id.channel_code[0] +
pick.waveform_id.channel_code[-1] ==
tr.stats.channel[0] + tr.stats.channel[-1]]
for pick in tr_picks:
if not pick.phase_hint:
pcolor = 'k'
label = 'Unknown pick'
elif 'P' in pick.phase_hint.upper():
pcolor = 'red'
label = 'P-pick'
elif 'S' in pick.phase_hint.upper():
pcolor = 'blue'
label = 'S-pick'
else:
pcolor = 'k'
label = 'Unknown pick'
pdelay = pick.time - mintime
# print(pdelay)
line = axis.axvline(x=pdelay, color=pcolor, linewidth=2,
linestyle='--', label=label)
if label not in labels:
lines.append(line)
labels.append(label)
# axes[i].plot([pdelay, pdelay], [])
axis.set_xlim([0, max(lengths)])
if len(template) > 1:
axis = axes[len(template) - 1]
else:
axis = axes
axis.set_xlabel('Time (s) from start of template')
plt.figlegend(lines, labels, 'upper right')
title = kwargs.get("title") or None
if title:
if len(template) > 1:
axes[0].set_title(title)
else:
axes.set_title(title)
kwargs.pop("title") # Do not give title to _finalise_figure
else:
plt.subplots_adjust(top=0.98)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | Plot of a single template, possibly within background data.
:type template: obspy.core.stream.Stream
:param template: Template stream to plot
:type size: tuple
:param size: tuple of plot size
:type background: obspy.core.stream.stream
:param background: Stream to plot the template within.
:type picks: list
:param picks: List of :class:`obspy.core.event.origin.Pick` picks.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import pretty_template_plot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA', 'TEST_',
... '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> pretty_template_plot(template, background=st, # doctest: +SKIP
... picks=event.picks) # doctest: +SKIP
.. plot::
from obspy import read, read_events
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import pretty_template_plot
import os
# Get the path to the test data
import eqcorrscan
import os
TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
test_file = os.path.join(
TEST_PATH, 'REA', 'TEST_', '01-0411-15L.S201309')
test_wavefile = os.path.join(
TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
pretty_template_plot(template, background=st,
picks=event.picks) | Below is the instruction that describes the task:
### Input:
Plot of a single template, possibly within background data.
:type template: obspy.core.stream.Stream
:param template: Template stream to plot
:type size: tuple
:param size: tuple of plot size
:type background: obspy.core.stream.stream
:param background: Stream to plot the template within.
:type picks: list
:param picks: List of :class:`obspy.core.event.origin.Pick` picks.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import pretty_template_plot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA', 'TEST_',
... '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> pretty_template_plot(template, background=st, # doctest: +SKIP
... picks=event.picks) # doctest: +SKIP
.. plot::
from obspy import read, read_events
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import pretty_template_plot
import os
# Get the path to the test data
import eqcorrscan
import os
TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
test_file = os.path.join(
TEST_PATH, 'REA', 'TEST_', '01-0411-15L.S201309')
test_wavefile = os.path.join(
TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
pretty_template_plot(template, background=st,
picks=event.picks)
### Response:
def pretty_template_plot(template, size=(10.5, 7.5), background=False,
picks=False, **kwargs):
"""
Plot of a single template, possibly within background data.
:type template: obspy.core.stream.Stream
:param template: Template stream to plot
:type size: tuple
:param size: tuple of plot size
:type background: obspy.core.stream.stream
:param background: Stream to plot the template within.
:type picks: list
:param picks: List of :class:`obspy.core.event.origin.Pick` picks.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read, read_events
>>> import os
>>> from eqcorrscan.core import template_gen
>>> from eqcorrscan.utils.plotting import pretty_template_plot
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>>
>>> test_file = os.path.join(TEST_PATH, 'REA', 'TEST_',
... '01-0411-15L.S201309')
>>> test_wavefile = os.path.join(
... TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
>>> event = read_events(test_file)[0]
>>> st = read(test_wavefile)
>>> st = st.filter('bandpass', freqmin=2.0, freqmax=15.0)
>>> for tr in st:
... tr = tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
... # Hack around seisan 2-letter channel naming
... tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
>>> template = template_gen._template_gen(event.picks, st, 2)
>>> pretty_template_plot(template, background=st, # doctest: +SKIP
... picks=event.picks) # doctest: +SKIP
.. plot::
from obspy import read, read_events
from eqcorrscan.core import template_gen
from eqcorrscan.utils.plotting import pretty_template_plot
import os
# Get the path to the test data
import eqcorrscan
import os
TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
test_file = os.path.join(
TEST_PATH, 'REA', 'TEST_', '01-0411-15L.S201309')
test_wavefile = os.path.join(
TEST_PATH, 'WAV', 'TEST_', '2013-09-01-0410-35.DFDPC_024_00')
event = read_events(test_file)[0]
st = read(test_wavefile)
st.filter('bandpass', freqmin=2.0, freqmax=15.0)
for tr in st:
tr.trim(tr.stats.starttime + 30, tr.stats.endtime - 30)
tr.stats.channel = tr.stats.channel[0] + tr.stats.channel[-1]
template = template_gen._template_gen(event.picks, st, 2)
pretty_template_plot(template, background=st,
picks=event.picks)
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(len(template), 1, sharex=True, figsize=size)
if len(template) > 1:
axes = axes.ravel()
if not background:
mintime = template.sort(['starttime'])[0].stats.starttime
else:
mintime = background.sort(['starttime'])[0].stats.starttime
template.sort(['network', 'station', 'starttime'])
lengths = []
lines = []
labels = []
for i, tr in enumerate(template):
# Cope with a single channel template case.
if len(template) > 1:
axis = axes[i]
else:
axis = axes
delay = tr.stats.starttime - mintime
y = tr.data
x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y))
x += delay
if background:
btr = background.select(station=tr.stats.station,
channel=tr.stats.channel)[0]
bdelay = btr.stats.starttime - mintime
by = btr.data
bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by))
bx += bdelay
axis.plot(bx, by, 'k', linewidth=1)
template_line, = axis.plot(x, y, 'r', linewidth=1.1,
label='Template')
if i == 0:
lines.append(template_line)
labels.append('Template')
lengths.append(max(bx[-1], x[-1]))
else:
template_line, = axis.plot(x, y, 'k', linewidth=1.1,
label='Template')
if i == 0:
lines.append(template_line)
labels.append('Template')
lengths.append(x[-1])
# print(' '.join([tr.stats.station, str(len(x)), str(len(y))]))
axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]),
rotation=0, horizontalalignment='right')
axis.yaxis.set_ticks([])
# Plot the picks if they are given
if picks:
tr_picks = [pick for pick in picks if
pick.waveform_id.station_code == tr.stats.station and
pick.waveform_id.channel_code[0] +
pick.waveform_id.channel_code[-1] ==
tr.stats.channel[0] + tr.stats.channel[-1]]
for pick in tr_picks:
if not pick.phase_hint:
pcolor = 'k'
label = 'Unknown pick'
elif 'P' in pick.phase_hint.upper():
pcolor = 'red'
label = 'P-pick'
elif 'S' in pick.phase_hint.upper():
pcolor = 'blue'
label = 'S-pick'
else:
pcolor = 'k'
label = 'Unknown pick'
pdelay = pick.time - mintime
# print(pdelay)
line = axis.axvline(x=pdelay, color=pcolor, linewidth=2,
linestyle='--', label=label)
if label not in labels:
lines.append(line)
labels.append(label)
# axes[i].plot([pdelay, pdelay], [])
axis.set_xlim([0, max(lengths)])
if len(template) > 1:
axis = axes[len(template) - 1]
else:
axis = axes
axis.set_xlabel('Time (s) from start of template')
plt.figlegend(lines, labels, 'upper right')
title = kwargs.get("title") or None
if title:
if len(template) > 1:
axes[0].set_title(title)
else:
axes.set_title(title)
kwargs.pop("title") # Do not give title to _finalise_figure
else:
plt.subplots_adjust(top=0.98)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig |
def pay_with_alias(amount: Money, alias_registration_id: str, client_ref: str) -> Payment:
"""
Charges money using datatrans, given a previously registered credit card alias.
:param amount: The amount and currency we want to charge
:param alias_registration_id: The alias registration to use
:param client_ref: A unique reference for this charge
:return: a Payment (either successful or not)
"""
if amount.amount <= 0:
raise ValueError('Pay with alias takes a strictly positive amount')
alias_registration = AliasRegistration.objects.get(pk=alias_registration_id)
logger.info('paying-with-alias', amount=amount, client_ref=client_ref,
alias_registration=alias_registration)
request_xml = build_pay_with_alias_request_xml(amount, client_ref, alias_registration)
logger.info('sending-pay-with-alias-request', url=datatrans_authorize_url, data=request_xml)
response = requests.post(
url=datatrans_authorize_url,
headers={'Content-Type': 'application/xml'},
data=request_xml)
logger.info('processing-pay-with-alias-response', response=response.content)
charge_response = parse_pay_with_alias_response_xml(response.content)
charge_response.save()
charge_response.send_signal()
return charge_response | Charges money using datatrans, given a previously registered credit card alias.
:param amount: The amount and currency we want to charge
:param alias_registration_id: The alias registration to use
:param client_ref: A unique reference for this charge
:return: a Payment (either successful or not) | Below is the instruction that describes the task:
### Input:
Charges money using datatrans, given a previously registered credit card alias.
:param amount: The amount and currency we want to charge
:param alias_registration_id: The alias registration to use
:param client_ref: A unique reference for this charge
:return: a Payment (either successful or not)
### Response:
def pay_with_alias(amount: Money, alias_registration_id: str, client_ref: str) -> Payment:
"""
Charges money using datatrans, given a previously registered credit card alias.
:param amount: The amount and currency we want to charge
:param alias_registration_id: The alias registration to use
:param client_ref: A unique reference for this charge
:return: a Payment (either successful or not)
"""
if amount.amount <= 0:
raise ValueError('Pay with alias takes a strictly positive amount')
alias_registration = AliasRegistration.objects.get(pk=alias_registration_id)
logger.info('paying-with-alias', amount=amount, client_ref=client_ref,
alias_registration=alias_registration)
request_xml = build_pay_with_alias_request_xml(amount, client_ref, alias_registration)
logger.info('sending-pay-with-alias-request', url=datatrans_authorize_url, data=request_xml)
response = requests.post(
url=datatrans_authorize_url,
headers={'Content-Type': 'application/xml'},
data=request_xml)
logger.info('processing-pay-with-alias-response', response=response.content)
charge_response = parse_pay_with_alias_response_xml(response.content)
charge_response.save()
charge_response.send_signal()
return charge_response |
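A hedged usage sketch of the charge call above. It assumes `Money` is the `moneyed` type behind the annotation (the record does not show the import) and that an `AliasRegistration` row with the given primary key exists:
.. code-block:: python

    from moneyed import Money  # assumption: the Money type in the signature

    # The alias id and client_ref below are hypothetical placeholder values.
    payment = pay_with_alias(
        amount=Money('25.00', 'CHF'),
        alias_registration_id='42',
        client_ref='order-2019-0001',  # must be unique per charge attempt
    )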
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages') | Toggle the current virtualenv between having and not having access to the global site-packages. | Below is the instruction that describes the task:
### Input:
Toggle the current virtualenv between having and not having access to the global site-packages.
### Response:
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages') |
def _prepare_for_cross_validation(self, corr, clf):
"""Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
"""
time1 = time.time()
(num_processed_voxels, num_epochs, _) = corr.shape
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
# kernel matrices should be computed
kernel_matrices = np.zeros((num_processed_voxels, num_epochs,
num_epochs),
np.float32, order='C')
for i in range(num_processed_voxels):
blas.compute_kernel_matrix('L', 'T',
num_epochs, self.num_voxels2,
1.0, corr,
i, self.num_voxels2,
0.0, kernel_matrices[i, :, :],
num_epochs)
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrices[i, 0, 0])))
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrices[i, :, :] *= proportion
data = kernel_matrices
else:
data = corr
time2 = time.time()
logger.debug(
'cross validation data preparation takes %.2f s' %
(time2 - time1)
)
return data | Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels] | Below is the instruction that describes the task:
### Input:
Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
### Response:
def _prepare_for_cross_validation(self, corr, clf):
"""Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
"""
time1 = time.time()
(num_processed_voxels, num_epochs, _) = corr.shape
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
# kernel matrices should be computed
kernel_matrices = np.zeros((num_processed_voxels, num_epochs,
num_epochs),
np.float32, order='C')
for i in range(num_processed_voxels):
blas.compute_kernel_matrix('L', 'T',
num_epochs, self.num_voxels2,
1.0, corr,
i, self.num_voxels2,
0.0, kernel_matrices[i, :, :],
num_epochs)
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrices[i, 0, 0])))
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrices[i, :, :] *= proportion
data = kernel_matrices
else:
data = corr
time2 = time.time()
logger.debug(
'cross validation data preparation takes %.2f s' %
(time2 - time1)
)
return data |
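The shrink step above rescales each voxel's kernel so its leading entries sit around two digits, which keeps the SVM alpha updates numerically stable. A numpy sketch of the precomputed-kernel path, with `X @ X.T` standing in for the BLAS call:
.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.standard_normal((20, 500)).astype(np.float32) * 30  # epochs x voxels
    kernel = X @ X.T  # linear kernel, as computed per voxel above

    # Shrink so the leading entry has at most two digits.
    num_digits = len(str(int(kernel[0, 0])))
    if num_digits > 2:
        kernel *= 10 ** (2 - num_digits)
    print(int(kernel[0, 0]))  # now a one- or two-digit leading value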
def sortLocations(locations):
""" Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off-axis points
These would be involved in projecting limits and need to be patched.
"""
onAxis = []
onAxisValues = {}
offAxis = []
offAxis_projecting = []
offAxis_wild = []
# first get the on-axis points
for l in locations:
if l.isOrigin():
continue
if l.isOnAxis():
onAxis.append(l)
for axis in l.keys():
if axis not in onAxisValues:
onAxisValues[axis] = []
onAxisValues[axis].append(l[axis])
else:
offAxis.append(l)
for l in offAxis:
ok = False
for axis in l.keys():
if axis not in onAxisValues:
continue
if l[axis] in onAxisValues[axis]:
ok = True
if ok:
offAxis_projecting.append(l)
else:
offAxis_wild.append(l)
return onAxis, offAxis_projecting, offAxis_wild | Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off-axis points
These would be involved in projecting limits and need to be patched. | Below is the instruction that describes the task:
### Input:
Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off-axis points
These would be involved in projecting limits and need to be patched.
### Response:
def sortLocations(locations):
""" Sort the locations by ranking:
1. all on-axis points
2. all off-axis points which project onto on-axis points
these would be involved in master to master interpolations
necessary for patching. Projecting off-axis masters have
at least one coordinate in common with an on-axis master.
3. non-projecting off-axis points, 'wild' off-axis points
These would be involved in projecting limits and need to be patched.
"""
onAxis = []
onAxisValues = {}
offAxis = []
offAxis_projecting = []
offAxis_wild = []
# first get the on-axis points
for l in locations:
if l.isOrigin():
continue
if l.isOnAxis():
onAxis.append(l)
for axis in l.keys():
if axis not in onAxisValues:
onAxisValues[axis] = []
onAxisValues[axis].append(l[axis])
else:
offAxis.append(l)
for l in offAxis:
ok = False
for axis in l.keys():
if axis not in onAxisValues:
continue
if l[axis] in onAxisValues[axis]:
ok = True
if ok:
offAxis_projecting.append(l)
else:
offAxis_wild.append(l)
return onAxis, offAxis_projecting, offAxis_wild |
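A self-contained usage sketch of the sorter above. The real `Location` class comes from mutatorMath; the `Loc` stand-in below only mimics the two predicates the function calls, which is an assumption made for illustration:
.. code-block:: python

    # Dict-based stand-in for mutatorMath's Location.
    class Loc(dict):
        def isOrigin(self):
            return all(v == 0 for v in self.values())

        def isOnAxis(self):
            return sum(1 for v in self.values() if v != 0) == 1

    locs = [
        Loc(weight=0, width=0),      # origin, skipped
        Loc(weight=1, width=0),      # on-axis
        Loc(weight=1, width=1),      # off-axis, projects onto weight=1
        Loc(weight=0.5, width=0.5),  # off-axis, 'wild'
    ]
    on, projecting, wild = sortLocations(locs)
    print(len(on), len(projecting), len(wild))  # 1 1 1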
def id_range(self):
"""Get the range of archor reading_ids.
Returns:
(int, int): The lowest and highest reading ids.
If no reading ids have been loaded, (0, 0) is returned.
"""
if len(self._anchor_points) == 0:
return (0, 0)
return (self._anchor_points[0].reading_id, self._anchor_points[-1].reading_id) | Get the range of anchor reading_ids.
Returns:
(int, int): The lowest and highest reading ids.
If no reading ids have been loaded, (0, 0) is returned. | Below is the instruction that describes the task:
### Input:
Get the range of anchor reading_ids.
Returns:
(int, int): The lowest and highest reading ids.
If no reading ids have been loaded, (0, 0) is returned.
### Response:
def id_range(self):
"""Get the range of archor reading_ids.
Returns:
(int, int): The lowest and highest reading ids.
If no reading ids have been loaded, (0, 0) is returned.
"""
if len(self._anchor_points) == 0:
return (0, 0)
return (self._anchor_points[0].reading_id, self._anchor_points[-1].reading_id) |
def get_expectations_config(self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False
):
"""Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_configs_kwargs (boolean): \
In returned expectation objects, suppress the `include_configs` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object.
"""
config = dict(self._expectations_config)
config = copy.deepcopy(config)
expectations = config["expectations"]
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
# Note: This is conservative logic.
# Instead of retaining expectations IFF success==True, it discards expectations IFF success==False.
# In cases where expectation["success"] is missing or None, expectations are *retained*.
# Such a case could occur if expectations were loaded from a config file and never run.
if "success_on_last_run" in expectation and expectation["success_on_last_run"] == False:
discards["failed_expectations"] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
for expectation in expectations:
# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, which calls _copy_and_clean_up_expectation
if "success_on_last_run" in expectation:
del expectation["success_on_last_run"]
if discard_result_format_kwargs:
if "result_format" in expectation["kwargs"]:
del expectation["kwargs"]["result_format"]
discards["result_format"] += 1
if discard_include_configs_kwargs:
if "include_configs" in expectation["kwargs"]:
del expectation["kwargs"]["include_configs"]
discards["include_configs"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in expectation["kwargs"]:
del expectation["kwargs"]["catch_exceptions"]
discards["catch_exceptions"] += 1
if not suppress_warnings:
"""
WARNING: get_expectations_config discarded
12 failing expectations
44 result_format kwargs
0 include_config kwargs
1 catch_exceptions kwargs
If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropriately.
"""
if any([discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):
print("WARNING: get_expectations_config discarded")
if discard_failed_expectations:
print("\t%d failing expectations" %
discards["failed_expectations"])
if discard_result_format_kwargs:
print("\t%d result_format kwargs" %
discards["result_format"])
if discard_include_configs_kwargs:
print("\t%d include_configs kwargs" %
discards["include_configs"])
if discard_catch_exceptions_kwargs:
print("\t%d catch_exceptions kwargs" %
discards["catch_exceptions"])
print("If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.")
config["expectations"] = expectations
return config | Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_configs_kwargs (boolean): \
In returned expectation objects, suppress the `include_configs` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object. | Below is the instruction that describes the task:
### Input:
Returns _expectation_config as a JSON object, and performs some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_configs_kwargs (boolean): \
In returned expectation objects, suppress the `include_configs` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object.
### Response:
def get_expectations_config(self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_configs_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False
):
"""Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_configs_kwargs (boolean): \
In returned expectation objects, suppress the `include_configs` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
Returns:
An expectation config.
Note:
get_expectations_config does not affect the underlying config at all. The returned config is a copy of _expectations_config, not the original object.
"""
config = dict(self._expectations_config)
config = copy.deepcopy(config)
expectations = config["expectations"]
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
# Note: This is conservative logic.
# Instead of retaining expectations IFF success==True, it discards expectations IFF success==False.
# In cases where expectation["success_on_last_run"] is missing or None, expectations are *retained*.
# Such a case could occur if expectations were loaded from a config file and never run.
if "success_on_last_run" in expectation and expectation["success_on_last_run"] == False:
discards["failed_expectations"] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
for expectation in expectations:
# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, which calls _copy_and_clean_up_expectation
if "success_on_last_run" in expectation:
del expectation["success_on_last_run"]
if discard_result_format_kwargs:
if "result_format" in expectation["kwargs"]:
del expectation["kwargs"]["result_format"]
discards["result_format"] += 1
if discard_include_configs_kwargs:
if "include_configs" in expectation["kwargs"]:
del expectation["kwargs"]["include_configs"]
discards["include_configs"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in expectation["kwargs"]:
del expectation["kwargs"]["catch_exceptions"]
discards["catch_exceptions"] += 1
if not suppress_warnings:
"""
WARNING: get_expectations_config discarded
12 failing expectations
44 result_format kwargs
0 include_config kwargs
1 catch_exceptions kwargs
If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropriately.
"""
if any([discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs]):
print("WARNING: get_expectations_config discarded")
if discard_failed_expectations:
print("\t%d failing expectations" %
discards["failed_expectations"])
if discard_result_format_kwargs:
print("\t%d result_format kwargs" %
discards["result_format"])
if discard_include_configs_kwargs:
print("\t%d include_configs kwargs" %
discards["include_configs"])
if discard_catch_exceptions_kwargs:
print("\t%d catch_exceptions kwargs" %
discards["catch_exceptions"])
print("If you wish to change this behavior, please set discard_failed_expectations, discard_result_format_kwargs, discard_include_configs_kwargs, and discard_catch_exceptions_kwargs appropirately.")
config["expectations"] = expectations
return config |
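As a hedged usage sketch of the export above (ge.dataset.PandasDataset and the expectation name reflect the conventional great_expectations API of this era; the DataFrame is made up):

import great_expectations as ge
import pandas as pd

# Wrap a toy DataFrame and record one expectation against it.
df = ge.dataset.PandasDataset(pd.DataFrame({"x": [1, 2, 3]}))
df.expect_column_values_to_be_between("x", min_value=0, max_value=10)

# Export only the expectations that passed, with tuning kwargs stripped
# and the summary printout suppressed.
config = df.get_expectations_config(discard_failed_expectations=True, suppress_warnings=True)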
def _get_wrapper_args(routine):
"""
Returns code for the parameters of the wrapper method for the stored routine.
:param dict[str,*] routine: The routine metadata.
:rtype: str
"""
ret = ''
for parameter_info in routine['parameters']:
if ret:
ret += ', '
ret += parameter_info['name']
return ret | Returns code for the parameters of the wrapper method for the stored routine.
:param dict[str,*] routine: The routine metadata.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Returns code for the parameters of the wrapper method for the stored routine.
:param dict[str,*] routine: The routine metadata.
:rtype: str
### Response:
def _get_wrapper_args(routine):
"""
Returns code for the parameters of the wrapper method for the stored routine.
:param dict[str,*] routine: The routine metadata.
:rtype: str
"""
ret = ''
for parameter_info in routine['parameters']:
if ret:
ret += ', '
ret += parameter_info['name']
return ret |
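A minimal sketch of the metadata shape this helper consumes (the routine dict is made up; only the 'parameters' list and each 'name' key matter, and _get_wrapper_args is assumed to be in scope):

# Hypothetical routine metadata.
routine = {"parameters": [{"name": "p_id"}, {"name": "p_name"}]}
assert _get_wrapper_args(routine) == "p_id, p_name"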
def pie(n_labels=5,mode=None):
"""
Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return pd.DataFrame({'values':np.random.randint(1,100,n_labels),
'labels':getName(n_labels,mode=mode)}) | Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names | Below is the the instruction that describes the task:
### Input:
Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
### Response:
def pie(n_labels=5,mode=None):
"""
Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return pd.DataFrame({'values':np.random.randint(1,100,n_labels),
'labels':getName(n_labels,mode=mode)}) |
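A hedged usage sketch, assuming this lives in cufflinks' datagen module (where pd, np and getName are defined):

import cufflinks as cf

df = cf.datagen.pie(n_labels=4, mode="stocks")
# df has a 'values' column of random ints and a 'labels' column of four
# ticker-like names -- the shape expected by
# df.iplot(kind="pie", labels="labels", values="values").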
def overlap(args):
"""
%prog overlap best.contains iid
Visualize overlaps for a given fragment. Must be run in 4-unitigger. All
overlaps for iid were retrieved, excluding the ones matching best.contains.
"""
from jcvi.apps.console import green
p = OptionParser(overlap.__doc__)
p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
p.add_option("--canvas", default=100, type="int", help="Canvas size")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bestcontains, iid = args
canvas = opts.canvas
bestcontainscache = bestcontains + ".cache"
if need_update(bestcontains, bestcontainscache):
fp = open(bestcontains)
fw = open(bestcontainscache, "w")
exclude = set()
for row in fp:
if row[0] == '#':
continue
j = int(row.split()[0])
exclude.add(j)
dump(exclude, fw)
fw.close()
exclude = load(open(bestcontainscache))
logging.debug("A total of {0} reads to exclude".format(len(exclude)))
cmd = "overlapStore -d ../asm.ovlStore -b {0} -e {0}".format(iid)
cmd += " -E {0}".format(opts.maxerr)
frags = []
for row in popen(cmd):
r = OverlapLine(row)
if r.bid in exclude:
continue
frags.append(r)
# Also include the query fragment
frags.append(OverlapLine("{0} {0} N 0 0 0 0".format(iid)))
frags.sort(key=lambda x: x.ahang)
# Determine size of the query fragment
cmd = "gatekeeper -b {0} -e {0}".format(iid)
cmd += " -tabular -dumpfragments ../asm.gkpStore"
fp = popen(cmd)
row = next(fp)
size = int(next(fp).split()[-1])
# Determine size of canvas
xmin = min(x.ahang for x in frags)
xmax = max(x.bhang for x in frags)
xsize = -xmin + size + xmax
ratio = xsize / canvas
fw = sys.stdout
for f in frags:
fsize = -f.ahang + size + f.bhang
a = int((f.ahang - xmin) / ratio)
b = int(fsize / ratio)
t = '-' * b
if f.orientation == 'N':
t = t[:-1] + '>'
else:
t = '<' + t[1:]
if f.ahang == 0 and f.bhang == 0:
t = green(t)
c = canvas - a - b
fw.write(' ' * a)
fw.write(t)
fw.write(' ' * c)
print("{0} ({1})".format(str(f.bid).rjust(10), f.erate_adj), file=fw) | %prog overlap best.contains iid
Visualize overlaps for a given fragment. Must be run in 4-unitigger. All
overlaps for iid were retrieved, excluding the ones matching best.contains. | Below is the the instruction that describes the task:
### Input:
%prog overlap best.contains iid
Visualize overlaps for a given fragment. Must be run in 4-unitigger. All
overlaps for iid were retrieved, excluding the ones matching best.contains.
### Response:
def overlap(args):
"""
%prog overlap best.contains iid
Visualize overlaps for a given fragment. Must be run in 4-unitigger. All
overlaps for iid were retrieved, excluding the ones matching best.contains.
"""
from jcvi.apps.console import green
p = OptionParser(overlap.__doc__)
p.add_option("--maxerr", default=2, type="int", help="Maximum error rate")
p.add_option("--canvas", default=100, type="int", help="Canvas size")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bestcontains, iid = args
canvas = opts.canvas
bestcontainscache = bestcontains + ".cache"
if need_update(bestcontains, bestcontainscache):
fp = open(bestcontains)
fw = open(bestcontainscache, "w")
exclude = set()
for row in fp:
if row[0] == '#':
continue
j = int(row.split()[0])
exclude.add(j)
dump(exclude, fw)
fw.close()
exclude = load(open(bestcontainscache))
logging.debug("A total of {0} reads to exclude".format(len(exclude)))
cmd = "overlapStore -d ../asm.ovlStore -b {0} -e {0}".format(iid)
cmd += " -E {0}".format(opts.maxerr)
frags = []
for row in popen(cmd):
r = OverlapLine(row)
if r.bid in exclude:
continue
frags.append(r)
# Also include the query fragment
frags.append(OverlapLine("{0} {0} N 0 0 0 0".format(iid)))
frags.sort(key=lambda x: x.ahang)
# Determine size of the query fragment
cmd = "gatekeeper -b {0} -e {0}".format(iid)
cmd += " -tabular -dumpfragments ../asm.gkpStore"
fp = popen(cmd)
row = next(fp)
size = int(next(fp).split()[-1])
# Determine size of canvas
xmin = min(x.ahang for x in frags)
xmax = max(x.bhang for x in frags)
xsize = -xmin + size + xmax
ratio = xsize / canvas
fw = sys.stdout
for f in frags:
fsize = -f.ahang + size + f.bhang
a = int((f.ahang - xmin) / ratio)
b = int(fsize / ratio)
t = '-' * b
if f.orientation == 'N':
t = t[:-1] + '>'
else:
t = '<' + t[1:]
if f.ahang == 0 and f.bhang == 0:
t = green(t)
c = canvas - a - b
fw.write(' ' * a)
fw.write(t)
fw.write(' ' * c)
print("{0} ({1})".format(str(f.bid).rjust(10), f.erate_adj), file=fw) |
def get_stream_action_type(stream_arn):
"""Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
"""
stream_type_map = {
"kinesis": awacs.kinesis.Action,
"dynamodb": awacs.dynamodb.Action,
}
stream_type = stream_arn.split(":")[2]
try:
return stream_type_map[stream_type]
except KeyError:
raise ValueError(
"Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn)
) | Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb. | Below is the the instruction that describes the task:
### Input:
Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
### Response:
def get_stream_action_type(stream_arn):
"""Returns the awacs Action for a stream type given an arn
Args:
stream_arn (str): The Arn of the stream.
Returns:
:class:`awacs.aws.Action`: The appropriate stream type awacs Action
class
Raises:
ValueError: If the stream type doesn't match kinesis or dynamodb.
"""
stream_type_map = {
"kinesis": awacs.kinesis.Action,
"dynamodb": awacs.dynamodb.Action,
}
stream_type = stream_arn.split(":")[2]
try:
return stream_type_map[stream_type]
except KeyError:
raise ValueError(
"Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn)
) |
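The dispatch keys off the third colon-separated field of the ARN; a self-contained sketch of that parsing (the stream ARN is made up):

# ARNs look like arn:aws:<service>:<region>:<account>:<resource>
example_arn = "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"
assert example_arn.split(":")[2] == "kinesis"
# "dynamodb" in that slot would select awacs.dynamodb.Action instead.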
def cf_encoder(variables, attributes):
"""
A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
"""
new_vars = OrderedDict((k, encode_cf_variable(v, name=k))
for k, v in variables.items())
return new_vars, attributes | A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable | Below is the the instruction that describes the task:
### Input:
A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
### Response:
def cf_encoder(variables, attributes):
"""
A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
"""
new_vars = OrderedDict((k, encode_cf_variable(v, name=k))
for k, v in variables.items())
return new_vars, attributes |
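A hedged usage sketch, assuming cf_encoder is importable from xarray.conventions (the variable and attributes below are made up):

import numpy as np
import xarray as xr
from xarray.conventions import cf_encoder

var = xr.Variable(("time",), np.arange(3.0),
                  encoding={"_FillValue": -9999.0, "dtype": "float32"})
encoded_vars, attrs = cf_encoder({"t": var}, {"title": "demo"})
# encoded_vars["t"] now has the mask/scale/dtype encoding applied.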
def update(gandi, resource, name, algorithm, ssl_enable, ssl_disable):
"""Update a webaccelerator"""
result = gandi.webacc.update(resource, name, algorithm, ssl_enable,
ssl_disable)
return result | Update a webaccelerator | Below is the the instruction that describes the task:
### Input:
Update a webaccelerator
### Response:
def update(gandi, resource, name, algorithm, ssl_enable, ssl_disable):
"""Update a webaccelerator"""
result = gandi.webacc.update(resource, name, algorithm, ssl_enable,
ssl_disable)
return result |
def import_class(name):
"""Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
"""
module, _, klass = name.rpartition('.')
mod = import_module(module)
return getattr(mod, klass) | Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content') | Below is the the instruction that describes the task:
### Input:
Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
### Response:
def import_class(name):
"""Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
"""
module, _, klass = name.rpartition('.')
mod = import_module(module)
return getattr(mod, klass) |
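Since the helper is a thin wrapper over the stdlib, a self-contained round-trip check is straightforward (assuming import_module comes from importlib, as the body implies):

from importlib import import_module

def import_class(name):
    module, _, klass = name.rpartition('.')
    return getattr(import_module(module), klass)

OrderedDict = import_class('collections.OrderedDict')
assert OrderedDict.__name__ == 'OrderedDict'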
def get_deposit(self, deposit_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-deposit"""
return self.api_client.get_deposit(self.id, deposit_id, **params) | https://developers.coinbase.com/api/v2#show-a-deposit | Below is the the instruction that describes the task:
### Input:
https://developers.coinbase.com/api/v2#show-a-deposit
### Response:
def get_deposit(self, deposit_id, **params):
"""https://developers.coinbase.com/api/v2#show-a-deposit"""
return self.api_client.get_deposit(self.id, deposit_id, **params) |
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe,
self.get_locals(self.curframe))
except IOError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe) | longlist | ll
List the whole source code for the current function or frame. | Below is the the instruction that describes the task:
### Input:
longlist | ll
List the whole source code for the current function or frame.
### Response:
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe,
self.get_locals(self.curframe))
except IOError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe) |
def apply_pagination(self, q):
"""
Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query.
"""
# type: (Query)->Query
assert self.record_count >= 0, "Record count must be set."
return q.limit(self.page_size).offset(self.offset) | Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query. | Below is the the instruction that describes the task:
### Input:
Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query.
### Response:
def apply_pagination(self, q):
"""
Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query.
"""
# type: (Query)->Query
assert self.record_count >= 0, "Record count must be set."
return q.limit(self.page_size).offset(self.offset) |
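A runnable sketch of the same limit/offset pattern (SQLAlchemy 1.4+ assumed; the Item model and page numbers are made up):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all(Item() for _ in range(100))
    session.commit()
    page_size, page_number = 25, 3  # zero-based page index
    q = session.query(Item).order_by(Item.id)
    paged = q.limit(page_size).offset(page_size * page_number)
    assert [i.id for i in paged] == list(range(76, 101))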
def prune_tree(path):
"""
Like shutil.rmtree(), but log errors rather than discard them, and do not
waste multiple os.stat() calls discovering whether the object can be
deleted, just try deleting it instead.
"""
try:
os.unlink(path)
return
except OSError:
e = sys.exc_info()[1]
if not (os.path.isdir(path) and
e.args[0] in (errno.EPERM, errno.EISDIR)):
LOG.error('prune_tree(%r): %s', path, e)
return
try:
# Ensure write access for readonly directories. Ignore error in case
# path is on a weird filesystem (e.g. vfat).
os.chmod(path, int('0700', 8))
except OSError:
e = sys.exc_info()[1]
LOG.warning('prune_tree(%r): %s', path, e)
try:
for name in os.listdir(path):
if name not in ('.', '..'):
prune_tree(os.path.join(path, name))
os.rmdir(path)
except OSError:
e = sys.exc_info()[1]
LOG.error('prune_tree(%r): %s', path, e) | Like shutil.rmtree(), but log errors rather than discard them, and do not
waste multiple os.stat() calls discovering whether the object can be
deleted, just try deleting it instead. | Below is the the instruction that describes the task:
### Input:
Like shutil.rmtree(), but log errors rather than discard them, and do not
waste multiple os.stat() calls discovering whether the object can be
deleted, just try deleting it instead.
### Response:
def prune_tree(path):
"""
Like shutil.rmtree(), but log errors rather than discard them, and do not
waste multiple os.stat() calls discovering whether the object can be
deleted, just try deleting it instead.
"""
try:
os.unlink(path)
return
except OSError:
e = sys.exc_info()[1]
if not (os.path.isdir(path) and
e.args[0] in (errno.EPERM, errno.EISDIR)):
LOG.error('prune_tree(%r): %s', path, e)
return
try:
# Ensure write access for readonly directories. Ignore error in case
# path is on a weird filesystem (e.g. vfat).
os.chmod(path, int('0700', 8))
except OSError:
e = sys.exc_info()[1]
LOG.warning('prune_tree(%r): %s', path, e)
try:
for name in os.listdir(path):
if name not in ('.', '..'):
prune_tree(os.path.join(path, name))
os.rmdir(path)
except OSError:
e = sys.exc_info()[1]
LOG.error('prune_tree(%r): %s', path, e) |
def pdf(self, d, n=None):
r'''Computes the probability density function of a
continuous particle size distribution at a specified particle diameter,
and optionally in a specified basis. The evaluation function varies with
the distribution chosen. The interconversion between distribution
orders is performed using the following formula [1]_:
.. math::
q_s(d) = \frac{d^{(s-r)} q_r(d) dd}
{ \int_0^\infty d^{(s-r)} q_r(d) dd}
Parameters
----------
d : float
Particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
pdf : float
The probability density function at the specified diameter and
order, [-]
Notes
-----
The pdf order conversions are typically available analytically after
some work. They have been verified numerically. See the various
functions with names ending with 'basis_integral' for the formulations.
The distributions normally do not have analytical limits for diameters
of 0 or infinity, but large values suffice to capture the area of the
integral.
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.pdf(1e-5)
30522.765209509154
>>> psd.pdf(1e-5, n=3)
30522.765209509154
>>> psd.pdf(1e-5, n=0)
1238.661379483343
References
----------
.. [1] Masuda, Hiroaki, Ko Higashitani, and Hideto Yoshida. Powder
Technology: Fundamentals of Particles, Powder Beds, and Particle
Generation. CRC Press, 2006.
'''
ans = self._pdf(d=d)
if n is not None and n != self.order:
power = n - self.order
numerator = d**power*ans
denominator = self._pdf_basis_integral_definite(d_min=0.0, d_max=self.d_excessive, n=power)
ans = numerator/denominator
# Handle splines which might go below zero
ans = max(ans, 0.0)
if self.truncated:
if d < self.d_min or d > self.d_max:
return 0.0
ans = (ans)/(self._cdf_d_max - self._cdf_d_min)
return ans | r'''Computes the probability density function of a
continuous particle size distribution at a specified particle diameter,
and optionally in a specified basis. The evaluation function varies with
the distribution chosen. The interconversion between distribution
orders is performed using the following formula [1]_:
.. math::
q_s(d) = \frac{d^{(s-r)} q_r(d) dd}
{ \int_0^\infty d^{(s-r)} q_r(d) dd}
Parameters
----------
d : float
Particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
pdf : float
The probability density function at the specified diameter and
order, [-]
Notes
-----
The pdf order conversions are typically available analytically after
some work. They have been verified numerically. See the various
functions with names ending with 'basis_integral' for the formulations.
The distributions normally do not have analytical limits for diameters
of 0 or infinity, but large values suffice to capture the area of the
integral.
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.pdf(1e-5)
30522.765209509154
>>> psd.pdf(1e-5, n=3)
30522.765209509154
>>> psd.pdf(1e-5, n=0)
1238.661379483343
References
----------
.. [1] Masuda, Hiroaki, Ko Higashitani, and Hideto Yoshida. Powder
Technology: Fundamentals of Particles, Powder Beds, and Particle
Generation. CRC Press, 2006. | Below is the the instruction that describes the task:
### Input:
r'''Computes the probability density function of a
continuous particle size distribution at a specified particle diameter,
and optionally in a specified basis. The evaluation function varies with
the distribution chosen. The interconversion between distribution
orders is performed using the following formula [1]_:
.. math::
q_s(d) = \frac{d^{(s-r)} q_r(d) dd}
{ \int_0^\infty d^{(s-r)} q_r(d) dd}
Parameters
----------
d : float
Particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
pdf : float
The probability density function at the specified diameter and
order, [-]
Notes
-----
The pdf order conversions are typically available analytically after
some work. They have been verified numerically. See the various
functions with names ending with 'basis_integral' for the formulations.
The distributions normally do not have analytical limits for diameters
of 0 or infinity, but large values suffice to capture the area of the
integral.
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.pdf(1e-5)
30522.765209509154
>>> psd.pdf(1e-5, n=3)
30522.765209509154
>>> psd.pdf(1e-5, n=0)
1238.661379483343
References
----------
.. [1] Masuda, Hiroaki, Ko Higashitani, and Hideto Yoshida. Powder
Technology: Fundamentals of Particles, Powder Beds, and Particle
Generation. CRC Press, 2006.
### Response:
def pdf(self, d, n=None):
r'''Computes the probability density function of a
continuous particle size distribution at a specified particle diameter,
and optionally in a specified basis. The evaluation function varies with
the distribution chosen. The interconversion between distribution
orders is performed using the following formula [1]_:
.. math::
q_s(d) = \frac{d^{(s-r)} q_r(d) dd}
{ \int_0^\infty d^{(s-r)} q_r(d) dd}
Parameters
----------
d : float
Particle size diameter, [m]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
pdf : float
The probability density function at the specified diameter and
order, [-]
Notes
-----
The pdf order conversions are typically available analytically after
some work. They have been verified numerically. See the various
functions with names ending with 'basis_integral' for the formulations.
The distributions normally do not have analytical limits for diameters
of 0 or infinity, but large values suffice to capture the area of the
integral.
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.pdf(1e-5)
30522.765209509154
>>> psd.pdf(1e-5, n=3)
30522.765209509154
>>> psd.pdf(1e-5, n=0)
1238.661379483343
References
----------
.. [1] Masuda, Hiroaki, Ko Higashitani, and Hideto Yoshida. Powder
Technology: Fundamentals of Particles, Powder Beds, and Particle
Generation. CRC Press, 2006.
'''
ans = self._pdf(d=d)
if n is not None and n != self.order:
power = n - self.order
numerator = d**power*ans
denominator = self._pdf_basis_integral_definite(d_min=0.0, d_max=self.d_excessive, n=power)
ans = numerator/denominator
# Handle splines which might go below zero
ans = max(ans, 0.0)
if self.truncated:
if d < self.d_min or d > self.d_max:
return 0.0
ans = (ans)/(self._cdf_d_max - self._cdf_d_min)
return ans |
def _set_size_code(self):
"""Set the code for a size operation.
"""
if not self._op.startswith(self.SIZE):
self._size_code = None
return
if len(self._op) == len(self.SIZE):
self._size_code = self.SZ_EQ
else:
suffix = self._op[len(self.SIZE):]
self._size_code = self.SZ_MAPPING.get(suffix, None)
if self._size_code is None:
raise ValueError('invalid "{}" suffix "{}"'.format(self.SIZE, suffix)) | Set the code for a size operation. | Below is the the instruction that describes the task:
### Input:
Set the code for a size operation.
### Response:
def _set_size_code(self):
"""Set the code for a size operation.
"""
if not self._op.startswith(self.SIZE):
self._size_code = None
return
if len(self._op) == len(self.SIZE):
self._size_code = self.SZ_EQ
else:
suffix = self._op[len(self.SIZE):]
self._size_code = self.SZ_MAPPING.get(suffix, None)
if self._size_code is None:
raise ValueError('invalid "{}" suffix "{}"'.format(self.SIZE, suffix)) |
def digest(self):
"""Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
"""
A = self.A
B = self.B
C = self.C
D = self.D
input = [] + self.input
count = [] + self.count
index = (self.count[0] >> 3) & 0x3f
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
padding = [b'\200'] + [b'\000'] * 63
self.update(padding[:padLen])
# Append length (before padding).
bits = _bytelist2long(self.input[:56]) + count
self._transform(bits)
# Store state in digest.
digest = struct.pack("<IIII", self.A, self.B, self.C, self.D)
self.A = A
self.B = B
self.C = C
self.D = D
self.input = input
self.count = count
return digest | Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes. | Below is the the instruction that describes the task:
### Input:
Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
### Response:
def digest(self):
"""Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes.
"""
A = self.A
B = self.B
C = self.C
D = self.D
input = [] + self.input
count = [] + self.count
index = (self.count[0] >> 3) & 0x3f
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
padding = [b'\200'] + [b'\000'] * 63
self.update(padding[:padLen])
# Append length (before padding).
bits = _bytelist2long(self.input[:56]) + count
self._transform(bits)
# Store state in digest.
digest = struct.pack("<IIII", self.A, self.B, self.C, self.D)
self.A = A
self.B = B
self.C = C
self.D = D
self.input = input
self.count = count
return digest |
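A sanity check for any pure-Python MD5 like this one: the packed state returned by digest() should match hashlib on a known vector (RFC 1321 test suite):

import hashlib

assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"
# Feeding the same bytes through update() above, digest() must return
# the same 16 raw bytes as hashlib.md5(b"abc").digest().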
def dispatch_pure(
request: str,
methods: Methods,
*,
context: Any,
convert_camel_case: bool,
debug: bool,
) -> Response:
"""
Pure version of dispatch - no logging, no optional parameters.
Does two things:
1. Deserializes and validates the string.
2. Calls each request.
Args:
request: The incoming request string.
methods: Collection of methods that can be called.
context: If specified, will be the first positional argument in all requests.
convert_camel_case: Will convert the method name/any named params to snake case.
debug: Include more information in error responses.
Returns:
A Response.
"""
try:
deserialized = validate(deserialize(request), schema)
except JSONDecodeError as exc:
return InvalidJSONResponse(data=str(exc), debug=debug)
except ValidationError as exc:
return InvalidJSONRPCResponse(data=None, debug=debug)
return call_requests(
create_requests(
deserialized, context=context, convert_camel_case=convert_camel_case
),
methods,
debug=debug,
) | Pure version of dispatch - no logging, no optional parameters.
Does two things:
1. Deserializes and validates the string.
2. Calls each request.
Args:
request: The incoming request string.
methods: Collection of methods that can be called.
context: If specified, will be the first positional argument in all requests.
convert_camel_case: Will convert the method name/any named params to snake case.
debug: Include more information in error responses.
Returns:
A Response. | Below is the the instruction that describes the task:
### Input:
Pure version of dispatch - no logging, no optional parameters.
Does two things:
1. Deserializes and validates the string.
2. Calls each request.
Args:
request: The incoming request string.
methods: Collection of methods that can be called.
context: If specified, will be the first positional argument in all requests.
convert_camel_case: Will convert the method name/any named params to snake case.
debug: Include more information in error responses.
Returns:
A Response.
### Response:
def dispatch_pure(
request: str,
methods: Methods,
*,
context: Any,
convert_camel_case: bool,
debug: bool,
) -> Response:
"""
Pure version of dispatch - no logging, no optional parameters.
Does two things:
1. Deserializes and validates the string.
2. Calls each request.
Args:
request: The incoming request string.
methods: Collection of methods that can be called.
context: If specified, will be the first positional argument in all requests.
convert_camel_case: Will convert the method name/any named params to snake case.
debug: Include more information in error responses.
Returns:
A Response.
"""
try:
deserialized = validate(deserialize(request), schema)
except JSONDecodeError as exc:
return InvalidJSONResponse(data=str(exc), debug=debug)
except ValidationError as exc:
return InvalidJSONRPCResponse(data=None, debug=debug)
return call_requests(
create_requests(
deserialized, context=context, convert_camel_case=convert_camel_case
),
methods,
debug=debug,
) |
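dispatch_pure is the internal core; typical use goes through the public wrapper, roughly as follows (a hedged sketch of the v4-era jsonrpcserver API):

from jsonrpcserver import method, dispatch

@method
def ping():
    return "pong"

response = dispatch('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
print(response)  # {"jsonrpc": "2.0", "result": "pong", "id": 1}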
def create_hls_profile(apps, schema_editor):
""" Create hls profile """
Profile = apps.get_model("edxval", "Profile")
Profile.objects.get_or_create(profile_name=HLS_PROFILE) | Create hls profile | Below is the the instruction that describes the task:
### Input:
Create hls profile
### Response:
def create_hls_profile(apps, schema_editor):
""" Create hls profile """
Profile = apps.get_model("edxval", "Profile")
Profile.objects.get_or_create(profile_name=HLS_PROFILE) |
def _set_operable_view(self, session):
"""Sets the underlying operable views to match current view"""
for obj_name in self._operable_views:
if self._operable_views[obj_name] == ACTIVE:
try:
getattr(session, 'use_active_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_any_status_' + obj_name + '_view')()
except AttributeError:
pass | Sets the underlying operable views to match current view | Below is the the instruction that describes the task:
### Input:
Sets the underlying operable views to match current view
### Response:
def _set_operable_view(self, session):
"""Sets the underlying operable views to match current view"""
for obj_name in self._operable_views:
if self._operable_views[obj_name] == ACTIVE:
try:
getattr(session, 'use_active_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_any_status_' + obj_name + '_view')()
except AttributeError:
pass |
def normalize_hostname(hostname):
'''Normalizes a hostname so that it is ASCII and valid domain name.'''
try:
new_hostname = hostname.encode('idna').decode('ascii').lower()
except UnicodeError as error:
raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname | Normalizes a hostname so that it is ASCII and valid domain name. | Below is the the instruction that describes the task:
### Input:
Normalizes a hostname so that it is ASCII and valid domain name.
### Response:
def normalize_hostname(hostname):
'''Normalizes a hostname so that it is ASCII and valid domain name.'''
try:
new_hostname = hostname.encode('idna').decode('ascii').lower()
except UnicodeError as error:
raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname |
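The round-trip check leans on Python's built-in idna codec; a self-contained illustration:

hostname = "Bücher.example"
ascii_name = hostname.encode("idna").decode("ascii").lower()
assert ascii_name == "xn--bcher-kva.example"
ascii_name.encode("idna")  # re-encodes cleanly, so the name would be accepted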
def marker(self, marker_name=None, label=None,
color=None, retina=False):
"""Returns a single marker image without any
background map.
Parameters
----------
marker_name : str
The marker's shape and size.
label : str, optional
The marker's alphanumeric label.
Options are a through z, 0 through 99, or the
name of a valid Maki icon.
color : str, optional
The marker's color.
Options are three- or six-digit hexadecimal
color codes.
retina : bool, optional
The marker's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is false.
Returns
-------
request.Response
The response object with the specified marker.
"""
# Check for marker_name.
if marker_name is None:
raise ValidationError(
"marker_name is a required argument"
)
# Validate marker_name and retina.
marker_name = self._validate_marker_name(marker_name)
retina = self._validate_retina(retina)
# Create dict and start building URI resource path.
path_values = dict(
marker_name=marker_name
)
path_part = "/marker/{marker_name}"
# Validate label, update dict,
# and continue building URI resource path.
if label is not None:
label = self._validate_label(label)
path_values["label"] = label
path_part += "-{label}"
# Validate color, update dict,
# and continue building URI resource path.
if color is not None:
color = self._validate_color(color)
path_values["color"] = color
path_part += "+{color}"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
# Finish building URI resource path.
path_part = "{}.png".format(retina)
uri += path_part
# Send HTTP GET request.
response = self.session.get(uri)
self.handle_http_error(response)
return response | Returns a single marker image without any
background map.
Parameters
----------
marker_name : str
The marker's shape and size.
label : str, optional
The marker's alphanumeric label.
Options are a through z, 0 through 99, or the
name of a valid Maki icon.
color : str, optional
The marker's color.
Options are three- or six-digit hexadecimal
color codes.
retina : bool, optional
The marker's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is false.
Returns
-------
request.Response
The response object with the specified marker. | Below is the the instruction that describes the task:
### Input:
Returns a single marker image without any
background map.
Parameters
----------
marker_name : str
The marker's shape and size.
label : str, optional
The marker's alphanumeric label.
Options are a through z, 0 through 99, or the
name of a valid Maki icon.
color : str, optional
The marker's color.
Options are three- or six-digit hexadecimal
color codes.
retina : bool, optional
The marker's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is false.
Returns
-------
request.Response
The response object with the specified marker.
### Response:
def marker(self, marker_name=None, label=None,
color=None, retina=False):
"""Returns a single marker image without any
background map.
Parameters
----------
marker_name : str
The marker's shape and size.
label : str, optional
The marker's alphanumeric label.
Options are a through z, 0 through 99, or the
name of a valid Maki icon.
color : str, optional
The marker's color.
Options are three- or six-digit hexadecimal
color codes.
retina : bool, optional
The marker's scale, where True indicates Retina scale
(double scale) and False indicates regular scale.
The default value is false.
Returns
-------
request.Response
The response object with the specified marker.
"""
# Check for marker_name.
if marker_name is None:
raise ValidationError(
"marker_name is a required argument"
)
# Validate marker_name and retina.
marker_name = self._validate_marker_name(marker_name)
retina = self._validate_retina(retina)
# Create dict and start building URI resource path.
path_values = dict(
marker_name=marker_name
)
path_part = "/marker/{marker_name}"
# Validate label, update dict,
# and continue building URI resource path.
if label is not None:
label = self._validate_label(label)
path_values["label"] = label
path_part += "-{label}"
# Validate color, update dict,
# and continue building URI resource path.
if color is not None:
color = self._validate_color(color)
path_values["color"] = color
path_part += "+{color}"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
# Finish building URI resource path.
path_part = "{}.png".format(retina)
uri += path_part
# Send HTTP GET request.
response = self.session.get(uri)
self.handle_http_error(response)
return response |
def format_underline(s, char="=", indents=0):
"""
Traces a dashed line below a string
Args:
s: string
char:
indents: number of leading indenting spaces
Returns: list
>>> print("\\n".join(format_underline("Life of João da Silva", "^", 2)))
Life of João da Silva
^^^^^^^^^^^^^^^^^^^^^
"""
n = len(s)
ind = " " * indents
return ["{}{}".format(ind, s), "{}{}".format(ind, char*n)] | Traces a dashed line below string
Args:
s: string
char:
indents: number of leading indenting spaces
Returns: list
>>> print("\\n".join(format_underline("Life of João da Silva", "^", 2)))
Life of João da Silva
^^^^^^^^^^^^^^^^^^^^^ | Below is the the instruction that describes the task:
### Input:
Traces a dashed line below a string
Args:
s: string
char:
indents: number of leading indenting spaces
Returns: list
>>> print("\\n".join(format_underline("Life of João da Silva", "^", 2)))
Life of João da Silva
^^^^^^^^^^^^^^^^^^^^^
### Response:
def format_underline(s, char="=", indents=0):
"""
Traces a dashed line below a string
Args:
s: string
char:
indents: number of leading indenting spaces
Returns: list
>>> print("\\n".join(format_underline("Life of João da Silva", "^", 2)))
Life of João da Silva
^^^^^^^^^^^^^^^^^^^^^
"""
n = len(s)
ind = " " * indents
return ["{}{}".format(ind, s), "{}{}".format(ind, char*n)] |
def lpd_to_noaa(D, wds_url, lpd_url, version, path=""):
"""
Convert a LiPD format to NOAA format
:param dict D: Metadata
:return dict D: Metadata
"""
logger_noaa.info("enter process_lpd")
d = D
try:
dsn = get_dsn(D)
# Remove all the characters that are not allowed here. Since we're making URLs, they have to be compliant.
dsn = re.sub(r'[^A-Za-z-.0-9]', '', dsn)
# project = re.sub(r'[^A-Za-z-.0-9]', '', project)
version = re.sub(r'[^A-Za-z-.0-9]', '', version)
# Create the conversion object, and start the conversion process
_convert_obj = LPD_NOAA(D, dsn, wds_url, lpd_url, version, path)
_convert_obj.main()
# get our new, modified master JSON from the conversion object
d = _convert_obj.get_master()
noaas = _convert_obj.get_noaa_texts()
__write_noaas(noaas, path)
# remove any root level urls that are deprecated
d = __rm_wdc_url(d)
except Exception as e:
logger_noaa.error("lpd_to_noaa: {}".format(e))
print("Error: lpd_to_noaa: {}".format(e))
# logger_noaa.info("exit lpd_to_noaa")
return d | Convert a LiPD format to NOAA format
:param dict D: Metadata
:return dict D: Metadata | Below is the the instruction that describes the task:
### Input:
Convert a LiPD format to NOAA format
:param dict D: Metadata
:return dict D: Metadata
### Response:
def lpd_to_noaa(D, wds_url, lpd_url, version, path=""):
"""
Convert a LiPD format to NOAA format
:param dict D: Metadata
:return dict D: Metadata
"""
logger_noaa.info("enter process_lpd")
d = D
try:
dsn = get_dsn(D)
# Remove all the characters that are not allowed here. Since we're making URLs, they have to be compliant.
dsn = re.sub(r'[^A-Za-z-.0-9]', '', dsn)
# project = re.sub(r'[^A-Za-z-.0-9]', '', project)
version = re.sub(r'[^A-Za-z-.0-9]', '', version)
# Create the conversion object, and start the conversion process
_convert_obj = LPD_NOAA(D, dsn, wds_url, lpd_url, version, path)
_convert_obj.main()
# get our new, modified master JSON from the conversion object
d = _convert_obj.get_master()
noaas = _convert_obj.get_noaa_texts()
__write_noaas(noaas, path)
# remove any root level urls that are deprecated
d = __rm_wdc_url(d)
except Exception as e:
logger_noaa.error("lpd_to_noaa: {}".format(e))
print("Error: lpd_to_noaa: {}".format(e))
# logger_noaa.info("exit lpd_to_noaa")
return d |
def load_module(self, name):
"""
If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import.
"""
self.loaded_modules.append(name)
try:
__import__(name, {}, {}, [])
mod = sys.modules[name]
self._run_hooks(name, mod)
except:
self.loaded_modules.pop()
raise
return mod | If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import. | Below is the the instruction that describes the task:
### Input:
If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import.
### Response:
def load_module(self, name):
"""
If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import.
"""
self.loaded_modules.append(name)
try:
__import__(name, {}, {}, [])
mod = sys.modules[name]
self._run_hooks(name, mod)
except:
self.loaded_modules.pop()
raise
return mod |
def _make_request(self, opener, request, timeout=None):
"""Make the API call and return the response. This is separated into
its own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
"""
timeout = timeout or self.timeout
try:
return opener.open(request, timeout=timeout)
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc | Make the API call and return the response. This is separated into
its own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response | Below is the the instruction that describes the task:
### Input:
Make the API call and return the response. This is separated into
its own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
### Response:
def _make_request(self, opener, request, timeout=None):
"""Make the API call and return the response. This is separated into
its own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response
"""
timeout = timeout or self.timeout
try:
return opener.open(request, timeout=timeout)
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc |
def _set_bookmarks(self, bookmarks):
"""
Set the bookmarks stored on the server.
"""
storage = bookmark_xso.Storage()
storage.bookmarks[:] = bookmarks
yield from self._private_xml.set_private_xml(storage) | Set the bookmarks stored on the server. | Below is the the instruction that describes the task:
### Input:
Set the bookmarks stored on the server.
### Response:
def _set_bookmarks(self, bookmarks):
"""
Set the bookmarks stored on the server.
"""
storage = bookmark_xso.Storage()
storage.bookmarks[:] = bookmarks
yield from self._private_xml.set_private_xml(storage) |
def transform_courserun_description(self, content_metadata_item):
"""
Return the description of the courserun content item.
"""
description_with_locales = []
content_metadata_language_code = transform_language_code(content_metadata_item.get('content_language', ''))
for locale in self.enterprise_configuration.get_locales(default_locale=content_metadata_language_code):
description_with_locales.append({
'locale': locale,
'value': (
content_metadata_item['full_description'] or
content_metadata_item['short_description'] or
content_metadata_item['title'] or
''
)
})
return description_with_locales | Return the description of the courserun content item. | Below is the the instruction that describes the task:
### Input:
Return the description of the courserun content item.
### Response:
def transform_courserun_description(self, content_metadata_item):
"""
Return the description of the courserun content item.
"""
description_with_locales = []
content_metadata_language_code = transform_language_code(content_metadata_item.get('content_language', ''))
for locale in self.enterprise_configuration.get_locales(default_locale=content_metadata_language_code):
description_with_locales.append({
'locale': locale,
'value': (
content_metadata_item['full_description'] or
content_metadata_item['short_description'] or
content_metadata_item['title'] or
''
)
})
return description_with_locales |
def participate(self):
"""Finish reading and send text"""
try:
while True:
left = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "left_button"))
)
right = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "right_button"))
)
random.choice((left, right)).click()
time.sleep(1.0)
except TimeoutException:
return False | Finish reading and send text | Below is the the instruction that describes the task:
### Input:
Finish reading and send text
### Response:
def participate(self):
"""Finish reading and send text"""
try:
while True:
left = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "left_button"))
)
right = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, "right_button"))
)
random.choice((left, right)).click()
time.sleep(1.0)
except TimeoutException:
return False |
def release_lock(self, lock, force=False):
''' Frees a lock '''
pid = os.getpid()
caller = inspect.stack()[0][3]
# try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
# except:
# logger.error('Process {0} ({1}) could not release lock {2}'.format(pid, caller, lock.resource))
# return False
if lock and lock._held:
lock.release()
if self.logger:
self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller)) | Frees a lock | Below is the the instruction that describes the task:
### Input:
Frees a lock
### Response:
def release_lock(self, lock, force=False):
''' Frees a lock '''
pid = os.getpid()
caller = inspect.stack()[0][3]
# try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
# except:
# logger.error('Process {0} ({1}) could not release lock {2}'.format(pid, caller, lock.resource))
# return False
if lock and lock._held:
lock.release()
if self.logger:
self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller)) |
def bind_unix_socket(path):
""" Returns a unix file socket bound on (path). """
assert path
bindsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(path)
except OSError:
if os.path.exists(path):
raise
try:
bindsocket.bind(path)
except socket.error:
logger.error("Couldn't bind socket on %s", path)
return None
logger.info('Listening on %s', path)
bindsocket.listen(0)
return bindsocket | Returns a unix file socket bound on (path). | Below is the the instruction that describes the task:
### Input:
Returns a unix file socket bound on (path).
### Response:
def bind_unix_socket(path):
""" Returns a unix file socket bound on (path). """
assert path
bindsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(path)
except OSError:
if os.path.exists(path):
raise
try:
bindsocket.bind(path)
except socket.error:
logger.error("Couldn't bind socket on %s", path)
return None
logger.info('Listening on %s', path)
bindsocket.listen(0)
return bindsocket |
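A runnable sketch pairing such a listener with a client (the socket path and payload are made up; Unix only):

import os
import socket
import threading

path = "/tmp/demo.sock"
if os.path.exists(path):
    os.unlink(path)
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(path)
server.listen(0)

def serve_once():
    conn, _ = server.accept()
    conn.sendall(b"hello")
    conn.close()

threading.Thread(target=serve_once).start()
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect(path)
assert client.recv(1024) == b"hello"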
def p_field_optional2_5(self, p):
"""
field : alias name directives
"""
p[0] = Field(name=p[2], alias=p[1], directives=p[3]) | field : alias name directives | Below is the the instruction that describes the task:
### Input:
field : alias name directives
### Response:
def p_field_optional2_5(self, p):
"""
field : alias name directives
"""
p[0] = Field(name=p[2], alias=p[1], directives=p[3]) |
def read_byte(self, address):
"""Reads unadressed byte from a device. """
LOGGER.debug("Reading byte from device %s!", hex(address))
return self.driver.read_byte(address) | Reads an unaddressed byte from a device. | Below is the the instruction that describes the task:
### Input:
Reads an unaddressed byte from a device.
### Response:
def read_byte(self, address):
"""Reads unadressed byte from a device. """
LOGGER.debug("Reading byte from device %s!", hex(address))
return self.driver.read_byte(address) |
def get_word_at(self, coordinates):
"""Return word at *coordinates* (QPoint)"""
cursor = self.cursorForPosition(coordinates)
cursor.select(QTextCursor.WordUnderCursor)
return to_text_string(cursor.selectedText()) | Return word at *coordinates* (QPoint) | Below is the the instruction that describes the task:
### Input:
Return word at *coordinates* (QPoint)
### Response:
def get_word_at(self, coordinates):
"""Return word at *coordinates* (QPoint)"""
cursor = self.cursorForPosition(coordinates)
cursor.select(QTextCursor.WordUnderCursor)
return to_text_string(cursor.selectedText()) |
def cached_class(klass):
"""
Decorator to cache class instances by constructor arguments.
This results in a class that behaves like a singleton for each
set of constructor arguments, ensuring efficiency.
Note that this should be used for *immutable classes only*. Having
a cached mutable class makes very little sense. For efficiency,
avoid using this decorator for situations where there are many
constructor-argument permutations.
The keywords argument dictionary is converted to a tuple because
dicts are mutable; keywords themselves are strings and
so are always hashable, but if any arguments (keyword
or positional) are non-hashable, that set of arguments
is not cached.
"""
cache = {}
@wraps(klass, assigned=("__name__", "__module__"), updated=())
class _decorated(klass):
# The wraps decorator can't do this because __doc__
# isn't writable once the class is created
__doc__ = klass.__doc__
def __new__(cls, *args, **kwargs):
key = (cls,) + args + tuple(kwargs.items())
try:
inst = cache.get(key, None)
except TypeError:
# Can't cache this set of arguments
inst = key = None
if inst is None:
# Technically this is cheating, but it works,
# and takes care of initializing the instance
# (so we can override __init__ below safely);
# calling up to klass.__new__ would be the
# "official" way to create the instance, but
# that raises DeprecationWarning if there are
# args or kwargs and klass does not override
# __new__ (which most classes don't), because
# object.__new__ takes no parameters (and in
# Python 3 the warning will become an error)
inst = klass(*args, **kwargs)
# This makes isinstance and issubclass work
# properly
inst.__class__ = cls
if key is not None:
cache[key] = inst
return inst
def __init__(self, *args, **kwargs):
# This will be called every time __new__ is
# called, so we skip initializing here and do
# it only when the instance is created above
pass
return _decorated | Decorator to cache class instances by constructor arguments.
This results in a class that behaves like a singleton for each
set of constructor arguments, ensuring efficiency.
Note that this should be used for *immutable classes only*. Having
a cached mutable class makes very little sense. For efficiency,
avoid using this decorator for situations where there are many
constructor argument permutations.
The keyword arguments dictionary is converted to a tuple because
dicts are mutable; keywords themselves are strings and
so are always hashable, but if any arguments (keyword
or positional) are non-hashable, that set of arguments
is not cached. | Below is the instruction that describes the task:
### Input:
Decorator to cache class instances by constructor arguments.
This results in a class that behaves like a singleton for each
set of constructor arguments, ensuring efficiency.
Note that this should be used for *immutable classes only*. Having
a cached mutable class makes very little sense. For efficiency,
avoid using this decorator for situations where there are many
constructor argument permutations.
The keyword arguments dictionary is converted to a tuple because
dicts are mutable; keywords themselves are strings and
so are always hashable, but if any arguments (keyword
or positional) are non-hashable, that set of arguments
is not cached.
### Response:
def cached_class(klass):
"""
Decorator to cache class instances by constructor arguments.
This results in a class that behaves like a singleton for each
set of constructor arguments, ensuring efficiency.
Note that this should be used for *immutable classes only*. Having
a cached mutable class makes very little sense. For efficiency,
avoid using this decorator for situations where there are many
constructor argument permutations.
The keyword arguments dictionary is converted to a tuple because
dicts are mutable; keywords themselves are strings and
so are always hashable, but if any arguments (keyword
or positional) are non-hashable, that set of arguments
is not cached.
"""
cache = {}
@wraps(klass, assigned=("__name__", "__module__"), updated=())
class _decorated(klass):
# The wraps decorator can't do this because __doc__
# isn't writable once the class is created
__doc__ = klass.__doc__
def __new__(cls, *args, **kwargs):
key = (cls,) + args + tuple(kwargs.items())
try:
inst = cache.get(key, None)
except TypeError:
# Can't cache this set of arguments
inst = key = None
if inst is None:
# Technically this is cheating, but it works,
# and takes care of initializing the instance
# (so we can override __init__ below safely);
# calling up to klass.__new__ would be the
# "official" way to create the instance, but
# that raises DeprecationWarning if there are
# args or kwargs and klass does not override
# __new__ (which most classes don't), because
# object.__new__ takes no parameters (and in
# Python 3 the warning will become an error)
inst = klass(*args, **kwargs)
# This makes isinstance and issubclass work
# properly
inst.__class__ = cls
if key is not None:
cache[key] = inst
return inst
def __init__(self, *args, **kwargs):
# This will be called every time __new__ is
# called, so we skip initializing here and do
# it only when the instance is created above
pass
return _decorated |
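A minimal usage sketch of the decorator above (the Point class and its fields are illustrative): because instances are keyed by constructor arguments, repeating the same arguments returns the identical cached object.
@cached_class
class Point:
    """An immutable 2D point."""
    def __init__(self, x, y):
        self.x = x
        self.y = y

a = Point(1, 2)
b = Point(1, 2)
c = Point(3, 4)
assert a is b      # same arguments -> the same cached instance
assert a is not c  # different arguments -> a distinct instance
assert isinstance(a, Point)  # __class__ is patched, so isinstance still works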
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value | Initialize instance from arguments. | Below is the instruction that describes the task:
### Input:
Initialize instance from arguments.
### Response:
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value |
def remove_member_data_in(self, leaderboard_name, member):
'''
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
'''
self.redis_connection.hdel(
self._member_data_key(leaderboard_name),
member) | Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name. | Below is the instruction that describes the task:
### Input:
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
### Response:
def remove_member_data_in(self, leaderboard_name, member):
'''
Remove the optional member data for a given member in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param member [String] Member name.
'''
self.redis_connection.hdel(
self._member_data_key(leaderboard_name),
member) |
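For orientation, a sketch of the equivalent raw Redis calls, assuming _member_data_key(name) builds a key along the lines of f'{name}:member_data' (the exact key format is defined elsewhere in the library):
import redis

r = redis.StrictRedis()
r.hset('highscores:member_data', 'alice', '{"team": "red"}')
r.hdel('highscores:member_data', 'alice')  # the operation remove_member_data_in issues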
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper | Log memory usage before and after a method. | Below is the instruction that describes the task:
### Input:
Log memory usage before and after a method.
### Response:
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper |
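The runtime module above is App Engine's API; a hedged, cross-platform sketch of the same pattern can sample RSS via psutil instead:
import functools
import logging

import psutil  # substitute for the App Engine runtime API used above

def memory_usage_psutil(method):
    """Log process RSS before and after a method (psutil-based sketch)."""
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        logging.info('Memory before method %s is %d bytes.',
                     method.__name__, psutil.Process().memory_info().rss)
        result = method(*args, **kwargs)
        logging.info('Memory after method %s is %d bytes.',
                     method.__name__, psutil.Process().memory_info().rss)
        return result
    return wrapper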
def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
"""
splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = starmap(parse_result, splits)
return _OUTPUT | A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings | Below is the instruction that describes the task:
### Input:
A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
### Response:
def pipe_strconcat(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that builds a string. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items
conf : {
'part': [
{'value': '<img src="'},
{'subkey': 'img.src'},
{'value': '">'}
]
}
Returns
-------
_OUTPUT : generator of joined strings
"""
splits = get_splits(_INPUT, conf['part'], **cdicts(opts, kwargs))
_OUTPUT = starmap(parse_result, splits)
return _OUTPUT |
def _download_files(self, client, flow_id):
"""Download files from the specified flow.
Args:
client: GRR Client object from which to download flow data.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file | Download files from the specified flow.
Args:
client: GRR Client object from which to download flow data.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files. | Below is the instruction that describes the task:
### Input:
Download files from the specified flow.
Args:
client: GRR Client object from which to download flow data.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files.
### Response:
def _download_files(self, client, flow_id):
"""Download files from the specified flow.
Args:
client: GRR Client object from which to download flow data.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files.
"""
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
# Unzip archive for processing and remove redundant zip
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file |
def from_soup(self,author,soup):
"""
Factory Pattern. Fetches contact data from given soup and builds the object
"""
email = soup.find('span',class_='icon icon-mail').findParent('a').get('href').split(':')[-1] if soup.find('span',class_='icon icon-mail') else ''
facebook = soup.find('span',class_='icon icon-facebook').findParent('a').get('href') if soup.find('span',class_='icon icon-facebook') else ''
twitter = soup.find('span',class_='icon icon-twitter-3').findParent('a').get('href') if soup.find('span',class_='icon icon-twitter-3') else ''
link = soup.find('span',class_='icon icon-link').findParent('a').get('href') if soup.find('span',class_='icon icon-link') else ''
return Contact(email,facebook,twitter,link) | Factory Pattern. Fetches contact data from given soup and builds the object | Below is the instruction that describes the task:
### Input:
Factory Pattern. Fetches contact data from given soup and builds the object
### Response:
def from_soup(self,author,soup):
"""
Factory Pattern. Fetches contact data from given soup and builds the object
"""
email = soup.find('span',class_='icon icon-mail').findParent('a').get('href').split(':')[-1] if soup.find('span',class_='icon icon-mail') else ''
facebook = soup.find('span',class_='icon icon-facebook').findParent('a').get('href') if soup.find('span',class_='icon icon-facebook') else ''
twitter = soup.find('span',class_='icon icon-twitter-3').findParent('a').get('href') if soup.find('span',class_='icon icon-twitter-3') else ''
link = soup.find('span',class_='icon icon-link').findParent('a').get('href') if soup.find('span',class_='icon icon-link') else ''
return Contact(email,facebook,twitter,link) |
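The lookup pattern in isolation, on a hypothetical author-page fragment:
from bs4 import BeautifulSoup

html = '<a href="mailto:jane@example.com"><span class="icon icon-mail"></span></a>'
soup = BeautifulSoup(html, 'html.parser')
span = soup.find('span', class_='icon icon-mail')
email = span.findParent('a').get('href').split(':')[-1] if span else ''
print(email)  # jane@example.com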
def define_as_input(self, pin, pullup=False):
"""Set the input or output mode for a specified pin. Mode should be
either GPIO.OUT or GPIO.IN.
"""
self._validate_channel(pin)
# Set bit to 1 for input or 0 for output.
self.iodir[int(pin/8)] |= 1 << (int(pin%8))
self._write_iodir()
self.pullup(pin, pullup) | Set the input or output mode for a specified pin. Mode should be
either GPIO.OUT or GPIO.IN. | Below is the instruction that describes the task:
### Input:
Set the input or output mode for a specified pin. Mode should be
either GPIO.OUT or GPIO.IN.
### Response:
def define_as_input(self, pin, pullup=False):
"""Set the input or output mode for a specified pin. Mode should be
either GPIO.OUT or GPIO.IN.
"""
self._validate_channel(pin)
# Set bit to 1 for input or 0 for output.
self.iodir[int(pin/8)] |= 1 << (int(pin%8))
self._write_iodir()
self.pullup(pin, pullup) |
def get_runnable_effects(self) -> List[Effect]:
"""
Returns all runnable effects in the project.
:return: List of all runnable effects
"""
return [effect for name, effect in self._effects.items() if effect.runnable] | Returns all runnable effects in the project.
:return: List of all runnable effects | Below is the instruction that describes the task:
### Input:
Returns all runnable effects in the project.
:return: List of all runnable effects
### Response:
def get_runnable_effects(self) -> List[Effect]:
"""
Returns all runnable effects in the project.
:return: List of all runnable effects
"""
return [effect for name, effect in self._effects.items() if effect.runnable] |
def get_float(self, input_string):
"""
Return float type user input
"""
if input_string == '--training_fraction':
# was the flag set?
try:
index = self.args.index(input_string) + 1
except ValueError:
# it wasn't, it's optional, so return the appropriate default
return None
# the flag was set, so check if a value was set, otherwise exit
try:
if self.args[index] in self.flags:
print("\n {flag} was set but a value was not specified".format(flag=input_string))
print_short_help()
sys.exit(1)
except IndexError:
print("\n {flag} was set but a value was not specified".format(flag=input_string))
print_short_help()
sys.exit(1)
# a value was set, so check if it's the correct type
try:
value = float(self.args[index])
except ValueError:
print("\n {flag} must be a float less than or equal to 1, e.g. 0.4".format(flag=input_string))
print_short_help()
sys.exit(1)
if value > 1.0 or value < 0:
print("\n {flag} must be a float less than or equal to 1, e.g. 0.4".format(flag=input_string))
print_short_help()
sys.exit(1)
# everything checks out, so return the appropriate value
return value | Return float type user input | Below is the instruction that describes the task:
### Input:
Return float type user input
### Response:
def get_float(self, input_string):
"""
Return float type user input
"""
if input_string == '--training_fraction':
# was the flag set?
try:
index = self.args.index(input_string) + 1
except ValueError:
# it wasn't, it's optional, so return the appropriate default
return None
# the flag was set, so check if a value was set, otherwise exit
try:
if self.args[index] in self.flags:
print("\n {flag} was set but a value was not specified".format(flag=input_string))
print_short_help()
sys.exit(1)
except IndexError:
print("\n {flag} was set but a value was not specified".format(flag=input_string))
print_short_help()
sys.exit(1)
# a value was set, so check if it's the correct type
try:
value = float(self.args[index])
except ValueError:
print("\n {flag} must be a float less than or equal to 1, e.g. 0.4".format(flag=input_string))
print_short_help()
sys.exit(1)
if value > 1.0 or value < 0:
print("\n {flag} must be a float less than or equal to 1, e.g. 0.4".format(flag=input_string))
print_short_help()
sys.exit(1)
# everything checks out, so return the appropriate value
return value |
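A sketch of the expected behavior, assuming get_float is importable as a plain function; the FakeCLI harness and its attribute names are hypothetical stand-ins for the real class:
class FakeCLI:
    get_float = get_float  # reuse the method above
    def __init__(self, argv):
        self.args = argv
        self.flags = {'--training_fraction'}

print(FakeCLI(['--training_fraction', '0.4']).get_float('--training_fraction'))  # 0.4
print(FakeCLI([]).get_float('--training_fraction'))  # None (optional flag not set)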
def keys(self):
"""
Access the keys
:returns: twilio.rest.api.v2010.account.key.KeyList
:rtype: twilio.rest.api.v2010.account.key.KeyList
"""
if self._keys is None:
self._keys = KeyList(self._version, account_sid=self._solution['sid'], )
return self._keys | Access the keys
:returns: twilio.rest.api.v2010.account.key.KeyList
:rtype: twilio.rest.api.v2010.account.key.KeyList | Below is the instruction that describes the task:
### Input:
Access the keys
:returns: twilio.rest.api.v2010.account.key.KeyList
:rtype: twilio.rest.api.v2010.account.key.KeyList
### Response:
def keys(self):
"""
Access the keys
:returns: twilio.rest.api.v2010.account.key.KeyList
:rtype: twilio.rest.api.v2010.account.key.KeyList
"""
if self._keys is None:
self._keys = KeyList(self._version, account_sid=self._solution['sid'], )
return self._keys |
def copy_root_log_to_file(filename: str,
fmt: str = LOG_FORMAT,
datefmt: str = LOG_DATEFMT) -> None:
"""
Copy all currently configured logs to the specified file.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config.
"""
fh = logging.FileHandler(filename)
# default file mode is 'a' for append
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
fh.setFormatter(formatter)
apply_handler_to_root_log(fh) | Copy all currently configured logs to the specified file.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config. | Below is the instruction that describes the task:
### Input:
Copy all currently configured logs to the specified file.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config.
### Response:
def copy_root_log_to_file(filename: str,
fmt: str = LOG_FORMAT,
datefmt: str = LOG_DATEFMT) -> None:
"""
Copy all currently configured logs to the specified file.
Should ONLY be called from the ``if __name__ == 'main'`` script;
see https://docs.python.org/3.4/howto/logging.html#library-config.
"""
fh = logging.FileHandler(filename)
# default file mode is 'a' for append
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
fh.setFormatter(formatter)
apply_handler_to_root_log(fh) |
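A sketch of the intended call site, assuming the module's LOG_FORMAT and LOG_DATEFMT defaults are in scope:
import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    copy_root_log_to_file('run.log')  # root logs now also stream to run.log
    logging.info('this line appears on the console and in run.log')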
def discover(path, filter_specs=filter_specs):
"""
Discover all of the specs recursively inside ``path``.
Successively yields the (full) relative paths to each spec.
"""
for dirpath, _, filenames in os.walk(path):
for spec in filter_specs(filenames):
yield os.path.join(dirpath, spec) | Discover all of the specs recursively inside ``path``.
Successively yields the (full) relative paths to each spec. | Below is the instruction that describes the task:
### Input:
Discover all of the specs recursively inside ``path``.
Successively yields the (full) relative paths to each spec.
### Response:
def discover(path, filter_specs=filter_specs):
"""
Discover all of the specs recursively inside ``path``.
Successively yields the (full) relative paths to each spec.
"""
for dirpath, _, filenames in os.walk(path):
for spec in filter_specs(filenames):
yield os.path.join(dirpath, spec) |
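Usage sketch with a hypothetical filter (the default filter_specs comes from elsewhere in the package):
def only_specs(filenames):  # hypothetical filter: keep *_spec.py files
    return [f for f in filenames if f.endswith('_spec.py')]

for spec in discover('tests', filter_specs=only_specs):
    print(spec)  # e.g. tests/unit/parser_spec.py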
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time):
"""Marks a flow as being processed on this worker and returns it."""
rdf_flow = self.ReadFlowObject(client_id, flow_id)
# TODO(user): remove the check for a legacy hunt prefix as soon as
# AFF4 is gone.
if rdf_flow.parent_hunt_id and not rdf_flow.parent_hunt_id.startswith("H:"):
rdf_hunt = self.ReadHuntObject(rdf_flow.parent_hunt_id)
if not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(
rdf_hunt.hunt_state):
raise db.ParentHuntIsNotRunningError(client_id, flow_id,
rdf_hunt.hunt_id,
rdf_hunt.hunt_state)
now = rdfvalue.RDFDatetime.Now()
if rdf_flow.processing_on and rdf_flow.processing_deadline > now:
raise ValueError("Flow %s on client %s is already being processed." %
(client_id, flow_id))
processing_deadline = now + processing_time
process_id_string = utils.ProcessIdString()
self.UpdateFlow(
client_id,
flow_id,
processing_on=process_id_string,
processing_since=now,
processing_deadline=processing_deadline)
rdf_flow.processing_on = process_id_string
rdf_flow.processing_since = now
rdf_flow.processing_deadline = processing_deadline
return rdf_flow | Marks a flow as being processed on this worker and returns it. | Below is the instruction that describes the task:
### Input:
Marks a flow as being processed on this worker and returns it.
### Response:
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time):
"""Marks a flow as being processed on this worker and returns it."""
rdf_flow = self.ReadFlowObject(client_id, flow_id)
# TODO(user): remove the check for a legacy hunt prefix as soon as
# AFF4 is gone.
if rdf_flow.parent_hunt_id and not rdf_flow.parent_hunt_id.startswith("H:"):
rdf_hunt = self.ReadHuntObject(rdf_flow.parent_hunt_id)
if not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(
rdf_hunt.hunt_state):
raise db.ParentHuntIsNotRunningError(client_id, flow_id,
rdf_hunt.hunt_id,
rdf_hunt.hunt_state)
now = rdfvalue.RDFDatetime.Now()
if rdf_flow.processing_on and rdf_flow.processing_deadline > now:
raise ValueError("Flow %s on client %s is already being processed." %
(client_id, flow_id))
processing_deadline = now + processing_time
process_id_string = utils.ProcessIdString()
self.UpdateFlow(
client_id,
flow_id,
processing_on=process_id_string,
processing_since=now,
processing_deadline=processing_deadline)
rdf_flow.processing_on = process_id_string
rdf_flow.processing_since = now
rdf_flow.processing_deadline = processing_deadline
return rdf_flow |
def generate_docker_compose(self):
""" Generate a sample docker compose
"""
example = {}
example['app'] = {}
example['app']['environment'] = []
for key in sorted(list(self.spec.keys())):
if self.spec[key]['type'] in (dict, list):
value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
else:
value = f"{self.spec[key].get('example', '')}"
example['app']['environment'].append(f"{self.env_prefix}_{key.upper()}={value}")
print(yaml.dump(example, default_flow_style=False)) | Generate a sample docker compose | Below is the instruction that describes the task:
### Input:
Generate a sample docker compose
### Response:
def generate_docker_compose(self):
""" Generate a sample docker compose
"""
example = {}
example['app'] = {}
example['app']['environment'] = []
for key in sorted(list(self.spec.keys())):
if self.spec[key]['type'] in (dict, list):
value = f"\'{json.dumps(self.spec[key].get('example', ''))}\'"
else:
value = f"{self.spec[key].get('example', '')}"
example['app']['environment'].append(f"{self.env_prefix}_{key.upper()}={value}")
print(yaml.dump(example, default_flow_style=False)) |
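A sketch of the spec shape the method expects; the real schema is defined wherever self.spec is loaded, and the names below are illustrative:
import json
import yaml

class Cfg:
    env_prefix = 'MYAPP'
    spec = {
        'port': {'type': int, 'example': 8080},
        'tags': {'type': list, 'example': ['a', 'b']},
    }
    generate_docker_compose = generate_docker_compose  # assumes it is importable

Cfg().generate_docker_compose()
# prints roughly:
# app:
#   environment:
#   - MYAPP_PORT=8080
#   - MYAPP_TAGS='["a", "b"]'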
def parse_response(self, info, sformat="", state="", **kwargs):
"""
This is the start of a pipeline that will:
1 Deserializes a response into its response message class.
Or :py:class:`oidcmsg.oauth2.ErrorResponse` if it's an error
message
2 verifies the correctness of the response by running the
verify method belonging to the message class used.
3 runs the do_post_parse_response method iff the response was not
an error response.
:param info: The response, can be either in a JSON or an urlencoded
format
:param sformat: Which serialization that was used
:param state: The state
:param kwargs: Extra key word arguments
:return: The parsed and to some extent verified response
"""
if not sformat:
sformat = self.response_body_type
logger.debug('response format: {}'.format(sformat))
if sformat in ['jose', 'jws', 'jwe']:
resp = self.post_parse_response(info, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp
# If format is urlencoded 'info' may be a URL
# in which case I have to get at the query/fragment part
if sformat == "urlencoded":
info = self.get_urlinfo(info)
if sformat == 'jwt':
args = {'allowed_sign_algs':
self.service_context.get_sign_alg(self.service_name)}
enc_algs = self.service_context.get_enc_alg_enc(self.service_name)
args['allowed_enc_algs'] = enc_algs['alg']
args['allowed_enc_encs'] = enc_algs['enc']
_jwt = JWT(key_jar=self.service_context.keyjar, **args)
_jwt.iss = self.service_context.client_id
info = _jwt.unpack(info)
sformat = "dict"
logger.debug('response_cls: {}'.format(self.response_cls.__name__))
try:
resp = self.response_cls().deserialize(
info, sformat, iss=self.service_context.issuer, **kwargs)
except Exception as err:
resp = None
if sformat == 'json':
# Could be JWS or JWE but wrongly tagged
# Adding issuer is just a fail-safe. If one thing was wrong
# then two can be.
try:
resp = self.response_cls().deserialize(
info, 'jwt', iss=self.service_context.issuer, **kwargs)
except Exception as err2:
pass
if resp is None:
logger.error('Error while deserializing: {}'.format(err))
raise
msg = 'Initial response parsing => "{}"'
logger.debug(msg.format(resp.to_dict()))
# is this an error message
if is_error_message(resp):
logger.debug('Error response: {}'.format(resp))
else:
vargs = self.gather_verify_arguments()
logger.debug("Verify response with {}".format(vargs))
try:
# verify the message. If something is wrong an exception is
# thrown
resp.verify(**vargs)
except Exception as err:
logger.error(
'Got exception while verifying response: {}'.format(err))
raise
resp = self.post_parse_response(resp, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp | This is the start of a pipeline that will:
1 Deserializes a response into its response message class.
Or :py:class:`oidcmsg.oauth2.ErrorResponse` if it's an error
message
2 verifies the correctness of the response by running the
verify method belonging to the message class used.
3 runs the do_post_parse_response method iff the response was not
an error response.
:param info: The response, can be either in a JSON or an urlencoded
format
:param sformat: Which serialization that was used
:param state: The state
:param kwargs: Extra key word arguments
:return: The parsed and to some extent verified response | Below is the instruction that describes the task:
### Input:
This is the start of a pipeline that will:
1 Deserializes a response into its response message class.
Or :py:class:`oidcmsg.oauth2.ErrorResponse` if it's an error
message
2 verifies the correctness of the response by running the
verify method belonging to the message class used.
3 runs the do_post_parse_response method iff the response was not
an error response.
:param info: The response, can be either in a JSON or an urlencoded
format
:param sformat: Which serialization that was used
:param state: The state
:param kwargs: Extra key word arguments
:return: The parsed and to some extend verified response
### Response:
def parse_response(self, info, sformat="", state="", **kwargs):
"""
This is the start of a pipeline that will:
1 Deserializes a response into its response message class.
Or :py:class:`oidcmsg.oauth2.ErrorResponse` if it's an error
message
2 verifies the correctness of the response by running the
verify method belonging to the message class used.
3 runs the do_post_parse_response method iff the response was not
an error response.
:param info: The response, can be either in a JSON or an urlencoded
format
:param sformat: Which serialization that was used
:param state: The state
:param kwargs: Extra key word arguments
:return: The parsed and to some extend verified response
"""
if not sformat:
sformat = self.response_body_type
logger.debug('response format: {}'.format(sformat))
if sformat in ['jose', 'jws', 'jwe']:
resp = self.post_parse_response(info, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp
# If format is urlencoded 'info' may be a URL
# in which case I have to get at the query/fragment part
if sformat == "urlencoded":
info = self.get_urlinfo(info)
if sformat == 'jwt':
args = {'allowed_sign_algs':
self.service_context.get_sign_alg(self.service_name)}
enc_algs = self.service_context.get_enc_alg_enc(self.service_name)
args['allowed_enc_algs'] = enc_algs['alg']
args['allowed_enc_encs'] = enc_algs['enc']
_jwt = JWT(key_jar=self.service_context.keyjar, **args)
_jwt.iss = self.service_context.client_id
info = _jwt.unpack(info)
sformat = "dict"
logger.debug('response_cls: {}'.format(self.response_cls.__name__))
try:
resp = self.response_cls().deserialize(
info, sformat, iss=self.service_context.issuer, **kwargs)
except Exception as err:
resp = None
if sformat == 'json':
# Could be JWS or JWE but wrongly tagged
# Adding issuer is just a fail-safe. If one thing was wrong
# then two can be.
try:
resp = self.response_cls().deserialize(
info, 'jwt', iss=self.service_context.issuer, **kwargs)
except Exception as err2:
pass
if resp is None:
logger.error('Error while deserializing: {}'.format(err))
raise
msg = 'Initial response parsing => "{}"'
logger.debug(msg.format(resp.to_dict()))
# is this an error message
if is_error_message(resp):
logger.debug('Error response: {}'.format(resp))
else:
vargs = self.gather_verify_arguments()
logger.debug("Verify response with {}".format(vargs))
try:
# verify the message. If something is wrong an exception is
# thrown
resp.verify(**vargs)
except Exception as err:
logger.error(
'Got exception while verifying response: {}'.format(err))
raise
resp = self.post_parse_response(resp, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp |
def _unique_resource_identifier_from_kwargs(**kwargs):
"""Chooses an identifier given different choices
The unique identifier in BIG-IP's REST API at the time of this writing
is called 'name'. This is in contrast to the unique identifier that is
used by iWorkflow and BIG-IQ which at some times is 'name' and other
times is 'uuid'.
For example, in iWorkflow, there consider this URI
* https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp
Then consider this iWorkflow URI
* https://localhost/mgmt/cm/cloud/connectors/local/{0}
In the first example, the identifier, {0}, is what we would normally
consider a name. For example, "tenant1". In the second example though,
the value is expected to be what we would normally consider to be a
UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.
This method only tries to rectify the problem of which to use.
I believe there might be some chance that the two can appear together,
although I have not yet experienced it. If it is possible, I believe it
would happen in BIG-IQ/iWorkflow land where the UUID and Name both have
significance. That's why I deliberately prefer the UUID when it exists
in the parameters sent to the URL.
:param kwargs:
:return:
"""
name = kwargs.pop('name', '')
uuid = kwargs.pop('uuid', '')
id = kwargs.pop('id', '')
if uuid:
return uuid, kwargs
elif id:
# Used for /mgmt/cm/system/authn/providers/tmos on BIG-IP
return id, kwargs
else:
return name, kwargs | Chooses an identifier given different choices
The unique identifier in BIG-IP's REST API at the time of this writing
is called 'name'. This is in contrast to the unique identifier that is
used by iWorkflow and BIG-IQ which at some times is 'name' and other
times is 'uuid'.
For example, in iWorkflow, consider this URI
* https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp
Then consider this iWorkflow URI
* https://localhost/mgmt/cm/cloud/connectors/local/{0}
In the first example, the identifier, {0}, is what we would normally
consider a name. For example, "tenant1". In the second example though,
the value is expected to be what we would normally consider to be a
UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.
This method only tries to rectify the problem of which to use.
I believe there might be some chance that the two can appear together,
although I have not yet experienced it. If it is possible, I believe it
would happen in BIG-IQ/iWorkflow land where the UUID and Name both have
significance. That's why I deliberately prefer the UUID when it exists
in the parameters sent to the URL.
:param kwargs:
:return: | Below is the instruction that describes the task:
### Input:
Chooses an identifier given different choices
The unique identifier in BIG-IP's REST API at the time of this writing
is called 'name'. This is in contrast to the unique identifier that is
used by iWorkflow and BIG-IQ which at some times is 'name' and other
times is 'uuid'.
For example, in iWorkflow, consider this URI
* https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp
Then consider this iWorkflow URI
* https://localhost/mgmt/cm/cloud/connectors/local/{0}
In the first example, the identifier, {0}, is what we would normally
consider a name. For example, "tenant1". In the second example though,
the value is expected to be what we would normally consider to be a
UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.
This method only tries to rectify the problem of which to use.
I believe there might be some chance that the two can appear together,
although I have not yet experienced it. If it is possible, I believe it
would happen in BIG-IQ/iWorkflow land where the UUID and Name both have
significance. That's why I deliberately prefer the UUID when it exists
in the parameters sent to the URL.
:param kwargs:
:return:
### Response:
def _unique_resource_identifier_from_kwargs(**kwargs):
"""Chooses an identifier given different choices
The unique identifier in BIG-IP's REST API at the time of this writing
is called 'name'. This is in contrast to the unique identifier that is
used by iWorkflow and BIG-IQ which at some times is 'name' and other
times is 'uuid'.
For example, in iWorkflow, consider this URI
* https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp
Then consider this iWorkflow URI
* https://localhost/mgmt/cm/cloud/connectors/local/{0}
In the first example, the identifier, {0}, is what we would normally
consider a name. For example, "tenant1". In the second example though,
the value is expected to be what we would normally consider to be a
UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.
This method only tries to rectify the problem of which to use.
I believe there might be some chance that the two can appear together,
although I have not yet experienced it. If it is possible, I believe it
would happen in BIG-IQ/iWorkflow land where the UUID and Name both have
significance. That's why I deliberately prefer the UUID when it exists
in the parameters sent to the URL.
:param kwargs:
:return:
"""
name = kwargs.pop('name', '')
uuid = kwargs.pop('uuid', '')
id = kwargs.pop('id', '')
if uuid:
return uuid, kwargs
elif id:
# Used for /mgmt/cm/system/authn/providers/tmos on BIG-IP
return id, kwargs
else:
return name, kwargs |
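Behavior sketch: the uuid wins over id and name, and all three identifier keys are popped from kwargs either way.
ident, rest = _unique_resource_identifier_from_kwargs(
    name='tenant1',
    uuid='244bd478-374e-4eb2-8c73-6e46d7112604',
    partition='Common')
print(ident)  # 244bd478-374e-4eb2-8c73-6e46d7112604
print(rest)   # {'partition': 'Common'} -- name/uuid/id were all consumed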
def digest_file(fname):
"""
Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
* empty file, file with one space, file with one return all produce distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing
"""
#chunk size in bytes
size = 4096
hval = hashlib.new(HASH_TYPE)
with open(fname, 'rb') as fd:
for chunk in iter(lambda: fd.read(size), b''):
hval.update(chunk)
return hval.hexdigest() | Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
* empty file, file with one space, file with one return all produce distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing | Below is the instruction that describes the task:
### Input:
Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
* empty file, file with one space, file with one return all produce distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing
### Response:
def digest_file(fname):
"""
Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
* empty file, file with one space, file with one return all produce distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing
"""
#chunk size in bytes
size = 4096
hval = hashlib.new(HASH_TYPE)
with open(fname, 'rb') as fd:
for chunk in iter(lambda: fd.read(size), b''):
hval.update(chunk)
return hval.hexdigest() |
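Usage sketch, assuming the module constant HASH_TYPE is 'sha256' (it is referenced above but not shown):
import hashlib

HASH_TYPE = 'sha256'  # assumed value of the module constant

open('empty.bin', 'wb').close()
print(digest_file('empty.bin'))
# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
# (the well-known SHA-256 of empty input; compare: openssl sha256 empty.bin)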
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1')
params = [key, bit]
start is not None and params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise DataError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params) | Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes. | Below is the instruction that describes the task:
### Input:
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
### Response:
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise DataError('bit must be 0 or 1')
params = [key, bit]
start is not None and params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise DataError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params) |
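Usage sketch against a live Redis, mirroring redis-py's client API; note the offsets are byte-based, as the docstring says:
import redis

r = redis.StrictRedis()
r.set('mykey', b'\x00\xff\xf0')
print(r.bitpos('mykey', 1))     # 8  -- the first set bit sits in the second byte
print(r.bitpos('mykey', 1, 2))  # 16 -- the search begins at byte offset 2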
def enrichrgram(self, lib, axis='row'):
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
'''
df = self.export_df()
df, bar_info = enr_fun.add_enrichr_cats(df, axis, lib)
self.load_df(df)
self.dat['enrichrgram_lib'] = lib
self.dat['row_cat_bars'] = bar_info | Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down | Below is the instruction that describes the task:
### Input:
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
### Response:
def enrichrgram(self, lib, axis='row'):
'''
Add Enrichr gene enrichment results to your visualization (where your rows
are genes). Run enrichrgram before clustering to include enrichment results
as row categories. Enrichrgram can also be run on the front-end using the
Enrichr logo at the top left.
Set lib to the Enrichr library that you want to use for enrichment analysis.
Libraries included:
* ChEA_2016
* KEA_2015
* ENCODE_TF_ChIP-seq_2015
* ENCODE_Histone_Modifications_2015
* Disease_Perturbations_from_GEO_up
* Disease_Perturbations_from_GEO_down
* GO_Molecular_Function_2015
* GO_Biological_Process_2015
* GO_Cellular_Component_2015
* Reactome_2016
* KEGG_2016
* MGI_Mammalian_Phenotype_Level_4
* LINCS_L1000_Chem_Pert_up
* LINCS_L1000_Chem_Pert_down
'''
df = self.export_df()
df, bar_info = enr_fun.add_enrichr_cats(df, axis, lib)
self.load_df(df)
self.dat['enrichrgram_lib'] = lib
self.dat['row_cat_bars'] = bar_info |
def _set_telnet(self, v, load=False):
"""
Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_telnet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_telnet() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """telnet must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)""",
})
self.__telnet = t
if hasattr(self, '_set'):
self._set() | Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_telnet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_telnet() directly. | Below is the instruction that describes the task:
### Input:
Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_telnet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_telnet() directly.
### Response:
def _set_telnet(self, v, load=False):
"""
Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_telnet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_telnet() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """telnet must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)""",
})
self.__telnet = t
if hasattr(self, '_set'):
self._set() |
def campaign(self, name, owner=None, **kwargs):
"""
Create the Campaign TI object.
Args:
owner:
name:
**kwargs:
Return:
"""
return Campaign(self.tcex, name, owner=owner, **kwargs) | Create the Campaign TI object.
Args:
owner:
name:
**kwargs:
Return: | Below is the instruction that describes the task:
### Input:
Create the Campaign TI object.
Args:
owner:
name:
**kwargs:
Return:
### Response:
def campaign(self, name, owner=None, **kwargs):
"""
Create the Campaign TI object.
Args:
owner:
name:
**kwargs:
Return:
"""
return Campaign(self.tcex, name, owner=owner, **kwargs) |
def read_command(self):
"""
Attempt to read the next command from the editor/server
:return: boolean. Did we actually read a command?
"""
# Do a non-blocking read here so the demo can keep running if there is no data
comm = self.reader.byte(blocking=False)
if comm is None:
return False
cmds = {
SET_KEY: self.handle_set_key,
DELETE_KEY: self.handle_delete_key,
SET_ROW: self.handle_set_row,
PAUSE: self.handle_pause,
SAVE_TRACKS: self.handle_save_tracks
}
func = cmds.get(comm)
if func:
func()
else:
logger.error("Unknown command: %s", comm)
return True | Attempt to read the next command from the editor/server
:return: boolean. Did we actually read a command? | Below is the the instruction that describes the task:
### Input:
Attempt to read the next command from the editor/server
:return: boolean. Did we actually read a command?
### Response:
def read_command(self):
"""
Attempt to read the next command from the editor/server
:return: boolean. Did we actually read a command?
"""
# Do a non-blocking read here so the demo can keep running if there is no data
comm = self.reader.byte(blocking=False)
if comm is None:
return False
cmds = {
SET_KEY: self.handle_set_key,
DELETE_KEY: self.handle_delete_key,
SET_ROW: self.handle_set_row,
PAUSE: self.handle_pause,
SAVE_TRACKS: self.handle_save_tracks
}
func = cmds.get(comm)
if func:
func()
else:
logger.error("Unknown command: %s", comm)
return True |
def convert(sk_obj, input_features = None,
output_feature_names = None):
"""
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the outputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
"""
# This function is just a thin wrapper around the internal converter so
# that sklearn isn't actually imported unless this function is called
from ...models import MLModel
# NOTE: Providing user-defined class labels will be enabled when
# several issues with the ordering of the classes are worked out. For now,
# to use custom class labels, directly import the internal function below.
from ._converter_internal import _convert_sklearn_model
spec = _convert_sklearn_model(
sk_obj, input_features, output_feature_names, class_labels = None)
return MLModel(spec) | Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the outputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel') | Below is the instruction that describes the task:
### Input:
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the outputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model: MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
### Response:
def convert(sk_obj, input_features = None,
output_feature_names = None):
"""
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
Optional name(s) that can be given to the outputs of the scikit-learn
model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
Returns
-------
model: MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(data[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
"""
# This function is just a thin wrapper around the internal converter so
# that sklearn isn't actually imported unless this function is called
from ...models import MLModel
# NOTE: Providing user-defined class labels will be enabled when
# several issues with the ordering of the classes are worked out. For now,
# to use custom class labels, directly import the internal function below.
from ._converter_internal import _convert_sklearn_model
spec = _convert_sklearn_model(
sk_obj, input_features, output_feature_names, class_labels = None)
return MLModel(spec) |
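Since the docstring above also covers pipelines, here is a hedged sketch of converting a scikit-learn Pipeline rather than a single estimator; it reuses the `data` frame and column names from the example above, which are themselves illustrative assumptions:
>>> from sklearn.pipeline import Pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.linear_model import LinearRegression
>>> pipeline = Pipeline([('scale', StandardScaler()),
...                      ('regress', LinearRegression())])
>>> pipeline.fit(data[["bedroom", "bath", "size"]], data["price"])
>>> coreml_model = coremltools.converters.sklearn.convert(
...     pipeline, ["bedroom", "bath", "size"], "price")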
def can_manage_groups(cls, user):
"""
For use with user_passes_test decorator.
Check if the user can manage groups. Either has the
auth.group_management permission or is a leader of at least one group
and is also a Member.
:param user: django.contrib.auth.models.User for the request
:return: bool True if user can manage groups, False otherwise
"""
if user.is_authenticated:
return cls.has_management_permission(user) or user.leads_groups.all()
return False | For use with user_passes_test decorator.
Check if the user can manage groups. Either has the
auth.group_management permission or is a leader of at least one group
and is also a Member.
:param user: django.contrib.auth.models.User for the request
:return: bool True if user can manage groups, False otherwise | Below is the instruction that describes the task:
### Input:
For use with user_passes_test decorator.
Check if the user can manage groups. Either has the
auth.group_management permission or is a leader of at least one group
and is also a Member.
:param user: django.contrib.auth.models.User for the request
:return: bool True if user can manage groups, False otherwise
### Response:
def can_manage_groups(cls, user):
"""
For use with user_passes_test decorator.
Check if the user can manage groups. Either has the
auth.group_management permission or is a leader of at least one group
and is also a Member.
:param user: django.contrib.auth.models.User for the request
:return: bool True if user can manage groups, False otherwise
"""
if user.is_authenticated:
return cls.has_management_permission(user) or user.leads_groups.all()
return False |
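A hedged usage sketch: this classmethod is shaped to plug straight into Django's user_passes_test decorator. `GroupManager` is an assumed name for the class that defines the method, not part of the original code.

from django.contrib.auth.decorators import user_passes_test

@user_passes_test(GroupManager.can_manage_groups)  # GroupManager is an assumed class name
def group_management_view(request):
    ...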
def merge_variables(variables, name=None, **kwargs):
'''Merge/concatenate a list of variables along the row axis.
Parameters
----------
variables : :obj:`list`
A list of Variables to merge.
name : :obj:`str`
Optional name to assign to the output Variable. By default, uses the
same name as the input variables.
kwargs
Optional keyword arguments to pass onto the class-specific merge() call.
Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns
-------
A single BIDSVariable of the same class as the input variables.
Notes
-----
- Currently, this function only supports homogeneously-typed lists. In
future, it may be extended to support implicit conversion.
- Variables in the list must all share the same name (i.e., it is not
possible to merge two different variables into a single variable.)
'''
classes = set([v.__class__ for v in variables])
if len(classes) > 1:
raise ValueError("Variables of different classes cannot be merged. "
"Variables passed are of classes: %s" % classes)
sources = set([v.source for v in variables])
if len(sources) > 1:
raise ValueError("Variables extracted from different types of files "
"cannot be merged. Sources found: %s" % sources)
return list(classes)[0].merge(variables, **kwargs) | Merge/concatenate a list of variables along the row axis.
Parameters
----------
variables : :obj:`list`
A list of Variables to merge.
name : :obj:`str`
Optional name to assign to the output Variable. By default, uses the
same name as the input variables.
kwargs
Optional keyword arguments to pass onto the class-specific merge() call.
Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns
-------
A single BIDSVariable of the same class as the input variables.
Notes
-----
- Currently, this function only supports homogeneously-typed lists. In
future, it may be extended to support implicit conversion.
- Variables in the list must all share the same name (i.e., it is not
possible to merge two different variables into a single variable.) | Below is the instruction that describes the task:
### Input:
Merge/concatenate a list of variables along the row axis.
Parameters
----------
variables : :obj:`list`
A list of Variables to merge.
name : :obj:`str`
Optional name to assign to the output Variable. By default, uses the
same name as the input variables.
kwargs
Optional keyword arguments to pass onto the class-specific merge() call.
Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns
-------
A single BIDSVariable of the same class as the input variables.
Notes
-----
- Currently, this function only supports homogeneously-typed lists. In
future, it may be extended to support implicit conversion.
- Variables in the list must all share the same name (i.e., it is not
possible to merge two different variables into a single variable.)
### Response:
def merge_variables(variables, name=None, **kwargs):
'''Merge/concatenate a list of variables along the row axis.
Parameters
----------
variables : :obj:`list`
A list of Variables to merge.
name : :obj:`str`
Optional name to assign to the output Variable. By default, uses the
same name as the input variables.
kwargs
Optional keyword arguments to pass onto the class-specific merge() call.
Possible args:
- sampling_rate (int, str): The sampling rate to use if resampling
of DenseRunVariables is necessary for harmonization. If 'auto',
the highest sampling rate found will be used. This argument is
only used when passing DenseRunVariables in the variables list.
Returns
-------
A single BIDSVariable of the same class as the input variables.
Notes
-----
- Currently, this function only supports homogeneously-typed lists. In
future, it may be extended to support implicit conversion.
- Variables in the list must all share the same name (i.e., it is not
possible to merge two different variables into a single variable.)
'''
classes = set([v.__class__ for v in variables])
if len(classes) > 1:
raise ValueError("Variables of different classes cannot be merged. "
"Variables passed are of classes: %s" % classes)
sources = set([v.source for v in variables])
if len(sources) > 1:
raise ValueError("Variables extracted from different types of files "
"cannot be merged. Sources found: %s" % sources)
return list(classes)[0].merge(variables, **kwargs) |
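A self-contained sketch of the class-based dispatch, using a stand-in variable class (the real BIDSVariable subclasses are not reproduced here):

class StubVariable:
    source = 'events'

    def __init__(self, values):
        self.values = values

    @classmethod
    def merge(cls, variables, **kwargs):
        # concatenate row-wise, mirroring what the real classes do
        merged = []
        for v in variables:
            merged.extend(v.values)
        return cls(merged)

out = merge_variables([StubVariable([1, 2]), StubVariable([3])])
print(out.values)  # [1, 2, 3]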
def pprint(self):
"""Returns:
text: a stream information text summary
"""
s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % (
self.codec_type or self.codec_name or u"???", self.bitrate,
self.sample_rate, self.channels, self.length)
return s | Returns:
text: a stream information text summary | Below is the instruction that describes the task:
### Input:
Returns:
text: a stream information text summary
### Response:
def pprint(self):
"""Returns:
text: a stream information text summary
"""
s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % (
self.codec_type or self.codec_name or u"???", self.bitrate,
self.sample_rate, self.channels, self.length)
return s |
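Hedged usage with mutagen, where this method lives on the ASF stream-info object; the filename and the printed values are illustrative assumptions:

from mutagen.asf import ASF

audio = ASF("example.wma")  # assumed path
print(audio.info.pprint())
# e.g. ASF (wmav2) 128000 bps, 44100 Hz, 2 channels, 213.52 seconds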
def grad_named(fun, argname):
'''Takes gradients with respect to a named argument.
Doesn't work on *args or **kwargs.'''
arg_index = getargspec(fun).args.index(argname)
return grad(fun, arg_index) | Takes gradients with respect to a named argument.
Doesn't work on *args or **kwargs. | Below is the instruction that describes the task:
### Input:
Takes gradients with respect to a named argument.
Doesn't work on *args or **kwargs.
### Response:
def grad_named(fun, argname):
'''Takes gradients with respect to a named argument.
Doesn't work on *args or **kwargs.'''
arg_index = getargspec(fun).args.index(argname)
return grad(fun, arg_index) |
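A short behavioural sketch, assuming grad and getargspec from this module are in scope:

def f(x, y):
    return x * y ** 2

df_dy = grad_named(f, 'y')
print(df_dy(2.0, 3.0))  # 12.0, since d/dy(x * y**2) = 2*x*y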
def mel(sr, n_dft, n_mels=128, fmin=0.0, fmax=None, htk=False, norm=1):
"""[np] create a filterbank matrix to combine stft bins into mel-frequency bins
uses the Slaney formula (as in Librosa)
n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
"""
return librosa.filters.mel(sr=sr, n_fft=n_dft, n_mels=n_mels,
fmin=fmin, fmax=fmax,
htk=htk, norm=norm).astype(K.floatx()) | [np] create a filterbank matrix to combine stft bins into mel-frequency bins
uses the Slaney formula (as in Librosa)
n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0` | Below is the instruction that describes the task:
### Input:
[np] create a filterbank matrix to combine stft bins into mel-frequency bins
uses the Slaney formula (as in Librosa)
n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
### Response:
def mel(sr, n_dft, n_mels=128, fmin=0.0, fmax=None, htk=False, norm=1):
"""[np] create a filterbank matrix to combine stft bins into mel-frequency bins
uses the Slaney formula (as in Librosa)
n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
"""
return librosa.filters.mel(sr=sr, n_fft=n_dft, n_mels=n_mels,
fmin=fmin, fmax=fmax,
htk=htk, norm=norm).astype(K.floatx()) |
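For reference, the underlying librosa call with arbitrary example values; the returned filterbank has shape (n_mels, 1 + n_fft // 2):

import librosa

fb = librosa.filters.mel(sr=22050, n_fft=2048, n_mels=128)
print(fb.shape)  # (128, 1025)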
def _get_conn(ret):
'''
Return a mongodb connection object
'''
_options = _get_options(ret)
host = _options.get('host')
port = _options.get('port')
db_ = _options.get('db')
user = _options.get('user')
password = _options.get('password')
indexes = _options.get('indexes', False)
# At some point we should remove support for pymongo
# versions < 2.3; until then there are a bunch of these
# sections that need to be supported
if PYMONGO_VERSION > _LooseVersion('2.3'):
conn = pymongo.MongoClient(host, port)
else:
conn = pymongo.Connection(host, port)
mdb = conn[db_]
if user and password:
mdb.authenticate(user, password)
if indexes:
if PYMONGO_VERSION > _LooseVersion('2.3'):
mdb.saltReturns.create_index('minion')
mdb.saltReturns.create_index('jid')
mdb.jobs.create_index('jid')
else:
mdb.saltReturns.ensure_index('minion')
mdb.saltReturns.ensure_index('jid')
mdb.jobs.ensure_index('jid')
return conn, mdb | Return a mongodb connection object | Below is the instruction that describes the task:
### Input:
Return a mongodb connection object
### Response:
def _get_conn(ret):
'''
Return a mongodb connection object
'''
_options = _get_options(ret)
host = _options.get('host')
port = _options.get('port')
db_ = _options.get('db')
user = _options.get('user')
password = _options.get('password')
indexes = _options.get('indexes', False)
# At some point we should remove support for pymongo
# versions < 2.3; until then there are a bunch of these
# sections that need to be supported
if PYMONGO_VERSION > _LooseVersion('2.3'):
conn = pymongo.MongoClient(host, port)
else:
conn = pymongo.Connection(host, port)
mdb = conn[db_]
if user and password:
mdb.authenticate(user, password)
if indexes:
if PYMONGO_VERSION > _LooseVersion('2.3'):
mdb.saltReturns.create_index('minion')
mdb.saltReturns.create_index('jid')
mdb.jobs.create_index('jid')
else:
mdb.saltReturns.ensure_index('minion')
mdb.saltReturns.ensure_index('jid')
mdb.jobs.ensure_index('jid')
return conn, mdb |
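A minimal standalone sketch of the modern (pymongo >= 2.3) branch of that logic; host, port, and database name are placeholder values:

import pymongo

conn = pymongo.MongoClient('localhost', 27017)
mdb = conn['salt']
mdb.saltReturns.create_index('minion')
mdb.saltReturns.create_index('jid')
mdb.jobs.create_index('jid')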
def generate_imports_for_referenced_namespaces(
backend, namespace, insert_type_ignore=False):
# type: (Backend, ApiNamespace, bool) -> None
"""
Both the true Python backend and the Python PEP 484 Type Stub backend have
to perform the same imports.
:param insert_type_ignore: add a MyPy type-ignore comment to the imports in
the except: clause.
"""
imported_namespaces = namespace.get_imported_namespaces(consider_annotation_types=True)
if not imported_namespaces:
return
type_ignore_comment = TYPE_IGNORE_COMMENT if insert_type_ignore else ""
backend.emit('try:')
with backend.indent():
backend.emit('from . import (')
with backend.indent():
for ns in imported_namespaces:
backend.emit(fmt_namespace(ns.name) + ',')
backend.emit(')')
backend.emit('except (ImportError, SystemError, ValueError):')
# Fallback if imported from outside a package.
with backend.indent():
for ns in imported_namespaces:
backend.emit('import {namespace_name}{type_ignore_comment}'.format(
namespace_name=fmt_namespace(ns.name),
type_ignore_comment=type_ignore_comment
))
backend.emit() | Both the true Python backend and the Python PEP 484 Type Stub backend have
to perform the same imports.
:param insert_type_ignore: add a MyPy type-ignore comment to the imports in
the except: clause. | Below is the instruction that describes the task:
### Input:
Both the true Python backend and the Python PEP 484 Type Stub backend have
to perform the same imports.
:param insert_type_ignore: add a MyPy type-ignore comment to the imports in
the except: clause.
### Response:
def generate_imports_for_referenced_namespaces(
backend, namespace, insert_type_ignore=False):
# type: (Backend, ApiNamespace, bool) -> None
"""
Both the true Python backend and the Python PEP 484 Type Stub backend have
to perform the same imports.
:param insert_type_ignore: add a MyPy type-ignore comment to the imports in
the except: clause.
"""
imported_namespaces = namespace.get_imported_namespaces(consider_annotation_types=True)
if not imported_namespaces:
return
type_ignore_comment = TYPE_IGNORE_COMMENT if insert_type_ignore else ""
backend.emit('try:')
with backend.indent():
backend.emit('from . import (')
with backend.indent():
for ns in imported_namespaces:
backend.emit(fmt_namespace(ns.name) + ',')
backend.emit(')')
backend.emit('except (ImportError, SystemError, ValueError):')
# Fallback if imported from outside a package.
with backend.indent():
for ns in imported_namespaces:
backend.emit('import {namespace_name}{type_ignore_comment}'.format(
namespace_name=fmt_namespace(ns.name),
type_ignore_comment=type_ignore_comment
))
backend.emit() |
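For reference, the import block those emit calls produce for two imported namespaces ns_a and ns_b (reconstructed from the code above, with insert_type_ignore=False):

try:
    from . import (
        ns_a,
        ns_b,
    )
except (ImportError, SystemError, ValueError):
    import ns_a
    import ns_b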
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) | Access basic account information. | Below is the instruction that describes the task:
### Input:
Access basic account information.
### Response:
def get_user(self):
"""Access basic account information."""
method = 'GET'
endpoint = '/rest/v1/users/{}'.format(self.client.sauce_username)
return self.client.request(method, endpoint) |
def get(self, path, params=None):
"""Perform GET request"""
r = requests.get(url=self.url + path, params=params, timeout=self.timeout)
r.raise_for_status()
return r.json() | Perform GET request | Below is the instruction that describes the task:
### Input:
Perform GET request
### Response:
def get(self, path, params=None):
"""Perform GET request"""
r = requests.get(url=self.url + path, params=params, timeout=self.timeout)
r.raise_for_status()
return r.json() |
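Hedged usage, assuming this method sits on a small client class exposing url and timeout attributes; the class name, host, and path are placeholders:

api = Client('https://api.example.com', timeout=10)  # Client is the assumed owning class
status = api.get('/status', params={'verbose': 1})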
def get_jac(self):
""" Derives the jacobian from ``self.exprs`` and ``self.dep``. """
if self._jac is True:
if self.sparse is True:
self._jac, self._colptrs, self._rowvals = self.be.sparse_jacobian_csc(self.exprs, self.dep)
elif self.band is not None: # Banded
self._jac = self.be.banded_jacobian(self.exprs, self.dep, *self.band)
else:
f = self.be.Matrix(1, self.ny, self.exprs)
self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep))
elif self._jac is False:
return False
return self._jac | Derives the jacobian from ``self.exprs`` and ``self.dep``. | Below is the instruction that describes the task:
### Input:
Derives the jacobian from ``self.exprs`` and ``self.dep``.
### Response:
def get_jac(self):
""" Derives the jacobian from ``self.exprs`` and ``self.dep``. """
if self._jac is True:
if self.sparse is True:
self._jac, self._colptrs, self._rowvals = self.be.sparse_jacobian_csc(self.exprs, self.dep)
elif self.band is not None: # Banded
self._jac = self.be.banded_jacobian(self.exprs, self.dep, *self.band)
else:
f = self.be.Matrix(1, self.ny, self.exprs)
self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep))
elif self._jac is False:
return False
return self._jac |
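The dense (non-sparse, non-banded) branch reduces to a symbolic jacobian call; a standalone SymPy sketch of that step:

import sympy

x, y = sympy.symbols('x y')
exprs = sympy.Matrix(1, 2, [x * y, x + y])
jac = exprs.jacobian(sympy.Matrix(1, 2, [x, y]))
print(jac)  # Matrix([[y, x], [1, 1]])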
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.levels[l].rename(name, inplace=True) | Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies | Below is the instruction that describes the task:
### Input:
Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
### Response:
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.levels[l].rename(name, inplace=True) |
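Since this is a private mutating helper, the public route to the same effect is Index.set_names; a short example:

import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
idx = idx.set_names(['num', 'char'])
print(idx.names)   # FrozenList(['num', 'char'])
idx = idx.set_names('letter', level=1)
print(idx.names)   # FrozenList(['num', 'letter'])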
def method_repr_string(inst_str, meth_str, arg_strs=None,
allow_mixed_seps=True):
r"""Return a repr string for a method that respects line width.
This function is useful to generate a ``repr`` string for a derived
class that is created through a method, for instance ::
functional.translated(x)
as a better way of representing ::
FunctionalTranslation(functional, x)
Parameters
----------
inst_str : str
Stringification of a class instance.
meth_str : str
Name of the method (not including the ``'.'``).
arg_strs : sequence of str, optional
Stringification of the arguments to the method.
allow_mixed_seps : bool, optional
If ``False`` and the argument strings do not fit on one line, use
``',\n'`` to separate all strings.
By default, a mixture of ``', '`` and ``',\n'`` is used to fit
as much on one line as possible.
In case some of the ``arg_strs`` span multiple lines, it is
usually advisable to set ``allow_mixed_seps`` to ``False`` since
the result tends to be more readable that way.
Returns
-------
meth_repr_str : str
Concatenation of all strings in a way that the line width
is respected.
Examples
--------
>>> inst_str = 'MyClass'
>>> meth_str = 'empty'
>>> arg_strs = []
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.empty()
>>> inst_str = 'MyClass'
>>> meth_str = 'fromfile'
>>> arg_strs = ["'tmpfile.txt'"]
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.fromfile('tmpfile.txt')
>>> inst_str = "MyClass('init string')"
>>> meth_str = 'method'
>>> arg_strs = ['2.0']
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass('init string').method(2.0)
>>> long_inst_str = (
... "MyClass('long string that will definitely trigger a line break')"
... )
>>> meth_str = 'method'
>>> long_arg1 = "'long argument string that should come on the next line'"
>>> arg2 = 'param1=1'
>>> arg3 = 'param2=2.0'
>>> arg_strs = [long_arg1, arg2, arg3]
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1, param2=2.0
)
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs,
... allow_mixed_seps=False))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1,
param2=2.0
)
"""
linewidth = np.get_printoptions()['linewidth']
# Part up to the method name
if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth or
'(' not in inst_str):
init_parts = [inst_str, meth_str]
# Length of the line to the end of the method name
meth_line_start_len = len(inst_str) + 1 + len(meth_str)
else:
# TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2
left, rest = inst_str.split('(', 1)
right, middle = rest[::-1].split(')', 1)
middle, right = middle[::-1], right[::-1]
if middle.startswith('\n') and middle.endswith('\n'):
# Already on multiple lines
new_inst_str = inst_str
else:
new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right
# Length of the line to the end of the method name, consisting of
# ')' + '.' + <method name>
meth_line_start_len = 1 + 1 + len(meth_str)
init_parts = [new_inst_str, meth_str]
# Method call part
arg_str_oneline = ', '.join(arg_strs)
if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth:
meth_call_str = '(' + arg_str_oneline + ')'
elif not arg_str_oneline:
meth_call_str = '(\n)'
else:
if allow_mixed_seps:
arg_seps = _separators(arg_strs, linewidth - 4) # indented
else:
arg_seps = [',\n'] * (len(arg_strs) - 1)
full_arg_str = ''
for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''):
full_arg_str += arg_str + sep
meth_call_str = '(\n' + indent(full_arg_str) + '\n)'
return '.'.join(init_parts) + meth_call_str | r"""Return a repr string for a method that respects line width.
This function is useful to generate a ``repr`` string for a derived
class that is created through a method, for instance ::
functional.translated(x)
as a better way of representing ::
FunctionalTranslation(functional, x)
Parameters
----------
inst_str : str
Stringification of a class instance.
meth_str : str
Name of the method (not including the ``'.'``).
arg_strs : sequence of str, optional
Stringification of the arguments to the method.
allow_mixed_seps : bool, optional
If ``False`` and the argument strings do not fit on one line, use
``',\n'`` to separate all strings.
By default, a mixture of ``', '`` and ``',\n'`` is used to fit
as much on one line as possible.
In case some of the ``arg_strs`` span multiple lines, it is
usually advisable to set ``allow_mixed_seps`` to ``False`` since
the result tends to be more readable that way.
Returns
-------
meth_repr_str : str
Concatenation of all strings in a way that the line width
is respected.
Examples
--------
>>> inst_str = 'MyClass'
>>> meth_str = 'empty'
>>> arg_strs = []
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.empty()
>>> inst_str = 'MyClass'
>>> meth_str = 'fromfile'
>>> arg_strs = ["'tmpfile.txt'"]
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.fromfile('tmpfile.txt')
>>> inst_str = "MyClass('init string')"
>>> meth_str = 'method'
>>> arg_strs = ['2.0']
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass('init string').method(2.0)
>>> long_inst_str = (
... "MyClass('long string that will definitely trigger a line break')"
... )
>>> meth_str = 'method'
>>> long_arg1 = "'long argument string that should come on the next line'"
>>> arg2 = 'param1=1'
>>> arg3 = 'param2=2.0'
>>> arg_strs = [long_arg1, arg2, arg3]
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1, param2=2.0
)
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs,
... allow_mixed_seps=False))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1,
param2=2.0
) | Below is the instruction that describes the task:
### Input:
r"""Return a repr string for a method that respects line width.
This function is useful to generate a ``repr`` string for a derived
class that is created through a method, for instance ::
functional.translated(x)
as a better way of representing ::
FunctionalTranslation(functional, x)
Parameters
----------
inst_str : str
Stringification of a class instance.
meth_str : str
Name of the method (not including the ``'.'``).
arg_strs : sequence of str, optional
Stringification of the arguments to the method.
allow_mixed_seps : bool, optional
If ``False`` and the argument strings do not fit on one line, use
``',\n'`` to separate all strings.
By default, a mixture of ``', '`` and ``',\n'`` is used to fit
as much on one line as possible.
In case some of the ``arg_strs`` span multiple lines, it is
usually advisable to set ``allow_mixed_seps`` to ``False`` since
the result tends to be more readable that way.
Returns
-------
meth_repr_str : str
Concatenation of all strings in a way that the line width
is respected.
Examples
--------
>>> inst_str = 'MyClass'
>>> meth_str = 'empty'
>>> arg_strs = []
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.empty()
>>> inst_str = 'MyClass'
>>> meth_str = 'fromfile'
>>> arg_strs = ["'tmpfile.txt'"]
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.fromfile('tmpfile.txt')
>>> inst_str = "MyClass('init string')"
>>> meth_str = 'method'
>>> arg_strs = ['2.0']
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass('init string').method(2.0)
>>> long_inst_str = (
... "MyClass('long string that will definitely trigger a line break')"
... )
>>> meth_str = 'method'
>>> long_arg1 = "'long argument string that should come on the next line'"
>>> arg2 = 'param1=1'
>>> arg3 = 'param2=2.0'
>>> arg_strs = [long_arg1, arg2, arg3]
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1, param2=2.0
)
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs,
... allow_mixed_seps=False))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1,
param2=2.0
)
### Response:
def method_repr_string(inst_str, meth_str, arg_strs=None,
allow_mixed_seps=True):
r"""Return a repr string for a method that respects line width.
This function is useful to generate a ``repr`` string for a derived
class that is created through a method, for instance ::
functional.translated(x)
as a better way of representing ::
FunctionalTranslation(functional, x)
Parameters
----------
inst_str : str
Stringification of a class instance.
meth_str : str
Name of the method (not including the ``'.'``).
arg_strs : sequence of str, optional
Stringification of the arguments to the method.
allow_mixed_seps : bool, optional
If ``False`` and the argument strings do not fit on one line, use
``',\n'`` to separate all strings.
By default, a mixture of ``', '`` and ``',\n'`` is used to fit
as much on one line as possible.
In case some of the ``arg_strs`` span multiple lines, it is
usually advisable to set ``allow_mixed_seps`` to ``False`` since
the result tends to be more readable that way.
Returns
-------
meth_repr_str : str
Concatenation of all strings in a way that the line width
is respected.
Examples
--------
>>> inst_str = 'MyClass'
>>> meth_str = 'empty'
>>> arg_strs = []
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.empty()
>>> inst_str = 'MyClass'
>>> meth_str = 'fromfile'
>>> arg_strs = ["'tmpfile.txt'"]
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass.fromfile('tmpfile.txt')
>>> inst_str = "MyClass('init string')"
>>> meth_str = 'method'
>>> arg_strs = ['2.0']
>>> print(method_repr_string(inst_str, meth_str, arg_strs))
MyClass('init string').method(2.0)
>>> long_inst_str = (
... "MyClass('long string that will definitely trigger a line break')"
... )
>>> meth_str = 'method'
>>> long_arg1 = "'long argument string that should come on the next line'"
>>> arg2 = 'param1=1'
>>> arg3 = 'param2=2.0'
>>> arg_strs = [long_arg1, arg2, arg3]
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1, param2=2.0
)
>>> print(method_repr_string(long_inst_str, meth_str, arg_strs,
... allow_mixed_seps=False))
MyClass(
'long string that will definitely trigger a line break'
).method(
'long argument string that should come on the next line',
param1=1,
param2=2.0
)
"""
linewidth = np.get_printoptions()['linewidth']
# Part up to the method name
if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth or
'(' not in inst_str):
init_parts = [inst_str, meth_str]
# Length of the line to the end of the method name
meth_line_start_len = len(inst_str) + 1 + len(meth_str)
else:
# TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2
left, rest = inst_str.split('(', 1)
right, middle = rest[::-1].split(')', 1)
middle, right = middle[::-1], right[::-1]
if middle.startswith('\n') and middle.endswith('\n'):
# Already on multiple lines
new_inst_str = inst_str
else:
new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right
# Length of the line to the end of the method name, consisting of
# ')' + '.' + <method name>
meth_line_start_len = 1 + 1 + len(meth_str)
init_parts = [new_inst_str, meth_str]
# Method call part
arg_str_oneline = ', '.join(arg_strs)
if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth:
meth_call_str = '(' + arg_str_oneline + ')'
elif not arg_str_oneline:
meth_call_str = '(\n)'
else:
if allow_mixed_seps:
arg_seps = _separators(arg_strs, linewidth - 4) # indented
else:
arg_seps = [',\n'] * (len(arg_strs) - 1)
full_arg_str = ''
for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''):
full_arg_str += arg_str + sep
meth_call_str = '(\n' + indent(full_arg_str) + '\n)'
return '.'.join(init_parts) + meth_call_str |
def focusWindow(self, hwnd):
""" Brings specified window to the front """
Debug.log(3, "Focusing window: " + str(hwnd))
SW_RESTORE = 9
if ctypes.windll.user32.IsIconic(hwnd):
ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE)
ctypes.windll.user32.SetForegroundWindow(hwnd) | Brings specified window to the front | Below is the instruction that describes the task:
### Input:
Brings specified window to the front
### Response:
def focusWindow(self, hwnd):
""" Brings specified window to the front """
Debug.log(3, "Focusing window: " + str(hwnd))
SW_RESTORE = 9
if ctypes.windll.user32.IsIconic(hwnd):
ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE)
ctypes.windll.user32.SetForegroundWindow(hwnd) |
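A hedged standalone sketch (Windows only) that finds a window by title with the same Win32 calls and focuses it; the window title is an assumption:

import ctypes

SW_RESTORE = 9
hwnd = ctypes.windll.user32.FindWindowW(None, "Untitled - Notepad")  # assumed title
if hwnd:
    if ctypes.windll.user32.IsIconic(hwnd):
        ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE)
    ctypes.windll.user32.SetForegroundWindow(hwnd)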