Columns: code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k)
def stop(self, timeout=None):
"""
Stop the producer (async mode). Blocks until async thread completes.
"""
if timeout is not None:
log.warning('timeout argument to stop() is deprecated - '
'it will be removed in future release')
if not self.async_send:
log.warning('producer.stop() called, but producer is not async')
return
if self.stopped:
log.warning('producer.stop() called, but producer is already stopped')
return
if self.async_send:
self.queue.put((STOP_ASYNC_PRODUCER, None, None))
self.thread_stop_event.set()
self.thread.join()
if hasattr(self, '_cleanup_func'):
# Remove cleanup handler now that we've stopped
# py3 supports unregistering
if hasattr(atexit, 'unregister'):
atexit.unregister(self._cleanup_func) # pylint: disable=no-member
# py2 requires removing from private attribute...
else:
# ValueError on list.remove() if the exithandler no longer exists
# but that is fine here
try:
atexit._exithandlers.remove( # pylint: disable=no-member
(self._cleanup_func, (self,), {}))
except ValueError:
pass
del self._cleanup_func
self.stopped = True
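The atexit juggling above is the portable way to drop a registered exit hook on both Python 2 and 3. A minimal self-contained sketch of the same pattern (the handle object stands in for a producer instance):

import atexit

def _cleanup(obj):
    print('cleaning up', obj)

handle = object()  # stand-in for a producer instance
atexit.register(_cleanup, handle)

if hasattr(atexit, 'unregister'):  # Python 3: direct unregister
    atexit.unregister(_cleanup)
else:  # Python 2: remove from the private handler list
    try:
        atexit._exithandlers.remove((_cleanup, (handle,), {}))
    except ValueError:
        pass  # handler already gone, which is fine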
def get_planes(im, squeeze=True):
r"""
Extracts three planar images from the volumetric image, one for each
principal axis. The planes are taken from the middle of the domain.
Parameters
----------
im : ND-array
The volumetric image from which the 3 planar images are to be obtained
squeeze : boolean, optional
If True (default) the returned images are 2D (i.e. squeezed). If
False, the images are 1 element deep along the axis where the slice
was obtained.
Returns
-------
planes : list
A list of 2D-images
"""
x, y, z = (sp.array(im.shape) / 2).astype(int)
planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
if not squeeze:
imx = planes[0]
planes[0] = sp.reshape(imx, [1, imx.shape[0], imx.shape[1]])
imy = planes[1]
planes[1] = sp.reshape(imy, [imy.shape[0], 1, imy.shape[1]])
imz = planes[2]
planes[2] = sp.reshape(imz, [imz.shape[0], imz.shape[1], 1])
return planes
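A quick sketch of what the midpoint slicing produces, using numpy directly (the `sp` alias above is assumed to be scipy, whose array/reshape calls mirror numpy's):

import numpy as np

im = np.arange(4 * 5 * 6).reshape(4, 5, 6)  # toy volumetric image
x, y, z = (np.array(im.shape) // 2).astype(int)
planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
print([p.shape for p in planes])  # [(5, 6), (4, 6), (4, 5)]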
def ends_with(self, suffix):
"""Asserts that val is string or iterable and ends with suffix."""
if suffix is None:
raise TypeError('given suffix arg must not be none')
if isinstance(self.val, str_types):
if not isinstance(suffix, str_types):
raise TypeError('given suffix arg must be a string')
if len(suffix) == 0:
raise ValueError('given suffix arg must not be empty')
if not self.val.endswith(suffix):
self._err('Expected <%s> to end with <%s>, but did not.' % (self.val, suffix))
elif isinstance(self.val, Iterable):
if len(self.val) == 0:
raise ValueError('val must not be empty')
last = None
for last in self.val:
pass
if last != suffix:
self._err('Expected %s to end with <%s>, but did not.' % (self.val, suffix))
else:
raise TypeError('val is not a string or iterable')
return self
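The `for last in self.val: pass` idiom above is a compact way to reach the final element of an arbitrary iterable without indexing; a standalone sketch:

def last_element(iterable):
    last = None
    for last in iterable:  # the loop variable retains the final value
        pass
    return last

print(last_element([1, 2, 3]))          # 3
print(last_element(c for c in 'xyz'))   # z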
def __skaters(self, tab):
"""
Constructs dictionary of players on the ice in the provided table at time of play.
:param tab: RTSS table of the skaters and goalie on the ice at the time of the play
:rtype: dictionary, key = player number, value = [position, name]
"""
res = { }
for td in tab.iterchildren():
if len(td):
pl_data = td.xpath("./table/tr")
pl = pl_data[0].xpath("./td/font")
if pl[0].text.isdigit():
res[int(pl[0].text)] = [s.strip() for s in pl[0].get("title").split("-")][::-1]
s = pl[0].get("title").split("-")  # raw title split; parsed but currently unused
pos = pl_data[1].getchildren()[0].text  # player position; also unused
return res
def from_bytes(cls, mybytes, byteorder='big', signed=False):
"""
Return the integer represented by the given array of bytes.
The mybytes argument must either support the buffer protocol or be an
iterable object producing bytes. Bytes and bytearray are examples of
built-in objects that support the buffer protocol.
The byteorder argument determines the byte order used to represent the
integer. If byteorder is 'big', the most significant byte is at the
beginning of the byte array. If byteorder is 'little', the most
significant byte is at the end of the byte array. To request the native
byte order of the host system, use `sys.byteorder' as the byte order value.
The signed keyword-only argument indicates whether two's complement is
used to represent the integer.
"""
if byteorder not in ('little', 'big'):
raise ValueError("byteorder must be either 'little' or 'big'")
if isinstance(mybytes, unicode):
raise TypeError("cannot convert unicode objects to bytes")
# mybytes can also be passed as a sequence of integers on Py3.
# Test for this:
elif isinstance(mybytes, collections.Iterable):
mybytes = newbytes(mybytes)
b = mybytes if byteorder == 'big' else mybytes[::-1]
if len(b) == 0:
b = b'\x00'
# The encode() method has been disabled by newbytes, but Py2's
# str has it:
num = int(native(b).encode('hex'), 16)
if signed and (b[0] & 0x80):
num = num - (2 ** (len(b)*8))
return cls(num)
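On Python 3 the behaviour this backport reproduces is built in, and the signed branch is just a two's-complement adjustment (subtract 2**(8*len(b)) when the high bit is set):

print(int.from_bytes(b'\xff\x00', byteorder='big'))           # 65280
print(int.from_bytes(b'\xff\x00', byteorder='little'))        # 255
print(int.from_bytes(b'\xff', byteorder='big', signed=True))  # -1

num = 0xff             # unsigned reading of b'\xff'
print(num - (2 ** 8))  # -1, the signed reading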
def purge_node(self, node_id, remove_definition=False, sandbox=None):
"""
Purges a node (collection of streams)
:param node_id: The node identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox
:return: None
"""
for stream_id in list(self.streams):
stream = self.streams[stream_id]
if not stream.parent_node:
# This can happen if streams have been defined outside of nodes - generally nothing to worry about
logging.debug("cannot purge the stream with id {} because it has no parent node".format(stream_id))
continue
if stream.parent_node.node_id == node_id:
self.purge_stream(stream_id, remove_definition=remove_definition, sandbox=sandbox)
def com_google_fonts_check_name_no_copyright_on_description(ttFont):
"""Description strings in the name table must not contain copyright info."""
failed = False
for name in ttFont['name'].names:
if 'opyright' in name.string.decode(name.getEncoding())\
and name.nameID == NameID.DESCRIPTION:
failed = True
if failed:
yield FAIL, ("Namerecords with ID={} (NameID.DESCRIPTION)"
" should be removed (perhaps these were added by"
" a longstanding FontLab Studio 5.x bug that"
" copied copyright notices to them.)"
"").format(NameID.DESCRIPTION)
else:
yield PASS, ("Description strings in the name table"
" do not contain any copyright string.") | Description strings in the name table must not contain copyright info. | Below is the the instruction that describes the task:
### Input:
Description strings in the name table must not contain copyright info.
### Response:
def com_google_fonts_check_name_no_copyright_on_description(ttFont):
"""Description strings in the name table must not contain copyright info."""
failed = False
for name in ttFont['name'].names:
if 'opyright' in name.string.decode(name.getEncoding())\
and name.nameID == NameID.DESCRIPTION:
failed = True
if failed:
yield FAIL, ("Namerecords with ID={} (NameID.DESCRIPTION)"
" should be removed (perhaps these were added by"
" a longstanding FontLab Studio 5.x bug that"
" copied copyright notices to them.)"
"").format(NameID.DESCRIPTION)
else:
yield PASS, ("Description strings in the name table"
" do not contain any copyright string.") |
def new_dataset(data, identifier=None):
"""Initialize a new RT-DC dataset
Parameters
----------
data:
can be one of the following:
- dict
- .tdms file
- .rtdc file
- subclass of `RTDCBase`
(will create a hierarchy child)
identifier: str
A unique identifier for this dataset. If set to `None`
an identifier is generated.
Returns
-------
dataset: subclass of :class:`dclab.rtdc_dataset.RTDCBase`
A new dataset instance
"""
if isinstance(data, dict):
return fmt_dict.RTDC_Dict(data, identifier=identifier)
elif isinstance(data, str_types) or isinstance(data, pathlib.Path):
return load_file(data, identifier=identifier)
elif isinstance(data, RTDCBase):
return fmt_hierarchy.RTDC_Hierarchy(data, identifier=identifier)
else:
msg = "data type not supported: {}".format(data.__class__)
raise NotImplementedError(msg)
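A minimal usage sketch, assuming dclab is installed; the feature names are illustrative, and a plain dict of arrays is enough to build an in-memory dataset:

import numpy as np
import dclab

data = {"area_um": np.linspace(100.7, 110.9, 100),
        "deform": np.random.uniform(0, 0.1, 100)}
ds = dclab.new_dataset(data)  # dict input dispatches to fmt_dict.RTDC_Dict
print(ds.identifier)          # auto-generated, since none was passed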
def from_string(self, value):
"""Convert string to enum value."""
if not isinstance(value, basestring):
raise ValueError('expected string value: ' + str(type(value)))
self.test_value(value)
return value
def html_page_for_render_items(bundle, docs_json, render_items, title, template=None, template_variables={}):
''' Render an HTML page from a template and Bokeh render items.
Args:
bundle (tuple):
a tuple containing (bokehjs, bokehcss)
docs_json (JSON-like):
Serialized Bokeh Document
render_items (RenderItems)
Specific items to render from the document and where
title (str or None)
A title for the HTML page. If None, DEFAULT_TITLE is used
template (str or Template or None, optional) :
A Template to be used for the HTML page. If None, FILE is used.
template_variables (dict, optional):
Any additional variables to pass to the template
Returns:
str
'''
if title is None:
title = DEFAULT_TITLE
bokeh_js, bokeh_css = bundle
json_id = make_id()
json = escape(serialize_json(docs_json), quote=False)
json = wrap_in_script_tag(json, "application/json", json_id)
script = wrap_in_script_tag(script_for_render_items(json_id, render_items))
context = template_variables.copy()
context.update(dict(
title = title,
bokeh_js = bokeh_js,
bokeh_css = bokeh_css,
plot_script = json + script,
docs = render_items,
base = FILE,
macros = MACROS,
))
if len(render_items) == 1:
context["doc"] = context["docs"][0]
context["roots"] = context["doc"].roots
# XXX: backwards compatibility, remove for 1.0
context["plot_div"] = "\n".join(div_for_render_item(item) for item in render_items)
if template is None:
template = FILE
elif isinstance(template, string_types):
template = _env.from_string("{% extends base %}\n" + template)
html = template.render(context)
return encode_utf8(html)
def p_block(self, p):
'block : BEGIN block_statements END'
p[0] = Block(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
def edit_pool(self, auth, spec, attr):
""" Update pool given by `spec` with attributes `attr`.
* `auth` [BaseAuth]
AAA options.
* `spec` [pool_spec]
Specifies what pool to edit.
* `attr` [pool_attr]
Attributes to update and their new values.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.edit_pool` for full
understanding.
"""
self._logger.debug("edit_pool called; spec: %s attr: %s" %
(unicode(spec), unicode(attr)))
if ('id' not in spec and 'name' not in spec) or ('id' in spec and 'name' in spec):
raise NipapMissingInputError('''pool spec must contain either 'id' or 'name' ''')
self._check_pool_attr(attr)
where, params1 = self._expand_pool_spec(spec)
update, params2 = self._sql_expand_update(attr)
params = dict(params2.items() + params1.items())
pools = self.list_pool(auth, spec)
sql = "UPDATE ip_net_pool SET " + update
sql += " FROM ip_net_pool AS po WHERE ip_net_pool.id = po.id AND " + where
sql += " RETURNING po.id AS id"
self._execute(sql, params)
updated_pools = self._get_updated_rows(auth, self.search_pool)
# write to audit table
audit_params = {
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source
}
for p in pools:
audit_params['pool_id'] = p['id']
audit_params['pool_name'] = p['name']
audit_params['description'] = 'Edited pool %s attr: %s' % (p['name'], unicode(attr))
sql, params = self._sql_expand_insert(audit_params)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
return updated_pools
def get_hierarchy_traversal_session(self, proxy):
"""Gets the ``OsidSession`` associated with the hierarchy traversal service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.hierarchy.HierarchyTraversalSession) - a
``HierarchyTraversalSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_hierarchy_traversal()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_hierarchy_traversal()`` is ``true``.*
"""
if not self.supports_hierarchy_traversal():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.HierarchyTraversalSession(proxy=proxy, runtime=self._runtime)
def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
"""
response = self.__proxy__.list_fields()
return [s for s in response['value'] if not s.startswith("_")]
def purge_db(self):
"""
Clear all matching our user_id.
"""
with self.engine.begin() as db:
purge_user(db, self.user_id)
def cal_pst(self, v):
"""
Calculate static pressure at 300 K.
:param v: unit-cell volume in A^3
:return: static pressure at t_ref (=300 K) in GPa
"""
params = self._set_params(self.params_st)
return func_st[self.eqn_st](v, *params)
def page_size(self) -> int:
"""
A property that returns how large a page is, calculated from the paginator properties.
If this exceeds `max_page_size`, an exception is raised upon instantiation.
"""
page_count = self.page_count
return self.paginator.max_size + len(f'\nPage {page_count}/{page_count}')
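The arithmetic is simply the paginator's maximum body size plus the rendered footer; for example (the max_size value here is hypothetical):

max_size = 1980
page_count = 12
footer = f'\nPage {page_count}/{page_count}'
print(max_size + len(footer))  # 1980 + 11 = 1991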
def pass_condition(b, test, a):
"""
Generic test function used by Scout2 / AWS recipes
.
:param b: Value to be tested against
:param test: Name of the test case to run
:param a: Value to be tested
:return: True of condition is met, False otherwise
"""
# Return false by default
result = False
# Equality tests
if test == 'equal':
a = str(a)
b = str(b)
result = (a == b)
elif test == 'notEqual':
result = (not pass_condition(b, 'equal', a))
# More/Less tests
elif test == 'lessThan':
result = (int(b) < int(a))
elif test == 'lessOrEqual':
result = (int(b) <= int(a))
elif test == 'moreThan':
result = (int(b) > int(a))
elif test == 'moreOrEqual':
result = (int(b) >= int(a))
# Empty tests
elif test == 'empty':
result = ((type(b) == dict and b == {}) or (type(b) == list and b == []) or (type(b) == list and b == [None]))
elif test == 'notEmpty':
result = (not pass_condition(b, 'empty', a))
elif test == 'null':
result = ((b is None) or (type(b) == str and b == 'None'))
elif test == 'notNull':
result = (not pass_condition(b, 'null', a))
# Boolean tests
elif test == 'true':
result = (str(b).lower() == 'true')
elif test == 'notTrue' or test == 'false':
result = (str(b).lower() == 'false')
# Object length tests
elif test == 'lengthLessThan':
result = (len(b) < int(a))
elif test == 'lengthMoreThan':
result = (len(b) > int(a))
elif test == 'lengthEqual':
result = (len(b) == int(a))
# Dictionary keys tests
elif test == 'withKey':
result = (a in b)
elif test == 'withoutKey':
result = (a not in b)
# List tests
elif test == 'containAtLeastOneOf':
result = False
if not type(b) == list:
b = [ b ]
if not type(a) == list:
a = [ a ]
for c in b:
c = str(c)  # compare elements as strings
if c in a:
result = True
break
elif test == 'containAtLeastOneDifferentFrom':
result = False
if not type(b) == list:
b = [ b ]
if not type(a) == list:
a = [ a ]
for c in b:
if c != None and c != '' and c not in a:
result = True
break
elif test == 'containNoneOf':
result = True
if not type(b) == list:
b = [ b ]
if not type(a) == list:
a = [ a ]
for c in b:
if c in a:
result = False
break
# Regex tests
elif test == 'match':
if type(a) != list:
a = [ a ]
b = str(b)
for c in a:
if re.match(c, b) != None:
result = True
break
elif test == 'notMatch':
result = (not pass_condition(b, 'match', a))
# Date tests
elif test == 'priorToDate':
b = dateutil.parser.parse(str(b)).replace(tzinfo=None)
a = dateutil.parser.parse(str(a)).replace(tzinfo=None)
result = (b < a)
elif test == 'olderThan':
age, threshold = __prepare_age_test(a, b)
result = (age > threshold)
elif test == 'newerThan':
age, threshold = __prepare_age_test(a, b)
result = (age < threshold)
# CIDR tests
elif test == 'inSubnets':
result = False
grant = netaddr.IPNetwork(b)
if type(a) != list:
a = [ a ]
for c in a:
known_subnet = netaddr.IPNetwork(c)
if grant in known_subnet:
result = True
break
elif test == 'notInSubnets':
result = (not pass_condition(b, 'inSubnets', a))
# Policy statement tests
elif test == 'containAction':
result = False
if type(b) != dict:
b = json.loads(b)
statement_actions = get_actions_from_statement(b)
rule_actions = _expand_wildcard_action(a)
for action in rule_actions:
if action.lower() in statement_actions:
result = True
break
elif test == 'notContainAction':
result = (not pass_condition(b, 'containAction', a))
elif test == 'containAtLeastOneAction':
result = False
if type(b) != dict:
b = json.loads(b)
if type(a) != list:
a = [ a ]
actions = get_actions_from_statement(b)
for c in a:
if c.lower() in actions:
result = True
break
# Policy principal tests
elif test == 'isCrossAccount':
result = False
if type(b) != list:
b = [b]
for c in b:
if c != a and not re.match(r'arn:aws:iam:.*?:%s:.*' % a, c):
result = True
break
elif test == 'isSameAccount':
result = False
if type(b) != list:
b = [b]
for c in b:
if c == a or re.match(r'arn:aws:iam:.*?:%s:.*' % a, c):
result = True
break
# Unknown test case
else:
printError('Error: unknown test case %s' % test)
raise Exception('unknown test case %s' % test)
return result
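A few illustrative calls; the first three need nothing beyond the function itself, while the subnet test assumes netaddr is importable as in the module above:

print(pass_condition('running', 'equal', 'running'))                # True
print(pass_condition([1, 2, 3], 'lengthMoreThan', 2))               # True
print(pass_condition({}, 'empty', None))                            # True
print(pass_condition('10.0.1.5/32', 'inSubnets', ['10.0.0.0/16']))  # True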
def _leave_handler(self, event):
"""Callback for :obj:`<Leave>` event on marker, to set normal options"""
iid = self.current_iid
if iid is None or self.active == iid:
return
self.update_state(iid, "normal") | Callback for :obj:`<Leave>` event on marker, to set normal options | Below is the the instruction that describes the task:
### Input:
Callback for :obj:`<Leave>` event on marker, to set normal options
### Response:
def _leave_handler(self, event):
"""Callback for :obj:`<Leave>` event on marker, to set normal options"""
iid = self.current_iid
if iid is None or self.active == iid:
return
self.update_state(iid, "normal") |
def http_basic_auth(request):
"""
Extracts the credentials of a client using HTTP Basic Auth.
Expects the ``client_id`` to be the username and the ``client_secret`` to
be the password part of the Authorization header.
:param request: The incoming request
:type request: oauth2.web.Request
:return: A tuple in the format of (<CLIENT ID>, <CLIENT SECRET>)
:rtype: tuple
"""
auth_header = request.header("authorization")
if auth_header is None:
raise OAuthInvalidError(error="invalid_request",
explanation="Authorization header is missing")
auth_parts = auth_header.strip().encode("latin1").split(None)
if auth_parts[0].strip().lower() != b'basic':
raise OAuthInvalidError(
error="invalid_request",
explanation="Provider supports basic authentication only")
client_id, client_secret = b64decode(auth_parts[1]).split(b':', 1)
return client_id.decode("latin1"), client_secret.decode("latin1")
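For reference, the header this function parses is built like so (the client id and secret are made up):

from base64 import b64encode

creds = b64encode(b'my-client-id:my-secret')
print(b'Basic ' + creds)  # b'Basic bXktY2xpZW50LWlkOm15LXNlY3JldA=='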
def set_layer(self, layer=None, keywords=None):
"""Set layer and update UI accordingly.
:param layer: A QgsVectorLayer.
:type layer: QgsVectorLayer
:param keywords: Keywords for the layer.
:type keywords: dict, None
"""
if self.field_mapping_widget is not None:
self.field_mapping_widget.setParent(None)
self.field_mapping_widget.close()
self.field_mapping_widget.deleteLater()
self.main_layout.removeWidget(self.field_mapping_widget)
self.field_mapping_widget = None
if layer:
self.layer = layer
else:
self.layer = self.layer_combo_box.currentLayer()
if not self.layer:
return
if keywords is not None:
self.metadata = keywords
else:
# Always read from metadata file.
try:
self.metadata = self.keyword_io.read_keywords(self.layer)
except (
NoKeywordsFoundError,
KeywordNotFoundError,
MetadataReadError) as e:
raise
if 'inasafe_default_values' not in self.metadata:
self.metadata['inasafe_default_values'] = {}
if 'inasafe_fields' not in self.metadata:
self.metadata['inasafe_fields'] = {}
self.field_mapping_widget = FieldMappingWidget(
parent=self, iface=self.iface)
self.field_mapping_widget.set_layer(self.layer, self.metadata)
self.field_mapping_widget.show()
self.main_layout.addWidget(self.field_mapping_widget)
# Set header label
group_names = [
self.field_mapping_widget.tabText(i) for i in range(
self.field_mapping_widget.count())]
if len(group_names) == 0:
header_text = tr(
'There is no field group for this layer. Please select '
'another layer.')
self.header_label.setText(header_text)
return
elif len(group_names) == 1:
pretty_group_name = group_names[0]
elif len(group_names) == 2:
pretty_group_name = group_names[0] + tr(' and ') + group_names[1]
else:
pretty_group_name = ', '.join(group_names[:-1])
pretty_group_name += tr(', and {0}').format(group_names[-1])
header_text = tr(
'Please fill the information for every tab to determine the '
'attribute for {0} group.').format(pretty_group_name)
self.header_label.setText(header_text)
def db_for_write(self, model, **hints):
"""
If the app has its own database, use it for writes
"""
if model._meta.app_label in self._apps:
return getattr(model, '_db_alias', model._meta.app_label)
return None
def parse_file_entities(filename, entities=None, config=None,
include_unmatched=False):
""" Parse the passed filename for entity/value pairs.
Args:
filename (str): The filename to parse for entity values
entities (list): An optional list of Entity instances to use in
extraction. If passed, the config argument is ignored.
config (str, Config, list): One or more Config objects or names of
configurations to use in matching. Each element must be a Config
object, or a valid Config name (e.g., 'bids' or 'derivatives').
If None, all available configs are used.
include_unmatched (bool): If True, unmatched entities are included
in the returned dict, with values set to None. If False
(default), unmatched entities are ignored.
Returns: A dict, where keys are Entity names and values are the
values extracted from the filename.
"""
# Load Configs if needed
if entities is None:
if config is None:
config = ['bids', 'derivatives']
config = [Config.load(c) if not isinstance(c, Config) else c
for c in listify(config)]
# Consolidate entities from all Configs into a single dict
entities = {}
for c in config:
entities.update(c.entities)
entities = entities.values()
# Extract matches
bf = BIDSFile(filename)
ent_vals = {}
for ent in entities:
match = ent.match_file(bf)
if match is not None or include_unmatched:
ent_vals[ent.name] = match
return ent_vals
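A typical call against a BIDS-style filename, assuming this is pybids' layout module (exact keys and value types vary with the configs in use):

filename = '/data/sub-01_ses-02_task-rest_run-1_bold.nii.gz'
print(parse_file_entities(filename))
# e.g. {'subject': '01', 'session': '02', 'task': 'rest',
#       'run': 1, 'suffix': 'bold', 'extension': '.nii.gz'}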
def imagetransformer_b12l_4h_uncond_dr03_tpu():
"""TPU related small model."""
hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()
hparams.learning_rate = 0.2
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
return hparams
def isfile(self, path, follow_symlinks=True):
"""Determine if path identifies a regular file.
Args:
path: Path to filesystem object.
follow_symlinks: if False, symlinks are not resolved before the check.
Returns:
`True` if path points to a regular file (following symlinks).
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFREG, follow_symlinks)
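A usage sketch, assuming this method lives on pyfakefs' FakeFilesystem (where code of this shape appears):

from pyfakefs.fake_filesystem import FakeFilesystem

fs = FakeFilesystem()
fs.create_file('/tmp/notes.txt')    # parent directories are created as needed
print(fs.isfile('/tmp/notes.txt'))  # True
print(fs.isfile('/tmp'))            # False: a directory, not a regular file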
def _palette_cmd(self, event):
"""Respond to user click on a palette item."""
label = event.widget
label.master.focus_set()
label.master.configure(relief="sunken")
r, g, b = self.winfo_rgb(label.cget("background"))
r = round2(r * 255 / 65535)
g = round2(g * 255 / 65535)
b = round2(b * 255 / 65535)
args = (r, g, b)
if self.alpha_channel:
a = self.alpha.get()
args += (a,)
self.alphabar.set_color(args)
color = rgb_to_hexa(*args)
h, s, v = rgb_to_hsv(r, g, b)
self.red.set(r)
self.green.set(g)
self.blue.set(b)
self.hue.set(h)
self.saturation.set(s)
self.value.set(v)
self.hexa.delete(0, "end")
self.hexa.insert(0, color.upper())
self.bar.set(h)
self.square.set_hsv((h, s, v))
self._update_preview() | Respond to user click on a palette item. | Below is the instruction that describes the task:
### Input:
Respond to user click on a palette item.
### Response:
def _palette_cmd(self, event):
"""Respond to user click on a palette item."""
label = event.widget
label.master.focus_set()
label.master.configure(relief="sunken")
r, g, b = self.winfo_rgb(label.cget("background"))
r = round2(r * 255 / 65535)
g = round2(g * 255 / 65535)
b = round2(b * 255 / 65535)
args = (r, g, b)
if self.alpha_channel:
a = self.alpha.get()
args += (a,)
self.alphabar.set_color(args)
color = rgb_to_hexa(*args)
h, s, v = rgb_to_hsv(r, g, b)
self.red.set(r)
self.green.set(g)
self.blue.set(b)
self.hue.set(h)
self.saturation.set(s)
self.value.set(v)
self.hexa.delete(0, "end")
self.hexa.insert(0, color.upper())
self.bar.set(h)
self.square.set_hsv((h, s, v))
self._update_preview() |
def get(self, requirement):
"""Find packages matching ``requirement``.
:param requirement: Requirement to match against repository packages.
:type requirement: `str` or :class:`.Requirement`
:returns: :func:`list` of matching :class:`.Package` objects.
"""
if isinstance(requirement, basestring):
requirement = Requirement.parse(requirement)
return sorted(p for p in self.packages
if requirement.name == p.name and requirement.match(p)) | Find packages matching ``requirement``.
:param requirement: Requirement to match against repository packages.
:type requirement: `str` or :class:`.Requirement`
:returns: :func:`list` of matching :class:`.Package` objects. | Below is the instruction that describes the task:
### Input:
Find packages matching ``requirement``.
:param requirement: Requirement to match against repository packages.
:type requirement: `str` or :class:`.Requirement`
:returns: :func:`list` of matching :class:`.Package` objects.
### Response:
def get(self, requirement):
"""Find packages matching ``requirement``.
:param requirement: Requirement to match against repository packages.
:type requirement: `str` or :class:`.Requirement`
:returns: :func:`list` of matching :class:`.Package` objects.
"""
if isinstance(requirement, basestring):
requirement = Requirement.parse(requirement)
return sorted(p for p in self.packages
if requirement.name == p.name and requirement.match(p)) |
def copy_attrs(obj1, obj2, attrs):
"""
Allows copying a list of attributes from object2 to object1.
Useful for copying ccs attributes to a fragment
"""
for attr in attrs:
value = getattr(obj2, attr) if hasattr(obj2, attr) else None
if value is None and isinstance(obj2, dict) and attr in obj2:
value = obj2[attr]
setattr(obj1, attr, value) | Allows copying a list of attributes from object2 to object1.
Useful for copying ccs attributes to a fragment | Below is the instruction that describes the task:
### Input:
Allows copying a list of attributes from object2 to object1.
Useful for copying ccs attributes to a fragment
### Response:
def copy_attrs(obj1, obj2, attrs):
"""
Allows copying a list of attributes from object2 to object1.
Useful for copying ccs attributes to a fragment
"""
for attr in attrs:
value = getattr(obj2, attr) if hasattr(obj2, attr) else None
if value is None and isinstance(obj2, dict) and attr in obj2:
value = obj2[attr]
setattr(obj1, attr, value) |
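A self-contained sketch of copy_attrs; the Source and Target classes are illustrative stand-ins, not part of the original library:

class Source(object):
    def __init__(self):
        self.name = 'ccs-1'
        self.weight = 3

class Target(object):
    pass

src, dst = Source(), Target()
copy_attrs(dst, src, ['name', 'weight', 'missing'])
assert dst.name == 'ccs-1' and dst.weight == 3 and dst.missing is None
# Dict sources also work, via the `attr in obj2` fallback branch:
copy_attrs(dst, {'name': 'ccs-2'}, ['name'])
assert dst.name == 'ccs-2'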
def sound_touch_stop(self, call_params):
"""REST Remove soundtouch audio effects on a Call
"""
path = '/' + self.api_version + '/SoundTouchStop/'
method = 'POST'
return self.request(path, method, call_params) | REST Remove soundtouch audio effects on a Call | Below is the the instruction that describes the task:
### Input:
REST Remove soundtouch audio effects on a Call
### Response:
def sound_touch_stop(self, call_params):
"""REST Remove soundtouch audio effects on a Call
"""
path = '/' + self.api_version + '/SoundTouchStop/'
method = 'POST'
return self.request(path, method, call_params) |
def multpasswordbox(msg="Fill in values for the fields."
, title=" "
, fields=tuple()
,values=tuple()
):
r"""
Same interface as multenterbox. But in multpassword box,
the last of the fields is assumed to be a password, and
is masked with asterisks.
Example
=======
Here is some example code, that shows how values returned from
multpasswordbox can be checked for validity before they are accepted::
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
writeln("Reply was: %s" % str(fieldValues))
"""
return __multfillablebox(msg,title,fields,values,"*") | r"""
Same interface as multenterbox. But in multpassword box,
the last of the fields is assumed to be a password, and
is masked with asterisks.
Example
=======
Here is some example code, that shows how values returned from
multpasswordbox can be checked for validity before they are accepted::
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
writeln("Reply was: %s" % str(fieldValues)) | Below is the the instruction that describes the task:
### Input:
r"""
Same interface as multenterbox. But in multpassword box,
the last of the fields is assumed to be a password, and
is masked with asterisks.
Example
=======
Here is some example code, that shows how values returned from
multpasswordbox can be checked for validity before they are accepted::
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
writeln("Reply was: %s" % str(fieldValues))
### Response:
def multpasswordbox(msg="Fill in values for the fields."
, title=" "
, fields=tuple()
,values=tuple()
):
r"""
Same interface as multenterbox. But in multpassword box,
the last of the fields is assumed to be a password, and
is masked with asterisks.
Example
=======
Here is some example code, that shows how values returned from
multpasswordbox can be checked for validity before they are accepted::
msg = "Enter logon information"
title = "Demo of multpasswordbox"
fieldNames = ["Server ID", "User ID", "Password"]
fieldValues = [] # we start with blanks for the values
fieldValues = multpasswordbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "": break # no problems found
fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
writeln("Reply was: %s" % str(fieldValues))
"""
return __multfillablebox(msg,title,fields,values,"*") |
def state_get(self, block_id, addresses):
'''Returns a list of address/data pairs (str, bytes)'''
block = self._get_blocks([block_id.hex()])[0]
block_header = BlockHeader()
block_header.ParseFromString(block.header)
try:
state_view = self._state_view_factory.create_view(
block_header.state_root_hash)
except KeyError:
LOGGER.error(
'State from block %s requested, but root hash %s was missing. '
'Returning empty state.',
block_id.hex(),
block_header.state_root_hash)
# The state root does not exist, which may indicate a pruned root
# from a dropped fork or an invalid state.
return []
result = []
for address in addresses:
# a fully specified address
if len(address) == 70:
try:
value = state_view.get(address)
except KeyError:
# if the key is missing, leave it out of the response
continue
result.append((address, value))
continue
# an address prefix
leaves = state_view.leaves(address)
for leaf in leaves:
result.append(leaf)
return result | Returns a list of address/data pairs (str, bytes) | Below is the instruction that describes the task:
### Input:
Returns a list of address/data pairs (str, bytes)
### Response:
def state_get(self, block_id, addresses):
'''Returns a list of address/data pairs (str, bytes)'''
block = self._get_blocks([block_id.hex()])[0]
block_header = BlockHeader()
block_header.ParseFromString(block.header)
try:
state_view = self._state_view_factory.create_view(
block_header.state_root_hash)
except KeyError:
LOGGER.error(
'State from block %s requested, but root hash %s was missing. '
'Returning empty state.',
block_id.hex(),
block_header.state_root_hash)
# The state root does not exist, which may indicate a pruned root
# from a dropped fork or an invalid state.
return []
result = []
for address in addresses:
# a fully specified address
if len(address) == 70:
try:
value = state_view.get(address)
except KeyError:
# if the key is missing, leave it out of the response
continue
result.append((address, value))
continue
# an address prefix
leaves = state_view.leaves(address)
for leaf in leaves:
result.append(leaf)
return result |
def get(self, key, sort_key):
""" Get an element in dictionary """
key = self.prefixed('{}:{}'.format(key, sort_key))
self.logger.debug('Storage - get {}'.format(key))
if key not in self.cache.keys():
return None
return self.cache[key] | Get an element in dictionary | Below is the instruction that describes the task:
### Input:
Get an element in dictionary
### Response:
def get(self, key, sort_key):
""" Get an element in dictionary """
key = self.prefixed('{}:{}'.format(key, sort_key))
self.logger.debug('Storage - get {}'.format(key))
if key not in self.cache.keys():
return None
return self.cache[key] |
def hide_routemap_holder_route_map_content_match_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
vrf = ET.SubElement(match, "vrf")
vrf.text = kwargs.pop('vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_routemap_holder_route_map_content_match_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
vrf = ET.SubElement(match, "vrf")
vrf.text = kwargs.pop('vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan) | Ensure that if we don't have an index, that we can create one from the
passed value. | Below is the instruction that describes the task:
### Input:
Ensure that if we don't have an index, that we can create one from the
passed value.
### Response:
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan) |
def validate(self, input):
"""
Validate the given input value against this L{Field} definition.
@param input: An input value supposedly serializable by this L{Field}.
@raises ValidationError: If the value is not serializable or fails to
be validated by the additional validator.
"""
# Make sure the input serializes:
self._serializer(input)
# Use extra validator, if given:
if self._extraValidator is not None:
self._extraValidator(input) | Validate the given input value against this L{Field} definition.
@param input: An input value supposedly serializable by this L{Field}.
@raises ValidationError: If the value is not serializable or fails to
be validated by the additional validator. | Below is the instruction that describes the task:
### Input:
Validate the given input value against this L{Field} definition.
@param input: An input value supposedly serializable by this L{Field}.
@raises ValidationError: If the value is not serializable or fails to
be validated by the additional validator.
### Response:
def validate(self, input):
"""
Validate the given input value against this L{Field} definition.
@param input: An input value supposedly serializable by this L{Field}.
@raises ValidationError: If the value is not serializable or fails to
be validated by the additional validator.
"""
# Make sure the input serializes:
self._serializer(input)
# Use extra validator, if given:
if self._extraValidator is not None:
self._extraValidator(input) |
def get_cached(location, **kwargs):
"""
Simple wrapper that adds Django caching support to 'geocoder.get()'.
"""
result = cache.get(location)
# Result is not cached or wrong
if not result or not result.ok:
result = geocoder.get(location, **kwargs)
if result.ok:
cache.set(location, result)
return result | Simple wrapper that adds Django caching support to 'geocoder.get()'. | Below is the instruction that describes the task:
### Input:
Simple wrapper that adds Django caching support to 'geocoder.get()'.
### Response:
def get_cached(location, **kwargs):
"""
Simple wrapper that adds Django caching support to 'geocoder.get()'.
"""
result = cache.get(location)
# Result is not cached or wrong
if not result or not result.ok:
result = geocoder.get(location, **kwargs)
if result.ok:
cache.set(location, result)
return result |
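A hedged usage sketch (assumes a configured Django cache backend and the geocoder package; 'osm' is one of the providers geocoder.get accepts):

g = get_cached('Ottawa, Ontario', provider='osm')
if g.ok:
    print(g.latlng)  # e.g. [45.42, -75.69]; repeat calls are served from cache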
def compute_fitness_cdf(chromosomes, ga):
"""
Return a list of fitness-weighted cumulative probabilities for a set of chromosomes.
chromosomes: chromosomes to use for fitness-based calculations
ga: ``algorithms.BaseGeneticAlgorithm`` used to obtain fitness values using its ``eval_fitness`` method
return: list of fitness-weighted cumulative probabilities in [0, 1]
"""
ga.sort(chromosomes)
fitness = [ga.eval_fitness(c) for c in chromosomes]
min_fit = min(fitness)
fit_range = max(fitness) - min_fit
if fit_range == 0:
# all chromosomes have equal chance of being chosen
n = len(chromosomes)
return [i / n for i in range(1, n + 1)]
return [(fit - min_fit) / fit_range for fit in fitness] | Return a list of fitness-weighted cumulative probabilities for a set of chromosomes.
chromosomes: chromosomes to use for fitness-based calculations
ga: ``algorithms.BaseGeneticAlgorithm`` used to obtain fitness values using its ``eval_fitness`` method
return: list of fitness-weighted cumulative probabilities in [0, 1] | Below is the instruction that describes the task:
### Input:
Return a list of fitness-weighted cumulative probabilities for a set of chromosomes.
chromosomes: chromosomes to use for fitness-based calculations
ga: ``algorithms.BaseGeneticAlgorithm`` used to obtain fitness values using its ``eval_fitness`` method
return: list of fitness-weighted cumulative probabilities in [0, 1]
### Response:
def compute_fitness_cdf(chromosomes, ga):
"""
Return a list of fitness-weighted cumulative probabilities for a set of chromosomes.
chromosomes: chromosomes to use for fitness-based calculations
ga: ``algorithms.BaseGeneticAlgorithm`` used to obtain fitness values using its ``eval_fitness`` method
return: list of fitness-weighted cumulative probabilities in [0, 1]
"""
ga.sort(chromosomes)
fitness = [ga.eval_fitness(c) for c in chromosomes]
min_fit = min(fitness)
fit_range = max(fitness) - min_fit
if fit_range == 0:
# all chromosomes have equal chance of being chosen
n = len(chromosomes)
return [i / n for i in range(1, n + 1)]
return [(fit - min_fit) / fit_range for fit in fitness] |
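A runnable sketch with a stub GA; only the sort and eval_fitness methods of BaseGeneticAlgorithm are assumed, and note that the non-degenerate branch returns min-max-normalized fitnesses:

class StubGA(object):
    def eval_fitness(self, chromosome):
        return sum(chromosome)            # toy fitness: count of 1-bits
    def sort(self, chromosomes):
        chromosomes.sort(key=self.eval_fitness)

population = [[0, 0, 1], [1, 1, 0], [1, 1, 1]]
print(compute_fitness_cdf(population, StubGA()))
# fitnesses 1, 2, 3 -> min 1, range 2 -> [0.0, 0.5, 1.0]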
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s | Check and parse thickness specs as either a single [s] or a list of [s,s,s,...] | Below is the instruction that describes the task:
### Input:
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
### Response:
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s |
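A usage sketch, assuming check_1d wraps a scalar into a one-element list and passes sequences through:

print(check_thickness(1.5))        # -> [1.5] under the check_1d assumption
print(check_thickness([1, 2, 3]))  # -> [1, 2, 3]
try:
    check_thickness([1, 0, 2])
except Exception as exc:
    print(exc)                     # Thickness cannot be 0 or negative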
def ListFiles(directory):
'''
Lists the files in the given directory
:type directory: unicode | unicode
:param directory:
A directory or URL
:rtype: list(unicode) | list(unicode)
:returns:
List of filenames/directories found in the given directory.
Returns None if the given directory does not exist.
If `directory` is a unicode string, all files returned will also be unicode
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.isdir(directory):
return None
return os.listdir(directory)
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme) | Lists the files in the given directory
:type directory: unicode | unicode
:param directory:
A directory or URL
:rtype: list(unicode) | list(unicode)
:returns:
List of filenames/directories found in the given directory.
Returns None if the given directory does not exist.
If `directory` is a unicode string, all files returned will also be unicode
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information | Below is the instruction that describes the task:
### Input:
Lists the files in the given directory
:type directory: unicode | unicode
:param directory:
A directory or URL
:rtype: list(unicode) | list(unicode)
:returns:
List of filenames/directories found in the given directory.
Returns None if the given directory does not exist.
If `directory` is a unicode string, all files returned will also be unicode
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
### Response:
def ListFiles(directory):
'''
Lists the files in the given directory
:type directory: unicode | unicode
:param directory:
A directory or URL
:rtype: list(unicode) | list(unicode)
:returns:
List of filenames/directories found in the given directory.
Returns None if the given directory does not exist.
If `directory` is a unicode string, all files returned will also be unicode
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.isdir(directory):
return None
return os.listdir(directory)
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme) |
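Usage sketch; local paths return a listing (or None), while ftp:// URLs raise NotImplementedProtocol, exactly as the branches above show:

print(ListFiles('/tmp'))           # e.g. ['a.txt', 'sub'] -- or None if missing
print(ListFiles('/no/such/dir'))   # None
# ListFiles('ftp://host/pub')      # raises NotImplementedProtocol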
def access_storage_rm(name, yes, **kwargs):
"""
Remove ACL for the specified collection.
If none is specified - removes ACL for all collections.
"""
if name is None:
if not yes:
click.confirm('Are you sure you want to remove all ACL?', abort=True)
ctx = Context(**kwargs)
ctx.execute_action('access:storage:rm', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
}) | Remove ACL for the specified collection.
If none is specified - removes ACL for all collections. | Below is the instruction that describes the task:
### Input:
Remove ACL for the specified collection.
If none is specified - removes ACL for all collections.
### Response:
def access_storage_rm(name, yes, **kwargs):
"""
Remove ACL for the specified collection.
If none is specified - removes ACL for all collections.
"""
if name is None:
if not yes:
click.confirm('Are you sure you want to remove all ACL?', abort=True)
ctx = Context(**kwargs)
ctx.execute_action('access:storage:rm', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
}) |
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
'''
Check the template shebang line and return the list of renderers specified
in the pipe.
Example shebang lines::
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf
'''
line = ''
# Open up the first line of the sls template
if template == ':string:':
line = input_data.split()[0]
else:
with salt.utils.files.fopen(template, 'r') as ifile:
line = salt.utils.stringutils.to_unicode(ifile.readline())
# Check if it starts with a shebang and not a path
if line.startswith('#!') and not line.startswith('#!/'):
# pull out the shebang data
# If the shebang does not contain recognized/not-blacklisted/whitelisted
# renderers, do not fall back to the default renderer
return check_render_pipe_str(line.strip()[2:], renderers, blacklist, whitelist)
else:
return check_render_pipe_str(default, renderers, blacklist, whitelist) | Check the template shebang line and return the list of renderers specified
in the pipe.
Example shebang lines::
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf | Below is the instruction that describes the task:
### Input:
Check the template shebang line and return the list of renderers specified
in the pipe.
Example shebang lines::
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf
### Response:
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
'''
Check the template shebang line and return the list of renderers specified
in the pipe.
Example shebang lines::
#!yaml_jinja
#!yaml_mako
#!mako|yaml
#!jinja|yaml
#!jinja|mako|yaml
#!mako|yaml|stateconf
#!jinja|yaml|stateconf
#!mako|yaml_odict
#!mako|yaml_odict|stateconf
'''
line = ''
# Open up the first line of the sls template
if template == ':string:':
line = input_data.split()[0]
else:
with salt.utils.files.fopen(template, 'r') as ifile:
line = salt.utils.stringutils.to_unicode(ifile.readline())
# Check if it starts with a shebang and not a path
if line.startswith('#!') and not line.startswith('#!/'):
# pull out the shebang data
# If the shebang does not contain recognized/not-blacklisted/whitelisted
# renderers, do not fall back to the default renderer
return check_render_pipe_str(line.strip()[2:], renderers, blacklist, whitelist)
else:
return check_render_pipe_str(default, renderers, blacklist, whitelist) |
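A minimal sketch of the shebang handling alone; check_render_pipe_str is not reimplemented here, and splitting on '|' only illustrates the pipe syntax it is expected to parse:

line = '#!jinja|yaml|stateconf\n'
if line.startswith('#!') and not line.startswith('#!/'):
    pipe_spec = line.strip()[2:]          # -> 'jinja|yaml|stateconf'
    print(pipe_spec.split('|'))           # ['jinja', 'yaml', 'stateconf']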
def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim2,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape | When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim2,dim2) | Below is the instruction that describes the task:
### Input:
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim2,dim2)
### Response:
def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim2,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape |
def log_likelihood(C, T):
r"""Log-likelihood of the count matrix given a transition matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
Returns
-------
logL : float
Log-likelihood of the count matrix
Notes
-----
The likelihood of a set of observed transition counts
:math:`C=(c_{ij})` for a given matrix of transition counts
:math:`T=(t_{ij})` is given by
.. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The log-likelihood is given by
.. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}.
The likelihood describes the probability of making an observation
:math:`C` for a given model :math:`P`.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import log_likelihood
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-38.2808034725...
>>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-68.2144096814...
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
"""
if issparse(C) and issparse(T):
return sparse.likelihood.log_likelihood(C, T)
else:
# use the dense likelihood calculator for all other cases
# if a mix of dense/sparse C/T matrices is used, then both
# will be converted to ndarrays.
if not isinstance(C, np.ndarray):
C = np.array(C)
if not isinstance(T, np.ndarray):
T = np.array(T)
# computation is still efficient, because we only use terms
# for nonzero elements of T
nz = np.nonzero(T)
return np.dot(C[nz], np.log(T[nz])) | r"""Log-likelihood of the count matrix given a transition matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
Returns
-------
logL : float
Log-likelihood of the count matrix
Notes
-----
The likelihood of a set of observed transition counts
:math:`C=(c_{ij})` for a given matrix of transition counts
:math:`T=(t_{ij})` is given by
.. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The log-likelihood is given by
.. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}.
The likelihood describes the probability of making an observation
:math:`C` for a given model :math:`P`.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import log_likelihood
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-38.2808034725...
>>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-68.2144096814...
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105 | Below is the instruction that describes the task:
### Input:
r"""Log-likelihood of the count matrix given a transition matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
Returns
-------
logL : float
Log-likelihood of the count matrix
Notes
-----
The likelihood of a set of observed transition counts
:math:`C=(c_{ij})` for a given matrix of transition counts
:math:`T=(t_{ij})` is given by
.. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The log-likelihood is given by
.. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}.
The likelihood describes the probability of making an observation
:math:`C` for a given model :math:`P`.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import log_likelihood
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-38.2808034725...
>>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-68.2144096814...
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
### Response:
def log_likelihood(C, T):
r"""Log-likelihood of the count matrix given a transition matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
Returns
-------
logL : float
Log-likelihood of the count matrix
Notes
-----
The likelihood of a set of observed transition counts
:math:`C=(c_{ij})` for a given matrix of transition counts
:math:`T=(t_{ij})` is given by
.. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The log-likelihood is given by
.. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}.
The likelihood describes the probability of making an observation
:math:`C` for a given model :math:`P`.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import log_likelihood
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-38.2808034725...
>>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-68.2144096814...
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
"""
if issparse(C) and issparse(T):
return sparse.likelihood.log_likelihood(C, T)
else:
# use the dense likelihood calculator for all other cases
# if a mix of dense/sparse C/T matrices is used, then both
# will be converted to ndarrays.
if not isinstance(C, np.ndarray):
C = np.array(C)
if not isinstance(T, np.ndarray):
T = np.array(T)
# computation is still efficient, because we only use terms
# for nonzero elements of T
nz = np.nonzero(T)
return np.dot(C[nz], np.log(T[nz])) |
def update(self, fallback_actions=values.unset):
"""
Update the AssistantFallbackActionsInstance
:param dict fallback_actions: The fallback_actions
:returns: Updated AssistantFallbackActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
"""
return self._proxy.update(fallback_actions=fallback_actions, ) | Update the AssistantFallbackActionsInstance
:param dict fallback_actions: The fallback_actions
:returns: Updated AssistantFallbackActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance | Below is the instruction that describes the task:
### Input:
Update the AssistantFallbackActionsInstance
:param dict fallback_actions: The fallback_actions
:returns: Updated AssistantFallbackActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
### Response:
def update(self, fallback_actions=values.unset):
"""
Update the AssistantFallbackActionsInstance
:param dict fallback_actions: The fallback_actions
:returns: Updated AssistantFallbackActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsInstance
"""
return self._proxy.update(fallback_actions=fallback_actions, ) |
def remove_password(entry, username=None):
"""
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print(e)
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e) | Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user. | Below is the instruction that describes the task:
### Input:
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
### Response:
def remove_password(entry, username=None):
"""
Removes the password for the specific user in the user's keychain.
:param entry: The entry in the keychain. This is a caller specific key.
:param username: The username whose password is to be removed. Default is the current user.
"""
if username is None:
username = get_username()
has_keychain = initialize_keychain()
if has_keychain:
try:
keyring.delete_password(entry, username)
except Exception as e:
print(e)
log.warn("Unable to delete password in keyring. Continuing..")
log.debug(e) |
def remove_root_repository(self, repository_id):
"""Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=repository_id)
return self._hierarchy_session.remove_root(id_=repository_id) | Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def remove_root_repository(self, repository_id):
"""Removes a root repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not a root
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_root_catalog(catalog_id=repository_id)
return self._hierarchy_session.remove_root(id_=repository_id) |
def user_context(self, worker_ctx, exc_info):
""" Merge any user context to include in the sentry payload.
Extracts user identifiers from the worker context data by matching
context keys with the regular expressions in ``user_type_context_keys``.
"""
user = {}
for key in worker_ctx.context_data:
for matcher in self.user_type_context_keys:
if re.search(matcher, key):
user[key] = worker_ctx.context_data[key]
break
self.client.user_context(user) | Merge any user context to include in the sentry payload.
Extracts user identifiers from the worker context data by matching
context keys with the regular expressions in ``user_type_context_keys``. | Below is the instruction that describes the task:
### Input:
Merge any user context to include in the sentry payload.
Extracts user identifiers from the worker context data by matching
context keys with the regular expressions in ``user_type_context_keys``.
### Response:
def user_context(self, worker_ctx, exc_info):
""" Merge any user context to include in the sentry payload.
Extracts user identifiers from the worker context data by matching
context keys with the regular expressions in ``user_type_context_keys``.
"""
user = {}
for key in worker_ctx.context_data:
for matcher in self.user_type_context_keys:
if re.search(matcher, key):
user[key] = worker_ctx.context_data[key]
break
self.client.user_context(user) |
def _get_entry(self, entry, entry_tree):
'''Helper function for retrieving a particular entry from the prefix trees'''
for e in entry_tree[entry.filename]:
if entry == e:
return e | Helper function for retrieving a particular entry from the prefix trees | Below is the instruction that describes the task:
### Input:
Helper function for retrieving a particular entry from the prefix trees
### Response:
def _get_entry(self, entry, entry_tree):
'''Helper function for retrieving a particular entry from the prefix trees'''
for e in entry_tree[entry.filename]:
if entry == e:
return e |
def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
"""
self.idlers.append((callback, args, kwds)) | Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed. | Below is the instruction that describes the task:
### Input:
Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
### Response:
def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
"""
self.idlers.append((callback, args, kwds)) |
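A sketch of an idler callback honoring the None/False/True contract; eventloop stands for an instance of the class defining add_idle:

import collections
work = collections.deque([1, 2, 3])

def drain_one(queue):
    if not queue:
        return None    # nothing left: remove the idler
    queue.popleft()
    return True        # did work: reschedule soon

# eventloop.add_idle(drain_one, work)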
def fit(self, x, y):
"""
:param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:param y: 1D np.ndarray (n_instances, ) labels
:return:
"""
verbose = self.verbose
# Create temporary files
data_file = tempfile.NamedTemporaryFile("w+b", delete=False)
label_file = tempfile.NamedTemporaryFile("w+b", delete=False)
start = time.time()
raw_rules = categorical2pysbrl_data(x, y, data_file.name, label_file.name, supp=self.min_support,
zmin=self.min_rule_len, zmax=self.max_rule_len, method=self.fim_method)
if verbose > 1:
print("Info: sbrl data files saved to %s and %s temporarily" % (data_file.name, label_file.name))
data_file.close()
label_file.close()
cat_time = time.time() - start
if verbose:
print("Info: time for rule mining: %.4fs" % cat_time)
n_labels = int(np.max(y)) + 1
start = time.time()
_model = train_sbrl(data_file.name, label_file.name, self.lambda_, eta=self.eta,
max_iters=self.iters, n_chains=self.n_chains, seed=self.seed,
alpha=self.alpha, verbose=verbose)
train_time = time.time() - start
if verbose:
print("Info: training time: %.4fs" % train_time)
# update model parameters
self._n_classes = n_labels
self._n_features = x.shape[1]
# convert the raw parameters to rules
self.from_raw(_model[0], _model[1], raw_rules)
self._supports = self.compute_support(x, y)
# Close the temp files
os.unlink(data_file.name)
os.unlink(label_file.name) | :param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:param y: 1D np.ndarray (n_instances, ) labels
:return: | Below is the instruction that describes the task:
### Input:
:param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:param y: 1D np.ndarray (n_instances, ) labels
:return:
### Response:
def fit(self, x, y):
"""
:param x: 2D np.ndarray (n_instances, n_features) should be categorical data, must be of type int
:param y: 1D np.ndarray (n_instances, ) labels
:return:
"""
verbose = self.verbose
# Create temporary files
data_file = tempfile.NamedTemporaryFile("w+b", delete=False)
label_file = tempfile.NamedTemporaryFile("w+b", delete=False)
start = time.time()
raw_rules = categorical2pysbrl_data(x, y, data_file.name, label_file.name, supp=self.min_support,
zmin=self.min_rule_len, zmax=self.max_rule_len, method=self.fim_method)
if verbose > 1:
print("Info: sbrl data files saved to %s and %s temporarily" % (data_file.name, label_file.name))
data_file.close()
label_file.close()
cat_time = time.time() - start
if verbose:
print("Info: time for rule mining: %.4fs" % cat_time)
n_labels = int(np.max(y)) + 1
start = time.time()
_model = train_sbrl(data_file.name, label_file.name, self.lambda_, eta=self.eta,
max_iters=self.iters, n_chains=self.n_chains, seed=self.seed,
alpha=self.alpha, verbose=verbose)
train_time = time.time() - start
if verbose:
print("Info: training time: %.4fs" % train_time)
# update model parameters
self._n_classes = n_labels
self._n_features = x.shape[1]
# convert the raw parameters to rules
self.from_raw(_model[0], _model[1], raw_rules)
self._supports = self.compute_support(x, y)
# Close the temp files
os.unlink(data_file.name)
os.unlink(label_file.name) |
def parse_config(self, config_file):
"""
Given a configuration file, read in and interpret the results
:param config_file:
:return:
"""
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} | Given a configuration file, read in and interpret the results
:param config_file:
:return: | Below is the instruction that describes the task:
### Input:
Given a configuration file, read in and interpret the results
:param config_file:
:return:
### Response:
def parse_config(self, config_file):
"""
Given a configuration file, read in and interpret the results
:param config_file:
:return:
"""
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} |
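A sketch of the expected config shape (the file name and values are illustrative):

import json, tempfile

cfg = {'proxy': {'proxy_type': 'https',
                 'proxy_url': 'https://proxy.example:8080'}}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(cfg, f)
# After obj.parse_config(f.name), obj.params['proxy'] becomes
# {'https': 'https://proxy.example:8080'}, a requests-style proxy map.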
def fields(self, value):
"""sets the fields variable"""
if type(value) is list:
self._fields = value
else:
raise TypeError("Input must be a list") | sets the fields variable | Below is the the instruction that describes the task:
### Input:
sets the fields variable
### Response:
def fields(self, value):
"""sets the fields variable"""
if type(value) is list:
self._fields = value
else:
raise TypeError("Input must be a list") |
def gen_otu_dict(nex_obj, nexson_version=None):
"""Takes a NexSON object and returns a dict of
otu_id -> otu_obj
"""
if nexson_version is None:
nexson_version = detect_nexson_version(nex_obj)
if _is_by_id_hbf(nexson_version):
otus = nex_obj['nexml']['otusById']
if len(otus) > 1:
d = {}
for v in otus.values():
d.update(v['otuById'])
return d
else:
return otus.values()[0]['otuById']
o_dict = {}
for ob in nex_obj.get('otus', []):
for o in ob.get('otu', []):
oid = o['@id']
o_dict[oid] = o
return o_dict | Takes a NexSON object and returns a dict of
otu_id -> otu_obj | Below is the instruction that describes the task:
### Input:
Takes a NexSON object and returns a dict of
otu_id -> otu_obj
### Response:
def gen_otu_dict(nex_obj, nexson_version=None):
"""Takes a NexSON object and returns a dict of
otu_id -> otu_obj
"""
if nexson_version is None:
nexson_version = detect_nexson_version(nex_obj)
if _is_by_id_hbf(nexson_version):
otus = nex_obj['nexml']['otusById']
if len(otus) > 1:
d = {}
for v in otus.values():
d.update(v['otuById'])
return d
else:
return otus.values()[0]['otuById']
o_dict = {}
for ob in nex_obj.get('otus', []):
for o in ob.get('otu', []):
oid = o['@id']
o_dict[oid] = o
return o_dict |
def flushmany(self):
"""Send a potentially huge number of pending signals over the message bus.
This method assumes that the number of pending signals might
be huge, so that they might not fit into memory. However,
`SignalBus.flushmany` is not very smart in handling concurrent
senders. It is mostly useful when recovering from long periods
of disconnectedness from the message bus.
:return: The total number of signals that have been sent
"""
models_to_flush = self.get_signal_models()
try:
return sum(self._flushmany_signals(model) for model in models_to_flush)
finally:
self.signal_session.remove() | Send a potentially huge number of pending signals over the message bus.
This method assumes that the number of pending signals might
be huge, so that they might not fit into memory. However,
`SignalBus.flushmany` is not very smart in handling concurrent
senders. It is mostly useful when recovering from long periods
of disconnectedness from the message bus.
:return: The total number of signals that have been sent | Below is the instruction that describes the task:
### Input:
Send a potentially huge number of pending signals over the message bus.
This method assumes that the number of pending signals might
be huge, so that they might not fit into memory. However,
`SignalBus.flushmany` is not very smart in handling concurrent
senders. It is mostly useful when recovering from long periods
of disconnectedness from the message bus.
:return: The total number of signals that have been sent
### Response:
def flushmany(self):
"""Send a potentially huge number of pending signals over the message bus.
This method assumes that the number of pending signals might
be huge, so that they might not fit into memory. However,
`SignalBus.flushmany` is not very smart in handling concurrent
senders. It is mostly useful when recovering from long periods
of disconnectedness from the message bus.
:return: The total number of signals that have been sent
"""
models_to_flush = self.get_signal_models()
try:
return sum(self._flushmany_signals(model) for model in models_to_flush)
finally:
self.signal_session.remove() |
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
"""Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
"""
# TODO: remove seen
if remove_seen:
raise ValueError("Not implemented")
self.get_user_affinity(test)\
.write.mode("overwrite")\
.saveAsTable(self.f("{prefix}user_affinity"))
# user_affinity * item_similarity
# filter top-k
query = self.f(
"""
SELECT {col_user}, {col_item}, score
FROM
(
SELECT df.{col_user},
S.i2 {col_item},
SUM(df.{col_rating} * S.value) AS score,
row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank
FROM
{prefix}user_affinity df,
{prefix}item_similarity S
WHERE df.{col_item} = S.i1
GROUP BY df.{col_user}, S.i2
)
WHERE rank <= {top_k}
""",
top_k=top_k,
)
return self.spark.sql(query) | Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set. | Below is the instruction that describes the task:
### Input:
Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
### Response:
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
"""Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
"""
# TODO: remove seen
if remove_seen:
raise ValueError("Not implemented")
self.get_user_affinity(test)\
.write.mode("overwrite")\
.saveAsTable(self.f("{prefix}user_affinity"))
# user_affinity * item_similarity
# filter top-k
query = self.f(
"""
SELECT {col_user}, {col_item}, score
FROM
(
SELECT df.{col_user},
S.i2 {col_item},
SUM(df.{col_rating} * S.value) AS score,
row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank
FROM
{prefix}user_affinity df,
{prefix}item_similarity S
WHERE df.{col_item} = S.i1
GROUP BY df.{col_user}, S.i2
)
WHERE rank <= {top_k}
""",
top_k=top_k,
)
return self.spark.sql(query) |
def data(self, data, part=False, dataset=''):
"""Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
"""
links = self.parser(self.scanner(data, part), part, dataset)
self.storage.add_links(links) | Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: ''). | Below is the instruction that describes the task:
### Input:
Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
### Response:
def data(self, data, part=False, dataset=''):
"""Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
"""
links = self.parser(self.scanner(data, part), part, dataset)
self.storage.add_links(links) |
def merge(self, evaluation_context: 'EvaluationContext') -> None:
"""
Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge.
"""
self.global_context.merge(evaluation_context.global_context)
self.local_context.merge(evaluation_context.local_context) | Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge. | Below is the instruction that describes the task:
### Input:
Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge.
### Response:
def merge(self, evaluation_context: 'EvaluationContext') -> None:
"""
Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge.
"""
self.global_context.merge(evaluation_context.global_context)
self.local_context.merge(evaluation_context.local_context) |
def hide_cursor(stream=sys.stdout):
"""
Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
"""
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ctypes import windll
cursor_info = CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
cursor_info.visible = False
windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
else:
handle.write("\033[?25l")
handle.flush() | Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None | Below is the instruction that describes the task:
### Input:
Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
### Response:
def hide_cursor(stream=sys.stdout):
"""
Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
"""
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ctypes import windll
cursor_info = CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
cursor_info.visible = False
windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
else:
handle.write("\033[?25l")
handle.flush() |
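Usage sketch; the matching "show" escape is \033[?25h, which this module presumably exposes via a counterpart helper (an assumption), so it is written directly here for the POSIX branch:

import sys, time
hide_cursor(sys.stdout)
try:
    for i in range(3):
        sys.stdout.write('\rworking %d' % i)
        sys.stdout.flush()
        time.sleep(0.1)
finally:
    sys.stdout.write('\033[?25h')  # re-show the cursor (ANSI escape)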
def post_upgrade_checks(self, upgrades):
"""Run post-upgrade checks after applying all pending upgrades.
    Post-checks may be used to emit warnings encountered when applying an
    upgrade, but post-checks can also be used to advise the user to run
re-indexing or similar long running processes.
Post-checks may query for user-input, but should respect the
--yes-i-know option to run in an unattended mode.
All applied upgrades post-checks are executed.
:param upgrades: List of upgrades sorted in topological order.
"""
errors = []
for u in upgrades:
self._setup_log_prefix(plugin_id=u.name)
try:
u.post_upgrade()
except RuntimeError as e:
errors.append((u.name, e.args))
for check in self.global_post_upgrade:
self._setup_log_prefix(plugin_id=check.__name__)
try:
check()
except RuntimeError as e:
errors.append((check.__name__, e.args))
self._teardown_log_prefix()
self._check_errors(errors, "Post-upgrade check for %s failed with the "
"following errors:") | Run post-upgrade checks after applying all pending upgrades.
Post-checks may be used to emit warnings encountered when applying an
upgrade, but post-checks can also be used to advise the user to run
re-indexing or similar long running processes.
Post-checks may query for user-input, but should respect the
--yes-i-know option to run in an unattended mode.
All applied upgrades post-checks are executed.
:param upgrades: List of upgrades sorted in topological order. | Below is the the instruction that describes the task:
### Input:
Run post-upgrade checks after applying all pending upgrades.
Post-checks may be used to emit warnings encountered when applying an
upgrade, but post-checks can also be used to advise the user to run
re-indexing or similar long running processes.
Post-checks may query for user-input, but should respect the
--yes-i-know option to run in an unattended mode.
All applied upgrades post-checks are executed.
:param upgrades: List of upgrades sorted in topological order.
### Response:
def post_upgrade_checks(self, upgrades):
"""Run post-upgrade checks after applying all pending upgrades.
    Post-checks may be used to emit warnings encountered when applying an
    upgrade, but post-checks can also be used to advise the user to run
re-indexing or similar long running processes.
Post-checks may query for user-input, but should respect the
--yes-i-know option to run in an unattended mode.
All applied upgrades post-checks are executed.
:param upgrades: List of upgrades sorted in topological order.
"""
errors = []
for u in upgrades:
self._setup_log_prefix(plugin_id=u.name)
try:
u.post_upgrade()
except RuntimeError as e:
errors.append((u.name, e.args))
for check in self.global_post_upgrade:
self._setup_log_prefix(plugin_id=check.__name__)
try:
check()
except RuntimeError as e:
errors.append((check.__name__, e.args))
self._teardown_log_prefix()
self._check_errors(errors, "Post-upgrade check for %s failed with the "
"following errors:") |
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy arrays."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill_values:
condition |= data == fv
    return np.where(condition, decoded_fill_value, data) | Mask all matching values in a NumPy array.
### Input:
Mask all matching values in a NumPy array.
### Response:
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy arrays."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill_values:
condition |= data == fv
return np.where(condition, decoded_fill_value, data) |
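A short usage sketch of the masking idiom above; the fill values here are hypothetical sentinels (for example a netCDF _FillValue), not taken from the original.

import numpy as np

data = np.array([1.0, -9999.0, 3.0, -1e30])
encoded_fill_values = [-9999.0, -1e30]

condition = False
for fv in encoded_fill_values:
    condition |= data == fv  # accumulate a boolean mask over all sentinels

print(np.where(condition, np.nan, data))  # [ 1. nan  3. nan]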
def verify_element_present(self, locator, msg=None):
"""
    Soft assert for whether an element is present in the current window/frame
    :param locator: the locator of the element to search for
    :param msg: (Optional) msg explaining the difference
"""
try:
        self.assert_element_present(locator, msg)
    except AssertionError as e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
        self.verification_errors.append(m) | Soft assert for whether an element is present in the current window/frame
:param locator: the locator of the element to search for
:param msg: (Optional) msg explaining the difference
### Input:
Soft assert for whether an element is present in the current window/frame
:param locator: the locator of the element to search for
:param msg: (Optional) msg explaining the difference
### Response:
def verify_element_present(self, locator, msg=None):
"""
    Soft assert for whether an element is present in the current window/frame
    :param locator: the locator of the element to search for
    :param msg: (Optional) msg explaining the difference
"""
try:
        self.assert_element_present(locator, msg)
    except AssertionError as e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
        self.verification_errors.append(m)
def function_type(self):
"""returns function type. See :class:`type_t` hierarchy"""
return cpptypes.free_function_type_t(
return_type=self.return_type,
arguments_types=[
arg.decl_type for arg in self.arguments]) | returns function type. See :class:`type_t` hierarchy | Below is the the instruction that describes the task:
### Input:
returns function type. See :class:`type_t` hierarchy
### Response:
def function_type(self):
"""returns function type. See :class:`type_t` hierarchy"""
return cpptypes.free_function_type_t(
return_type=self.return_type,
arguments_types=[
arg.decl_type for arg in self.arguments]) |
def used_in_func(statement: str, filename: str = '<string>',
mode: str = 'exec'):
'''Parse a Python statement and analyze the symbols used. The result
will be used to determine what variables a step depends upon.'''
try:
return get_used_in_func(ast.parse(statement, filename, mode))
except Exception as e:
raise RuntimeError(f'Failed to parse statement: {statement} {e}') | Parse a Python statement and analyze the symbols used. The result
will be used to determine what variables a step depends upon. | Below is the the instruction that describes the task:
### Input:
Parse a Python statement and analyze the symbols used. The result
will be used to determine what variables a step depends upon.
### Response:
def used_in_func(statement: str, filename: str = '<string>',
mode: str = 'exec'):
'''Parse a Python statement and analyze the symbols used. The result
will be used to determine what variables a step depends upon.'''
try:
return get_used_in_func(ast.parse(statement, filename, mode))
except Exception as e:
raise RuntimeError(f'Failed to parse statement: {statement} {e}') |
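get_used_in_func is not shown in the snippet above; as a sketch of the same idea, the helper below walks the parsed AST and collects every name that is read (ast.Load context), which is one plausible way to derive a step's dependencies.

import ast

def names_loaded(statement, filename='<string>', mode='exec'):
    """Collect every name read (ast.Load context) in a statement."""
    tree = ast.parse(statement, filename, mode)
    return {node.id for node in ast.walk(tree)
            if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load)}

print(names_loaded('c = a + b * f(x)'))  # e.g. {'a', 'b', 'f', 'x'} (set order varies)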
def setdefault(obj: JsonObj, k: str, value: Union[Dict, JsonTypes]) -> JsonObjTypes:
""" Dictionary setdefault reoutine """
return obj._setdefault(k, value) | Dictionary setdefault reoutine | Below is the the instruction that describes the task:
### Input:
Dictionary setdefault routine
### Response:
def setdefault(obj: JsonObj, k: str, value: Union[Dict, JsonTypes]) -> JsonObjTypes:
""" Dictionary setdefault reoutine """
return obj._setdefault(k, value) |
def get_msg(self, block=True, timeout=None):
"Gets a message if there is one that is ready."
return self._in_queue.get(block, timeout) | Gets a message if there is one that is ready. | Below is the the instruction that describes the task:
### Input:
Gets a message if there is one that is ready.
### Response:
def get_msg(self, block=True, timeout=None):
"Gets a message if there is one that is ready."
return self._in_queue.get(block, timeout) |
def str(self, value, tolerant=False, limit=1000, seen=frozenset()):
"""Transform value into a representation suitable for substitution."""
if value is None:
if tolerant:
return ""
raise ValueError("value is None")
if isinstance(value, (bool, numbers.Number, basestring)):
return str(value)
if not isinstance(value, collections.Iterable):
if not tolerant:
raise ValueError("unknown value type")
try:
name = value.name
except AttributeError:
try:
name = value.__name__
except AttributeError:
try:
name = value.__class__.__name__
except AttributeError:
return "<?>"
return "<%s>" % (name,)
is_mapping = isinstance(value, collections.Mapping)
if not seen:
wrap = "%s"
elif is_mapping:
wrap = "{%s}"
else:
wrap = "[%s]"
id_ = id(value)
if id_ in seen:
if tolerant:
return wrap % ("...",)
raise ValueError("recursive representation")
seen = seen.union((id_,))
if is_mapping:
items = [(self.str(n, tolerant=tolerant, limit=limit, seen=seen),
self.str(v, tolerant=tolerant, limit=limit, seen=seen))
for n, v in value.items()]
items.sort()
items = ("%s=%s" for n, v in items)
else:
it = iter(value)
items = [self.str(item, tolerant=tolerant, limit=limit, seen=seen)
for item in itertools.islice(
it,
len(value)
if isinstance(value, collections.Sized)
else limit)]
items.sort()
try:
next(it)
except StopIteration:
pass
else:
if not tolerant:
raise ValueError("iterable too long")
items.append("...")
return wrap % (", ".join(items),) | Transform value into a representation suitable for substitution. | Below is the the instruction that describes the task:
### Input:
Transform value into a representation suitable for substitution.
### Response:
def str(self, value, tolerant=False, limit=1000, seen=frozenset()):
"""Transform value into a representation suitable for substitution."""
if value is None:
if tolerant:
return ""
raise ValueError("value is None")
if isinstance(value, (bool, numbers.Number, basestring)):
return str(value)
if not isinstance(value, collections.Iterable):
if not tolerant:
raise ValueError("unknown value type")
try:
name = value.name
except AttributeError:
try:
name = value.__name__
except AttributeError:
try:
name = value.__class__.__name__
except AttributeError:
return "<?>"
return "<%s>" % (name,)
is_mapping = isinstance(value, collections.Mapping)
if not seen:
wrap = "%s"
elif is_mapping:
wrap = "{%s}"
else:
wrap = "[%s]"
id_ = id(value)
if id_ in seen:
if tolerant:
return wrap % ("...",)
raise ValueError("recursive representation")
seen = seen.union((id_,))
if is_mapping:
items = [(self.str(n, tolerant=tolerant, limit=limit, seen=seen),
self.str(v, tolerant=tolerant, limit=limit, seen=seen))
for n, v in value.items()]
items.sort()
items = ("%s=%s" for n, v in items)
else:
it = iter(value)
items = [self.str(item, tolerant=tolerant, limit=limit, seen=seen)
for item in itertools.islice(
it,
len(value)
if isinstance(value, collections.Sized)
else limit)]
items.sort()
try:
next(it)
except StopIteration:
pass
else:
if not tolerant:
raise ValueError("iterable too long")
items.append("...")
return wrap % (", ".join(items),) |
def _warn_unsafe_options(cls, args):
'''Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors``
'''
enabled_options = []
for option_name in cls.UNSAFE_OPTIONS:
if getattr(args, option_name):
enabled_options.append(option_name)
if enabled_options:
_logger.warning(__(
_('The following unsafe options are enabled: {list}.'),
list=enabled_options
))
_logger.warning(
_('The use of unsafe options may lead to unexpected behavior '
'or file corruption.'))
if not args.retr_symlinks:
_logger.warning(
_('The --retr-symlinks=off option is a security risk.')
) | Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors`` | Below is the the instruction that describes the task:
### Input:
Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors``
### Response:
def _warn_unsafe_options(cls, args):
'''Print warnings about any enabled hazardous options.
This function will print messages complaining about:
* ``--save-headers``
* ``--no-iri``
* ``--output-document``
* ``--ignore-fatal-errors``
'''
enabled_options = []
for option_name in cls.UNSAFE_OPTIONS:
if getattr(args, option_name):
enabled_options.append(option_name)
if enabled_options:
_logger.warning(__(
_('The following unsafe options are enabled: {list}.'),
list=enabled_options
))
_logger.warning(
_('The use of unsafe options may lead to unexpected behavior '
'or file corruption.'))
if not args.retr_symlinks:
_logger.warning(
_('The --retr-symlinks=off option is a security risk.')
) |
def ReadUsers(self, database_link, options=None):
"""Reads all users in a database.
    :param str database_link:
        The link to the database.
    :param dict options:
The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUsers(database_link, None, options) | Reads all users in a database.
:param str database_link:
    The link to the database.
:param dict options:
The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable | Below is the the instruction that describes the task:
### Input:
Reads all users in a database.
:param str database_link:
    The link to the database.
:param dict options:
The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable
### Response:
def ReadUsers(self, database_link, options=None):
"""Reads all users in a database.
    :param str database_link:
        The link to the database.
    :param dict options:
The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUsers(database_link, None, options) |
def understand(self):
"""
:returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand
"""
if self._understand is None:
self._understand = Understand(self)
return self._understand | :returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand | Below is the the instruction that describes the task:
### Input:
:returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand
### Response:
def understand(self):
"""
:returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand
"""
if self._understand is None:
self._understand = Understand(self)
return self._understand |
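The property above is the usual lazy-initialization pattern: build the dependent sub-client on first access, then reuse it. A generic, self-contained version (the Understand class itself is not reproduced):

class Client:
    def __init__(self):
        self._understand = None  # sub-client created lazily

    @property
    def understand(self):
        # Build the dependent object only on first access, then cache it
        if self._understand is None:
            self._understand = object()  # stands in for Understand(self)
        return self._understand

c = Client()
assert c.understand is c.understand  # the same instance on every access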
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
'''This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.
Like :meth:`expect`, passing ``async_=True`` will make this return an
asyncio coroutine.
'''
if timeout == -1:
timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
if (isinstance(pattern_list, self.allowed_string_types) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, self.allowed_string_types):
return self._coerce_expect_string(pattern)
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
return expect_async(exp, timeout)
else:
return exp.expect_loop(timeout) | This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.
Like :meth:`expect`, passing ``async_=True`` will make this return an
asyncio coroutine. | Below is the the instruction that describes the task:
### Input:
This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.
Like :meth:`expect`, passing ``async_=True`` will make this return an
asyncio coroutine.
### Response:
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
'''This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.
Like :meth:`expect`, passing ``async_=True`` will make this return an
asyncio coroutine.
'''
if timeout == -1:
timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
if (isinstance(pattern_list, self.allowed_string_types) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, self.allowed_string_types):
return self._coerce_expect_string(pattern)
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
return expect_async(exp, timeout)
else:
return exp.expect_loop(timeout) |
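A small usage sketch, assuming the pexpect package is installed and a POSIX system with python3 on PATH:

import pexpect

child = pexpect.spawn('python3', encoding='utf-8')
child.expect_exact('>>> ')          # plain-string match, no regex escaping needed
child.sendline('print(6 * 7)')
child.expect_exact('42')            # waits for the literal output
child.sendline('exit()')
child.expect(pexpect.EOF)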
def ssa(scatterer, h_pol=True):
"""Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo.
"""
ext_xs = ext_xsect(scatterer, h_pol=h_pol)
return sca_xsect(scatterer, h_pol=h_pol)/ext_xs if ext_xs > 0.0 else 0.0 | Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo. | Below is the the instruction that describes the task:
### Input:
Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo.
### Response:
def ssa(scatterer, h_pol=True):
"""Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo.
"""
ext_xs = ext_xsect(scatterer, h_pol=h_pol)
return sca_xsect(scatterer, h_pol=h_pol)/ext_xs if ext_xs > 0.0 else 0.0 |
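Stripped of the scatterer machinery, the computation is just the ratio of the scattering to the extinction cross section with a guard against division by zero; a minimal sketch with made-up cross sections:

def single_scattering_albedo(sca, ext):
    # Albedo is sca/ext; return 0.0 when there is no extinction at all
    return sca / ext if ext > 0.0 else 0.0

print(single_scattering_albedo(0.8, 1.0))  # 0.8
print(single_scattering_albedo(0.0, 0.0))  # 0.0 (guarded)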
def dump_resource_to_zipfile(resource, zipfile, content_type=None):
"""
Convenience function. See
:meth:`everest.resources.io.ConnectedResourcesSerializer.to_zipfile` for
details.
    The given content type defaults to CSV.
"""
if content_type is None:
content_type = CsvMime
srl = ConnectedResourcesSerializer(content_type)
srl.to_zipfile(resource, zipfile) | Convenience function. See
:meth:`everest.resources.io.ConnectedResourcesSerializer.to_zipfile` for
details.
The given content type defaults to CSV.
### Input:
Convenience function. See
:meth:`everest.resources.io.ConnectedResourcesSerializer.to_zipfile` for
details.
The given content type defaults to CSV.
### Response:
def dump_resource_to_zipfile(resource, zipfile, content_type=None):
"""
Convenience function. See
:meth:`everest.resources.io.ConnectedResourcesSerializer.to_zipfile` for
details.
    The given content type defaults to CSV.
"""
if content_type is None:
content_type = CsvMime
srl = ConnectedResourcesSerializer(content_type)
srl.to_zipfile(resource, zipfile) |
def _get_float(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end | Decode a BSON double to python float. | Below is the the instruction that describes the task:
### Input:
Decode a BSON double to python float.
### Response:
def _get_float(data, position, dummy0, dummy1, dummy2):
"""Decode a BSON double to python float."""
end = position + 8
return _UNPACK_FLOAT(data[position:end])[0], end |
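_UNPACK_FLOAT is not shown above; it is presumably a prebuilt struct unpacker for the little-endian IEEE 754 double that BSON uses. A self-contained sketch under that assumption:

import struct

_UNPACK_FLOAT = struct.Struct('<d').unpack  # BSON doubles are little-endian 64-bit

def get_float(data, position):
    end = position + 8
    return _UNPACK_FLOAT(data[position:end])[0], end

payload = struct.pack('<d', 3.14)
print(get_float(payload, 0))  # (3.14, 8)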
def getallobjlists(idf, refname):
"""get all object-list fields for refname
return a list:
[('OBJKEY', refname, fieldindexlist), ...] where
fieldindexlist = index of the field where the object-list = refname
"""
dtls = idf.model.dtls
objlists = []
for i, fieldidds in enumerate(idf.idd_info):
indexlist = []
for j, fieldidd in enumerate(fieldidds):
if 'object-list' in fieldidd:
if fieldidd['object-list'][0].upper() == refname.upper():
indexlist.append(j)
if indexlist != []:
objkey = dtls[i]
objlists.append((objkey, refname, indexlist))
return objlists | get all object-list fields for refname
return a list:
[('OBJKEY', refname, fieldindexlist), ...] where
fieldindexlist = index of the field where the object-list = refname | Below is the the instruction that describes the task:
### Input:
get all object-list fields for refname
return a list:
[('OBJKEY', refname, fieldindexlist), ...] where
fieldindexlist = index of the field where the object-list = refname
### Response:
def getallobjlists(idf, refname):
"""get all object-list fields for refname
return a list:
[('OBJKEY', refname, fieldindexlist), ...] where
fieldindexlist = index of the field where the object-list = refname
"""
dtls = idf.model.dtls
objlists = []
for i, fieldidds in enumerate(idf.idd_info):
indexlist = []
for j, fieldidd in enumerate(fieldidds):
if 'object-list' in fieldidd:
if fieldidd['object-list'][0].upper() == refname.upper():
indexlist.append(j)
if indexlist != []:
objkey = dtls[i]
objlists.append((objkey, refname, indexlist))
return objlists |
def position_i(self):
"""
The integral constant for the position PID.
"""
self._position_i, value = self.get_attr_int(self._position_i, 'hold_pid/Ki')
return value | The integral constant for the position PID. | Below is the the instruction that describes the task:
### Input:
The integral constant for the position PID.
### Response:
def position_i(self):
"""
The integral constant for the position PID.
"""
self._position_i, value = self.get_attr_int(self._position_i, 'hold_pid/Ki')
return value |
def view(ctx, schema, uuid, object_filter):
"""Show stored objects"""
database = ctx.obj['db']
if schema is None:
log('No schema given. Read the help', lvl=warn)
return
model = database.objectmodels[schema]
if uuid:
obj = model.find({'uuid': uuid})
elif object_filter:
obj = model.find(literal_eval(object_filter))
else:
obj = model.find()
for item in obj:
pprint(item._fields) | Show stored objects | Below is the the instruction that describes the task:
### Input:
Show stored objects
### Response:
def view(ctx, schema, uuid, object_filter):
"""Show stored objects"""
database = ctx.obj['db']
if schema is None:
log('No schema given. Read the help', lvl=warn)
return
model = database.objectmodels[schema]
if uuid:
obj = model.find({'uuid': uuid})
elif object_filter:
obj = model.find(literal_eval(object_filter))
else:
obj = model.find()
for item in obj:
pprint(item._fields) |
def draw_lines():
"""
    Draws a line through a set of random values
"""
r = numpy.random.randn(200)
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.plot(r)
ax.grid(True)
    pyplot.savefig(lines_filename) | Draws a line through a set of random values
### Input:
Draws a line through a set of random values
### Response:
def draw_lines():
"""
    Draws a line through a set of random values
"""
r = numpy.random.randn(200)
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.plot(r)
ax.grid(True)
pyplot.savefig(lines_filename) |
def row_append(self, row_key, value_list):
"""
append a new row to a DataRange
:param row_key: a string
:param value_list: a list
"""
if row_key in self._row_keys:
raise KeyError('Key %s already exists in row keys.' % row_key)
if not len(value_list) == len(self._col_keys):
raise ValueError('Length of data to set does not meet expected row length of %i' % len(self._col_keys))
self._row_keys.append(row_key)
for c, v in zip(self._col_keys, value_list):
self[row_key, c] = v | append a new row to a DataRange
:param row_key: a string
:param value_list: a list | Below is the the instruction that describes the task:
### Input:
append a new row to a DataRange
:param row_key: a string
:param value_list: a list
### Response:
def row_append(self, row_key, value_list):
"""
append a new row to a DataRange
:param row_key: a string
:param value_list: a list
"""
if row_key in self._row_keys:
raise KeyError('Key %s already exists in row keys.' % row_key)
if not len(value_list) == len(self._col_keys):
raise ValueError('Length of data to set does not meet expected row length of %i' % len(self._col_keys))
self._row_keys.append(row_key)
for c, v in zip(self._col_keys, value_list):
self[row_key, c] = v |
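A hedged sketch of the same key/length validation on a plain dict keyed by (row, col) pairs, since the DataRange internals are not shown:

def row_append(table, row_keys, col_keys, row_key, value_list):
    # table: dict keyed by (row, col); mirrors the checks in DataRange.row_append
    if row_key in row_keys:
        raise KeyError('Key %s already exists in row keys.' % row_key)
    if len(value_list) != len(col_keys):
        raise ValueError('expected %i values' % len(col_keys))
    row_keys.append(row_key)
    for c, v in zip(col_keys, value_list):
        table[row_key, c] = v

table, rows = {}, []
row_append(table, rows, ['a', 'b'], 'r1', [1, 2])
print(table)  # {('r1', 'a'): 1, ('r1', 'b'): 2}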
def apply_styles(self, cmdmap):
"""
    Apply the set of commands defined in cmdmap. For example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white})
:param cmdmap: dict of commands mapped to the command arguments
:return: self
"""
is_list_like = lambda arg: isinstance(arg, (list, tuple))
is_first_param_list = lambda c: c in ('COLBACKGROUNDS', 'ROWBACKGROUNDS')
for cmd, args in cmdmap.iteritems():
if not is_list_like(args):
args = [args]
elif is_first_param_list(cmd) and is_list_like(args) and not is_list_like(args[0]):
args = [args]
self.apply_style(cmd, *args)
    return self | Apply the set of commands defined in cmdmap. For example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white})
:param cmdmap: dict of commands mapped to the command arguments
:return: self | Below is the the instruction that describes the task:
### Input:
Apply the set of commands defined in cmdmap. For example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white})
:param cmdmap: dict of commands mapped to the command arguments
:return: self
### Response:
def apply_styles(self, cmdmap):
"""
    Apply the set of commands defined in cmdmap. For example, apply_styles({'FONTSIZE': 12, 'BACKGROUND': white})
:param cmdmap: dict of commands mapped to the command arguments
:return: self
"""
is_list_like = lambda arg: isinstance(arg, (list, tuple))
is_first_param_list = lambda c: c in ('COLBACKGROUNDS', 'ROWBACKGROUNDS')
for cmd, args in cmdmap.iteritems():
if not is_list_like(args):
args = [args]
elif is_first_param_list(cmd) and is_list_like(args) and not is_list_like(args[0]):
args = [args]
self.apply_style(cmd, *args)
return self |
def txt_to_obj(cls, file_path=None, text='', columns=None,
remove_empty_rows=True, key_on=None,
row_columns=None, deliminator='\t', eval_cells=True):
"""
        This will convert a text file or text to a seaborn table
and return it
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param row_columns: list of str of columns in data but not to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
"""
return cls.str_to_obj(file_path=file_path, text=text, columns=columns,
remove_empty_rows=remove_empty_rows,
key_on=key_on, row_columns=row_columns,
                          deliminator=deliminator, eval_cells=eval_cells) | This will convert a text file or text to a seaborn table
and return it
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param row_columns: list of str of columns in data but not to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable | Below is the the instruction that describes the task:
### Input:
This will convert a text file or text to a seaborn table
and return it
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param row_columns: list of str of columns in data but not to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
### Response:
def txt_to_obj(cls, file_path=None, text='', columns=None,
remove_empty_rows=True, key_on=None,
row_columns=None, deliminator='\t', eval_cells=True):
"""
        This will convert a text file or text to a seaborn table
and return it
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param row_columns: list of str of columns in data but not to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
"""
return cls.str_to_obj(file_path=file_path, text=text, columns=columns,
remove_empty_rows=remove_empty_rows,
key_on=key_on, row_columns=row_columns,
deliminator=deliminator, eval_cells=eval_cells) |
def _poly_eval_0(self, u, ids):
"""Evaluate internal polynomial."""
return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids] | Evaluate internal polynomial. | Below is the the instruction that describes the task:
### Input:
Evaluate internal polynomial.
### Response:
def _poly_eval_0(self, u, ids):
"""Evaluate internal polynomial."""
return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids] |
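The nested form above is Horner's rule for a*u**3 + b*u**2 + c*u + d, which saves multiplications and vectorizes cleanly; a quick check with made-up coefficients:

import numpy as np

u = np.array([0.0, 0.5, 1.0])
a, b, c, d = 2.0, -1.0, 0.5, 3.0

horner = u * (u * (a * u + b) + c) + d      # nested (Horner) evaluation
naive = a * u**3 + b * u**2 + c * u + d     # direct evaluation
assert np.allclose(horner, naive)
print(horner)  # [3.   3.25 4.5 ]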
def add_spatial_unit_condition(self, droppable_id, container_id, spatial_unit, match=True):
"""stub"""
if not isinstance(spatial_unit, abc_mapping_primitives.SpatialUnit):
raise InvalidArgument('spatial_unit is not a SpatialUnit')
self.my_osid_object_form._my_map['spatialUnitConditions'].append(
{'droppableId': droppable_id, 'containerId': container_id, 'spatialUnit': spatial_unit.get_spatial_unit_map(), 'match': match})
self.my_osid_object_form._my_map['spatialUnitConditions'].sort() | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def add_spatial_unit_condition(self, droppable_id, container_id, spatial_unit, match=True):
"""stub"""
if not isinstance(spatial_unit, abc_mapping_primitives.SpatialUnit):
raise InvalidArgument('spatial_unit is not a SpatialUnit')
self.my_osid_object_form._my_map['spatialUnitConditions'].append(
{'droppableId': droppable_id, 'containerId': container_id, 'spatialUnit': spatial_unit.get_spatial_unit_map(), 'match': match})
self.my_osid_object_form._my_map['spatialUnitConditions'].sort() |
def _from_dict(cls, _dict):
"""Initialize a LogQueryResponseResult object from a json dictionary."""
args = {}
if 'environment_id' in _dict:
args['environment_id'] = _dict.get('environment_id')
if 'customer_id' in _dict:
args['customer_id'] = _dict.get('customer_id')
if 'document_type' in _dict:
args['document_type'] = _dict.get('document_type')
if 'natural_language_query' in _dict:
args['natural_language_query'] = _dict.get('natural_language_query')
if 'document_results' in _dict:
args[
'document_results'] = LogQueryResponseResultDocuments._from_dict(
_dict.get('document_results'))
if 'created_timestamp' in _dict:
args['created_timestamp'] = string_to_datetime(
_dict.get('created_timestamp'))
if 'client_timestamp' in _dict:
args['client_timestamp'] = string_to_datetime(
_dict.get('client_timestamp'))
if 'query_id' in _dict:
args['query_id'] = _dict.get('query_id')
if 'session_token' in _dict:
args['session_token'] = _dict.get('session_token')
if 'collection_id' in _dict:
args['collection_id'] = _dict.get('collection_id')
if 'display_rank' in _dict:
args['display_rank'] = _dict.get('display_rank')
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
if 'event_type' in _dict:
args['event_type'] = _dict.get('event_type')
if 'result_type' in _dict:
args['result_type'] = _dict.get('result_type')
return cls(**args) | Initialize a LogQueryResponseResult object from a json dictionary. | Below is the the instruction that describes the task:
### Input:
Initialize a LogQueryResponseResult object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a LogQueryResponseResult object from a json dictionary."""
args = {}
if 'environment_id' in _dict:
args['environment_id'] = _dict.get('environment_id')
if 'customer_id' in _dict:
args['customer_id'] = _dict.get('customer_id')
if 'document_type' in _dict:
args['document_type'] = _dict.get('document_type')
if 'natural_language_query' in _dict:
args['natural_language_query'] = _dict.get('natural_language_query')
if 'document_results' in _dict:
args[
'document_results'] = LogQueryResponseResultDocuments._from_dict(
_dict.get('document_results'))
if 'created_timestamp' in _dict:
args['created_timestamp'] = string_to_datetime(
_dict.get('created_timestamp'))
if 'client_timestamp' in _dict:
args['client_timestamp'] = string_to_datetime(
_dict.get('client_timestamp'))
if 'query_id' in _dict:
args['query_id'] = _dict.get('query_id')
if 'session_token' in _dict:
args['session_token'] = _dict.get('session_token')
if 'collection_id' in _dict:
args['collection_id'] = _dict.get('collection_id')
if 'display_rank' in _dict:
args['display_rank'] = _dict.get('display_rank')
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
if 'event_type' in _dict:
args['event_type'] = _dict.get('event_type')
if 'result_type' in _dict:
args['result_type'] = _dict.get('result_type')
return cls(**args) |
def seqannotation(self, seqrecord, allele, loc):
"""
Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation
"""
#seqrecord = self.seqrecord(allele, loc)
complete_annotation = get_features(seqrecord)
annotation = Annotation(annotation=complete_annotation,
method='match',
complete_annotation=True)
if self.alignments:
alignment = {f: self.annoated_alignments[loc][allele][f]['Seq']
for f in self.annoated_alignments[loc][allele].keys()}
annotation.aligned = alignment
return annotation | Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation | Below is the the instruction that describes the task:
### Input:
Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation
### Response:
def seqannotation(self, seqrecord, allele, loc):
"""
Gets the Annotation from the found sequence
:return: The Annotation from the found sequence
:rtype: Annotation
"""
#seqrecord = self.seqrecord(allele, loc)
complete_annotation = get_features(seqrecord)
annotation = Annotation(annotation=complete_annotation,
method='match',
complete_annotation=True)
if self.alignments:
alignment = {f: self.annoated_alignments[loc][allele][f]['Seq']
for f in self.annoated_alignments[loc][allele].keys()}
annotation.aligned = alignment
return annotation |
def _run_genotype_gvcfs_genomicsdb(genomics_db, region, out_file, data):
"""GenotypeGVCFs from a merged GenomicsDB input: GATK4.
ropts += [str(x) for x in resources.get("options", [])]
No core scaling -- not yet supported in GATK4.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
broad_runner = broad.runner_from_config(data["config"])
params = ["-T", "GenotypeGVCFs",
"--variant", "gendb://%s" % genomics_db,
"-R", dd.get_ref_file(data),
"--output", tx_out_file,
"-L", bamprep.region_to_gatk(region)]
params += ["-ploidy", str(ploidy.get_ploidy([data], region))]
# Avoid slow genotyping runtimes with improved quality score calculation in GATK4
# https://gatkforums.broadinstitute.org/gatk/discussion/11471/performance-troubleshooting-tips-for-genotypegvcfs/p1
params += ["--use-new-qual-calculator"]
resources = config_utils.get_resources("gatk", data["config"])
params += [str(x) for x in resources.get("options", [])]
cores = dd.get_cores(data)
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
broad_runner.run_gatk(params, memscale=memscale)
return vcfutils.bgzip_and_index(out_file, data["config"]) | GenotypeGVCFs from a merged GenomicsDB input: GATK4.
ropts += [str(x) for x in resources.get("options", [])]
No core scaling -- not yet supported in GATK4. | Below is the the instruction that describes the task:
### Input:
GenotypeGVCFs from a merged GenomicsDB input: GATK4.
ropts += [str(x) for x in resources.get("options", [])]
No core scaling -- not yet supported in GATK4.
### Response:
def _run_genotype_gvcfs_genomicsdb(genomics_db, region, out_file, data):
"""GenotypeGVCFs from a merged GenomicsDB input: GATK4.
ropts += [str(x) for x in resources.get("options", [])]
No core scaling -- not yet supported in GATK4.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
broad_runner = broad.runner_from_config(data["config"])
params = ["-T", "GenotypeGVCFs",
"--variant", "gendb://%s" % genomics_db,
"-R", dd.get_ref_file(data),
"--output", tx_out_file,
"-L", bamprep.region_to_gatk(region)]
params += ["-ploidy", str(ploidy.get_ploidy([data], region))]
# Avoid slow genotyping runtimes with improved quality score calculation in GATK4
# https://gatkforums.broadinstitute.org/gatk/discussion/11471/performance-troubleshooting-tips-for-genotypegvcfs/p1
params += ["--use-new-qual-calculator"]
resources = config_utils.get_resources("gatk", data["config"])
params += [str(x) for x in resources.get("options", [])]
cores = dd.get_cores(data)
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
broad_runner.run_gatk(params, memscale=memscale)
return vcfutils.bgzip_and_index(out_file, data["config"]) |
def candle_lighting(self):
"""Return the time for candle lighting, or None if not applicable."""
today = HDate(gdate=self.date, diaspora=self.location.diaspora)
tomorrow = HDate(gdate=self.date + dt.timedelta(days=1),
diaspora=self.location.diaspora)
# If today is a Yom Tov or Shabbat, and tomorrow is a Yom Tov or
# Shabbat return the havdalah time as the candle lighting time.
if ((today.is_yom_tov or today.is_shabbat)
and (tomorrow.is_yom_tov or tomorrow.is_shabbat)):
return self._havdalah_datetime
# Otherwise, if today is Friday or erev Yom Tov, return candle
# lighting.
if tomorrow.is_shabbat or tomorrow.is_yom_tov:
return (self.zmanim["sunset"]
- dt.timedelta(minutes=self.candle_lighting_offset))
return None | Return the time for candle lighting, or None if not applicable. | Below is the the instruction that describes the task:
### Input:
Return the time for candle lighting, or None if not applicable.
### Response:
def candle_lighting(self):
"""Return the time for candle lighting, or None if not applicable."""
today = HDate(gdate=self.date, diaspora=self.location.diaspora)
tomorrow = HDate(gdate=self.date + dt.timedelta(days=1),
diaspora=self.location.diaspora)
# If today is a Yom Tov or Shabbat, and tomorrow is a Yom Tov or
# Shabbat return the havdalah time as the candle lighting time.
if ((today.is_yom_tov or today.is_shabbat)
and (tomorrow.is_yom_tov or tomorrow.is_shabbat)):
return self._havdalah_datetime
# Otherwise, if today is Friday or erev Yom Tov, return candle
# lighting.
if tomorrow.is_shabbat or tomorrow.is_yom_tov:
return (self.zmanim["sunset"]
- dt.timedelta(minutes=self.candle_lighting_offset))
return None |
def vector(p1, p2):
'''compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
'''
return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ]) | compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2 | Below is the the instruction that describes the task:
### Input:
compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
### Response:
def vector(p1, p2):
'''compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
'''
return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ]) |
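COLS.XYZ is not defined in the snippet; assuming it is simply the slice over the first three (x, y, z) columns, a self-contained equivalent is:

import numpy as np

XYZ = slice(0, 3)  # stands in for COLS.XYZ

p1 = np.array([1.0, 2.0, 3.0, 9.9])  # a trailing column (e.g. a radius) is ignored
p2 = np.array([0.5, 0.0, 1.0, 4.2])

print(np.subtract(p1[XYZ], p2[XYZ]))  # [0.5 2.  2. ]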
def run_query(self, cmd="", **kwargs):
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
) | Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen) | Below is the the instruction that describes the task:
### Input:
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
### Response:
def run_query(self, cmd="", **kwargs):
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
) |
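The same stream-and-check pattern with a plain command instead of spark-sql; note universal_newlines=True so the empty-string sentinel in iter() matches the text-mode stream:

import subprocess

sp = subprocess.Popen(['python3', '-c', "print('hello'); print('world')"],
                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                      universal_newlines=True)

for line in iter(sp.stdout.readline, ''):  # stream output as it is produced
    print('log:', line.rstrip())

if sp.wait():
    raise RuntimeError('command failed with exit code %d' % sp.returncode)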
def from_config(cls, cp, **kwargs):
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will over ride what is in the config file.
"""
args = cls._init_args_from_config(cp)
args['low_frequency_cutoff'] = low_frequency_cutoff_from_config(cp)
args['high_frequency_cutoff'] = high_frequency_cutoff_from_config(cp)
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'low-frequency-cutoff', 'high-frequency-cutoff']
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
args.update(kwargs)
return cls(**args) | r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will override what is in the config file.
### Input:
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will override what is in the config file.
### Response:
def from_config(cls, cp, **kwargs):
r"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
        provided keyword will override what is in the config file.
"""
args = cls._init_args_from_config(cp)
args['low_frequency_cutoff'] = low_frequency_cutoff_from_config(cp)
args['high_frequency_cutoff'] = high_frequency_cutoff_from_config(cp)
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'low-frequency-cutoff', 'high-frequency-cutoff']
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
args.update(kwargs)
return cls(**args) |
def register(self, mimetype):
"""Register a function to handle a particular mimetype."""
def dec(func):
self._reg[mimetype] = func
return func
return dec | Register a function to handle a particular mimetype. | Below is the the instruction that describes the task:
### Input:
Register a function to handle a particular mimetype.
### Response:
def register(self, mimetype):
"""Register a function to handle a particular mimetype."""
def dec(func):
self._reg[mimetype] = func
return func
return dec |
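Usage of the decorator-factory pattern above, with a hypothetical Registry wrapper supplying the _reg dict:

class Registry:
    def __init__(self):
        self._reg = {}

    def register(self, mimetype):
        def dec(func):
            self._reg[mimetype] = func  # map the mimetype to its handler
            return func                 # leave the decorated function unchanged
        return dec

handlers = Registry()

@handlers.register('text/plain')
def handle_text(body):
    return body.upper()

print(handlers._reg['text/plain']('hi'))  # HI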
def reject_milestone_request(session, milestone_request_id):
"""
Reject a milestone request
"""
params_data = {
'action': 'reject',
}
    # PUT /api/projects/0.1/milestone_requests/{milestone_request_id}/?action=
    # reject
endpoint = 'milestone_requests/{}'.format(milestone_request_id)
response = make_put_request(session, endpoint, params_data=params_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise MilestoneRequestNotRejectedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | Reject a milestone request | Below is the the instruction that describes the task:
### Input:
Reject a milestone request
### Response:
def reject_milestone_request(session, milestone_request_id):
"""
Reject a milestone request
"""
params_data = {
'action': 'reject',
}
    # PUT /api/projects/0.1/milestone_requests/{milestone_request_id}/?action=
    # reject
endpoint = 'milestone_requests/{}'.format(milestone_request_id)
response = make_put_request(session, endpoint, params_data=params_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise MilestoneRequestNotRejectedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) |
def call(self, command, *args):
""" Execute local OR remote command and show response """
if not command:
return
# Look for local methods first
try:
res = self.registered[command]['function'](self, *args)
return Response('local', res, None)
# Method not found, try remote
except KeyError:
# Execute remote command
res, err = self.client.call(command, *args)
return Response('remote', res, err, self.client.is_multi())
# Local exception
except Exception as e:
        return Response('local', None, str(e))  # res may be unbound if the call itself raised
### Input:
Execute local OR remote command and show response
### Response:
def call(self, command, *args):
""" Execute local OR remote command and show response """
if not command:
return
# Look for local methods first
try:
res = self.registered[command]['function'](self, *args)
return Response('local', res, None)
# Method not found, try remote
except KeyError:
# Execute remote command
res, err = self.client.call(command, *args)
return Response('remote', res, err, self.client.is_multi())
# Local exception
except Exception as e:
        return Response('local', None, str(e))  # res may be unbound if the call itself raised
def validate(filename, verbose=False):
"""
Validate file and return JSON result as dictionary.
"filename" can be a file name or an HTTP URL.
Return "" if the validator does not return valid JSON.
Raise OSError if curl command returns an error status.
"""
# is_css = filename.endswith(".css")
is_remote = filename.startswith("http://") or filename.startswith(
"https://")
with tempfile.TemporaryFile() if is_remote else open(
filename, "rb") as f:
if is_remote:
r = requests.get(filename, verify=False)
f.write(r.content)
f.seek(0)
# if is_css:
# cmd = (
# "curl -sF \"file=@%s;type=text/css\" -F output=json -F warning=0 %s"
# % (quoted_filename, CSS_VALIDATOR_URL))
# _ = cmd
# else:
r = requests.post(
HTML_VALIDATOR_URL,
files={"file": (filename, f, "text/html")},
data={
"out": "json",
"showsource": "yes",
},
verify=False)
return r.json() | Validate file and return JSON result as dictionary.
"filename" can be a file name or an HTTP URL.
Return "" if the validator does not return valid JSON.
Raise OSError if curl command returns an error status. | Below is the the instruction that describes the task:
### Input:
Validate file and return JSON result as dictionary.
"filename" can be a file name or an HTTP URL.
Return "" if the validator does not return valid JSON.
Raise OSError if curl command returns an error status.
### Response:
def validate(filename, verbose=False):
"""
Validate file and return JSON result as dictionary.
"filename" can be a file name or an HTTP URL.
Return "" if the validator does not return valid JSON.
Raise OSError if curl command returns an error status.
"""
# is_css = filename.endswith(".css")
is_remote = filename.startswith("http://") or filename.startswith(
"https://")
with tempfile.TemporaryFile() if is_remote else open(
filename, "rb") as f:
if is_remote:
r = requests.get(filename, verify=False)
f.write(r.content)
f.seek(0)
# if is_css:
# cmd = (
# "curl -sF \"file=@%s;type=text/css\" -F output=json -F warning=0 %s"
# % (quoted_filename, CSS_VALIDATOR_URL))
# _ = cmd
# else:
r = requests.post(
HTML_VALIDATOR_URL,
files={"file": (filename, f, "text/html")},
data={
"out": "json",
"showsource": "yes",
},
verify=False)
return r.json() |
def do_string(self, parent=None, ident=0):
"""
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba | Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string | Below is the the instruction that describes the task:
### Input:
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
### Response:
def do_string(self, parent=None, ident=0):
"""
Handles a TC_STRING opcode
:param parent:
:param ident: Log indentation level
:return: A string
"""
log_debug("[string]", ident)
ba = JavaString(self._readString())
self._add_reference(ba, ident)
return ba |
def build(self, stmts=None, set_check_var=True, invert=False):
"""Construct code for performing the match then executing stmts."""
out = ""
if set_check_var:
out += self.check_var + " = False\n"
out += self.out()
if stmts is not None:
out += "if " + ("not " if invert else "") + self.check_var + ":" + "\n" + openindent + "".join(stmts) + closeindent
return out | Construct code for performing the match then executing stmts. | Below is the the instruction that describes the task:
### Input:
Construct code for performing the match then executing stmts.
### Response:
def build(self, stmts=None, set_check_var=True, invert=False):
"""Construct code for performing the match then executing stmts."""
out = ""
if set_check_var:
out += self.check_var + " = False\n"
out += self.out()
if stmts is not None:
out += "if " + ("not " if invert else "") + self.check_var + ":" + "\n" + openindent + "".join(stmts) + closeindent
return out |
async def deserialize(data: dict):
"""
Create the object from a previously serialized object.
:param data: The output of the "serialize" call
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
data1 = await credential_def1.serialize()
credential_def2 = await CredentialDef.deserialize(data1)
:return: A re-instantiated object
"""
try:
credential_def = await CredentialDef._deserialize("vcx_credentialdef_deserialize",
json.dumps(data),
data['data']['source_id'],
data['data']['name'],
data['data']['id'])
return credential_def
except KeyError:
raise VcxError(ErrorCode.InvalidCredentialDef) | Create the object from a previously serialized object.
:param data: The output of the "serialize" call
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
data1 = await credential_def1.serialize()
credential_def2 = await CredentialDef.deserialize(data1)
:return: A re-instantiated object | Below is the the instruction that describes the task:
### Input:
Create the object from a previously serialized object.
:param data: The output of the "serialize" call
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
data1 = await credential_def1.serialize()
credential_def2 = await CredentialDef.deserialize(data1)
:return: A re-instantiated object
### Response:
async def deserialize(data: dict):
"""
Create the object from a previously serialized object.
:param data: The output of the "serialize" call
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
data1 = await credential_def1.serialize()
credential_def2 = await CredentialDef.deserialize(data1)
:return: A re-instantiated object
"""
try:
credential_def = await CredentialDef._deserialize("vcx_credentialdef_deserialize",
json.dumps(data),
data['data']['source_id'],
data['data']['name'],
data['data']['id'])
return credential_def
except KeyError:
raise VcxError(ErrorCode.InvalidCredentialDef) |
def _assert_lt(self, cost):
"""
The method enforces an upper bound on the cost of the MaxSAT
solution. This is done by encoding the sum of all soft clause
        selectors with the use of the iterative totalizer encoding, i.e.
:class:`.ITotalizer`. Note that the sum is created once, at the
beginning. Each of the following calls to this method only enforces
the upper bound on the created sum by adding the corresponding unit
size clause. Each such clause is added on the fly with no restart
of the underlying SAT oracle.
:param cost: the cost of the next MaxSAT solution is enforced to be
*lower* than this current cost
:type cost: int
"""
    if self.tot is None:
self.tot = ITotalizer(lits=self.sels, ubound=cost-1, top_id=self.topv)
self.topv = self.tot.top_id
for cl in self.tot.cnf.clauses:
self.oracle.add_clause(cl)
self.oracle.add_clause([-self.tot.rhs[cost-1]]) | The method enforces an upper bound on the cost of the MaxSAT
solution. This is done by encoding the sum of all soft clause
selectors with the use of the iterative totalizer encoding, i.e.
:class:`.ITotalizer`. Note that the sum is created once, at the
beginning. Each of the following calls to this method only enforces
the upper bound on the created sum by adding the corresponding unit
size clause. Each such clause is added on the fly with no restart
of the underlying SAT oracle.
:param cost: the cost of the next MaxSAT solution is enforced to be
*lower* than this current cost
:type cost: int | Below is the the instruction that describes the task:
### Input:
The method enforces an upper bound on the cost of the MaxSAT
solution. This is done by encoding the sum of all soft clause
selectors with the use of the iterative totalizer encoding, i.e.
:class:`.ITotalizer`. Note that the sum is created once, at the
beginning. Each of the following calls to this method only enforces
the upper bound on the created sum by adding the corresponding unit
size clause. Each such clause is added on the fly with no restart
of the underlying SAT oracle.
:param cost: the cost of the next MaxSAT solution is enforced to be
*lower* than this current cost
:type cost: int
### Response:
def _assert_lt(self, cost):
"""
The method enforces an upper bound on the cost of the MaxSAT
solution. This is done by encoding the sum of all soft clause
        selectors with the use of the iterative totalizer encoding, i.e.
:class:`.ITotalizer`. Note that the sum is created once, at the
beginning. Each of the following calls to this method only enforces
the upper bound on the created sum by adding the corresponding unit
size clause. Each such clause is added on the fly with no restart
of the underlying SAT oracle.
:param cost: the cost of the next MaxSAT solution is enforced to be
*lower* than this current cost
:type cost: int
"""
        if self.tot is None:
self.tot = ITotalizer(lits=self.sels, ubound=cost-1, top_id=self.topv)
self.topv = self.tot.top_id
for cl in self.tot.cnf.clauses:
self.oracle.add_clause(cl)
self.oracle.add_clause([-self.tot.rhs[cost-1]]) |
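For context, the incremental pattern above can be reproduced standalone with pysat's ITotalizer. The following is a minimal sketch, assuming four hypothetical selector literals and a Glucose3 oracle; it is not the surrounding solver's actual code.

from pysat.card import ITotalizer
from pysat.solvers import Glucose3

sels = [1, 2, 3, 4]                      # hypothetical soft-clause selectors
oracle = Glucose3()

# Build the totalizer sum once; its clauses enter the oracle a single time.
tot = ITotalizer(lits=sels, ubound=len(sels) - 1, top_id=max(sels))
for cl in tot.cnf.clauses:
    oracle.add_clause(cl)

# Tighten the bound on the fly: -tot.rhs[k] forbids sums greater than k,
# and each unit clause is added without restarting the oracle.
for bound in (3, 2, 1):
    oracle.add_clause([-tot.rhs[bound]])
    print(bound, oracle.solve())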
def parallelize(self, seconds_to_wait=2):
"""Start a parallel thread for receiving messages.
        If :meth:`start` was not called before, start will be called in the
thread.
The thread calls :meth:`receive_message` until the :attr:`state`
:meth:`~AYABInterface.communication.states.State.is_connection_closed`.
        :param float seconds_to_wait: A time in seconds to wait before starting the
parallel execution. This is useful to allow the controller time to
initialize.
.. seealso:: :attr:`lock`, :meth:`runs_in_parallel`
"""
with self.lock:
thread = Thread(target=self._parallel_receive_loop,
args=(seconds_to_wait,))
            thread.daemon = True  # daemon thread: will not block interpreter exit
thread.start()
self._thread = thread | Start a parallel thread for receiving messages.
If :meth:`start` was not called before, start will be called in the
thread.
The thread calls :meth:`receive_message` until the :attr:`state`
:meth:`~AYABInterface.communication.states.State.is_connection_closed`.
:param float seconds_to_wait: A time in seconds to wait before starting the
parallel execution. This is useful to allow the controller time to
initialize.
.. seealso:: :attr:`lock`, :meth:`runs_in_parallel` | Below is the instruction that describes the task:
### Input:
Start a parallel thread for receiving messages.
If :meth:`start` was not called before, start will be called in the
thread.
The thread calls :meth:`receive_message` until the :attr:`state`
:meth:`~AYABInterface.communication.states.State.is_connection_closed`.
:param float seconds_to_wait: A time in seconds to wait before starting the
parallel execution. This is useful to allow the controller time to
initialize.
.. seealso:: :attr:`lock`, :meth:`runs_in_parallel`
### Response:
def parallelize(self, seconds_to_wait=2):
"""Start a parallel thread for receiving messages.
        If :meth:`start` was not called before, start will be called in the
thread.
The thread calls :meth:`receive_message` until the :attr:`state`
:meth:`~AYABInterface.communication.states.State.is_connection_closed`.
        :param float seconds_to_wait: A time in seconds to wait before starting the
parallel execution. This is useful to allow the controller time to
initialize.
.. seealso:: :attr:`lock`, :meth:`runs_in_parallel`
"""
with self.lock:
thread = Thread(target=self._parallel_receive_loop,
args=(seconds_to_wait,))
            thread.daemon = True  # daemon thread: will not block interpreter exit
thread.start()
self._thread = thread |
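As a hedged illustration of the receive-loop pattern described above, here is a self-contained sketch; the Event standing in for the closed-connection state and the message source are made up for the example, not the library's actual API.

import time
from threading import Event, Lock, Thread

lock = Lock()
closed = Event()                       # stand-in for state.is_connection_closed

def receive_message():                 # hypothetical message source
    time.sleep(0.1)

def _parallel_receive_loop(seconds_to_wait):
    time.sleep(seconds_to_wait)        # give the controller time to initialize
    while not closed.is_set():
        receive_message()

def parallelize(seconds_to_wait=2):
    with lock:
        thread = Thread(target=_parallel_receive_loop, args=(seconds_to_wait,))
        thread.daemon = True           # the attribute must be spelled 'daemon'
        thread.start()
        return thread

t = parallelize(0.5)
time.sleep(1.0)
closed.set()
t.join()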
def get_conn(opts, profile):
'''
Return a client object for accessing consul
'''
opts_pillar = opts.get('pillar', {})
opts_master = opts_pillar.get('master', {})
opts_merged = {}
opts_merged.update(opts_master)
opts_merged.update(opts_pillar)
opts_merged.update(opts)
if profile:
conf = opts_merged.get(profile, {})
else:
conf = opts_merged
params = {}
for key in conf:
if key.startswith('consul.'):
params[key.split('.')[1]] = conf[key]
if 'dc' in params:
pillarenv = opts_merged.get('pillarenv') or 'base'
params['dc'] = _resolve_datacenter(params['dc'], pillarenv)
if consul:
# Sanity check. ACL Tokens are supported on python-consul 0.4.7 onwards only.
if consul.__version__ < '0.4.7' and params.get('target'):
params.pop('target')
return consul.Consul(**params)
else:
raise CommandExecutionError(
'(unable to import consul, '
'module most likely not installed. Download python-consul '
'module and be sure to import consul)'
Return a client object for accessing consul | Below is the instruction that describes the task:
### Input:
Return a client object for accessing consul
### Response:
def get_conn(opts, profile):
'''
Return a client object for accessing consul
'''
opts_pillar = opts.get('pillar', {})
opts_master = opts_pillar.get('master', {})
opts_merged = {}
opts_merged.update(opts_master)
opts_merged.update(opts_pillar)
opts_merged.update(opts)
if profile:
conf = opts_merged.get(profile, {})
else:
conf = opts_merged
params = {}
for key in conf:
if key.startswith('consul.'):
params[key.split('.')[1]] = conf[key]
if 'dc' in params:
pillarenv = opts_merged.get('pillarenv') or 'base'
params['dc'] = _resolve_datacenter(params['dc'], pillarenv)
if consul:
# Sanity check. ACL Tokens are supported on python-consul 0.4.7 onwards only.
if consul.__version__ < '0.4.7' and params.get('target'):
params.pop('target')
return consul.Consul(**params)
else:
raise CommandExecutionError(
'(unable to import consul, '
'module most likely not installed. Download python-consul '
'module and be sure to import consul)'
) |
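To see how the 'consul.' prefix stripping above shapes the client arguments, here is a small sketch with hypothetical profile values (the token is made up):

conf = {
    'consul.host': '127.0.0.1',        # hypothetical profile values
    'consul.port': 8500,
    'consul.token': 'not-a-real-token',
    'unrelated': True,                 # ignored: no 'consul.' prefix
}
params = {}
for key in conf:
    if key.startswith('consul.'):
        params[key.split('.')[1]] = conf[key]
print(params)   # {'host': '127.0.0.1', 'port': 8500, 'token': 'not-a-real-token'}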
def cloud_init(names, host=None, quiet=False, **kwargs):
'''
Wrapper for using lxc.init in saltcloud compatibility mode
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion to start the container on. Required.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
'''
if quiet:
log.warning("'quiet' argument is being deprecated. Please migrate to --quiet")
return __salt__['lxc.init'](names=names, host=host,
saltcloud_mode=True, quiet=quiet, **kwargs) | Wrapper for using lxc.init in saltcloud compatibility mode
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion to start the container on. Required.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead | Below is the instruction that describes the task:
### Input:
Wrapper for using lxc.init in saltcloud compatibility mode
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion to start the container on. Required.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
### Response:
def cloud_init(names, host=None, quiet=False, **kwargs):
'''
Wrapper for using lxc.init in saltcloud compatibility mode
names
Name of the containers, supports a single name or a comma delimited
list of names.
host
Minion to start the container on. Required.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
saltcloud_mode
init the container with the saltcloud opts format instead
'''
if quiet:
log.warning("'quiet' argument is being deprecated. Please migrate to --quiet")
return __salt__['lxc.init'](names=names, host=host,
saltcloud_mode=True, quiet=quiet, **kwargs) |
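As a usage note, a runner like this is invoked from the Salt master; a hedged example with hypothetical container and minion names:

salt-run lxc.cloud_init web1,web2 host=minion01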