def set_scrollregion(self, event=None):
    """ Set the scroll region on the canvas"""
    self.canvas.configure(scrollregion=self.canvas.bbox('all'))

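# Standalone sketch (an added illustration, not from the original source): the
# same bbox('all') call sizes a scroll region to whatever is drawn on a canvas.
import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, scrollregion=(0, 0, 0, 0))
canvas.pack(fill='both', expand=True)
canvas.create_line(0, 0, 500, 500)
canvas.configure(scrollregion=canvas.bbox('all'))
print(canvas.cget('scrollregion'))  # e.g. "0 0 500 500" (may include slack pixels)
root.destroy()
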
def cmd(self):
    """Returns the (last) saved command line.

    If the file was created from a run that resumed from a checkpoint, only
    the last command line used is returned.

    Returns
    -------
    cmd : string
        The command line that created this InferenceFile.
    """
    cmd = self.attrs["cmd"]
    if isinstance(cmd, numpy.ndarray):
        cmd = cmd[-1]
    return cmd

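# Behavior sketch (an added illustration; the real class wraps an h5py file,
# so the plain-dict `attrs` below is an assumption):
import numpy

class FakeInferenceFile:
    attrs = {"cmd": numpy.array(["run --start", "run --resume"])}
    cmd = cmd  # reuse the function above as a method

print(FakeInferenceFile().cmd())  # -> run --resume (only the last command line)
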
def clear(self):
    # type: () -> None
    """Clears the entire scope."""
    self._level = None
    self._fingerprint = None
    self._transaction = None
    self._user = None

    self._tags = {}  # type: Dict[str, Any]
    self._contexts = {}  # type: Dict[str, Dict]
    self._extras = {}  # type: Dict[str, Any]

    self.clear_breadcrumbs()
    self._should_capture = True
    self._span = None

def initialize(self, session_creator, session_init):
    """
    Create the session and set `self.sess`.
    Call `self.initialize_hooks()`.
    Finalize the graph.

    It must be called after callbacks are set up.

    Args:
        session_creator (tf.train.SessionCreator):
        session_init (sessinit.SessionInit):
    """
    assert isinstance(session_creator, tfv1.train.SessionCreator), session_creator
    assert isinstance(session_init, SessionInit), session_init
    session_init._setup_graph()

    logger.info("Creating the session ...")

    self.sess = session_creator.create_session()
    self.initialize_hooks()

    if self.is_chief:
        logger.info("Initializing the session ...")
        session_init._run_init(self.sess)
    else:
        if not isinstance(session_init, JustCurrentSession):
            logger.warn("This is not a chief worker, 'session_init' was ignored!")

    self.sess.graph.finalize()
    logger.info("Graph Finalized.")

def _bfs_from_cluster_tree(tree, bfs_root):
    """
    Perform a breadth first search on a tree in condensed tree format
    """
    result = []
    to_process = [bfs_root]

    while to_process:
        result.extend(to_process)
        to_process = tree['child'][np.in1d(tree['parent'], to_process)].tolist()

    return result

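# Worked example (an added illustration): a condensed tree is a structured
# array with 'parent' and 'child' fields; BFS from root 5 visits level by level.
import numpy as np

tree = np.array(
    [(5, 6), (5, 7), (6, 8), (6, 9), (7, 10)],
    dtype=[('parent', np.intp), ('child', np.intp)],
)
print(_bfs_from_cluster_tree(tree, 5))  # -> [5, 6, 7, 8, 9, 10]
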
def courses(self, request, enterprise_customer, pk=None):  # pylint: disable=invalid-name
    """
    Retrieve the list of courses contained within this catalog.

    Only courses with active course runs are returned. A course run is considered active if it is currently
    open for enrollment, or will open in the future.
    """
    catalog_api = CourseCatalogApiClient(request.user, enterprise_customer.site)
    courses = catalog_api.get_paginated_catalog_courses(pk, request.GET)

    # If the API returned an empty response, that means pagination has ended.
    # An empty response can also mean that there was a problem fetching data from catalog API.
    self.ensure_data_exists(
        request,
        courses,
        error_message=(
            "Unable to fetch API response for catalog courses from endpoint '{endpoint}'. "
            "The resource you are looking for does not exist.".format(endpoint=request.get_full_path())
        )
    )
    serializer = serializers.EnterpriseCatalogCoursesReadOnlySerializer(courses)

    # Add enterprise related context for the courses.
    serializer.update_enterprise_courses(enterprise_customer, catalog_id=pk)
    return get_paginated_response(serializer.data, request)

def circ_rayleigh(alpha, w=None, d=None):
    """Rayleigh test for non-uniformity of circular data.

    Parameters
    ----------
    alpha : np.array
        Sample of angles in radians.
    w : np.array
        Number of incidences in case of binned angle data.
    d : float
        Spacing (in radians) of bin centers for binned data. If supplied,
        a correction factor is used to correct for bias in the estimation
        of r.

    Returns
    -------
    z : float
        Z-statistic
    pval : float
        P-value

    Notes
    -----
    The Rayleigh test asks how large the resultant vector length R must be
    to indicate a non-uniform distribution (Fisher 1995).

    H0: the population is uniformly distributed around the circle
    HA: the population is not distributed uniformly around the circle

    The assumptions for the Rayleigh test are that (1) the distribution has
    only one mode and (2) the data is sampled from a von Mises distribution.

    Examples
    --------
    1. Simple Rayleigh test for non-uniformity of circular data.

        >>> from pingouin import circ_rayleigh
        >>> x = [0.785, 1.570, 3.141, 0.839, 5.934]
        >>> z, pval = circ_rayleigh(x)
        >>> print(z, pval)
        1.236 0.3048435876500138

    2. Specifying w and d

        >>> circ_rayleigh(x, w=[.1, .2, .3, .4, .5], d=0.2)
        (0.278, 0.8069972000769801)
    """
    alpha = np.array(alpha)
    if w is None:
        r = circ_r(alpha)
        n = len(alpha)
    else:
        if len(alpha) != len(w):
            raise ValueError("Input dimensions do not match")
        r = circ_r(alpha, w, d)
        n = np.sum(w)

    # Compute Rayleigh's statistic
    R = n * r
    z = (R**2) / n

    # Compute p value using approximation in Zar (1999), p. 617
    pval = np.exp(np.sqrt(1 + 4 * n + 4 * (n**2 - R**2)) - (1 + 2 * n))

    return np.round(z, 3), pval

def request(key, features, query, timeout=5):
    """Make an API request

    :param string key: API key to use
    :param list features: features to request. It must be a subset of :data:`FEATURES`
    :param string query: query to send
    :param integer timeout: timeout of the request
    :returns: result of the API request
    :rtype: dict
    """
    data = {}
    data['key'] = key
    data['features'] = '/'.join([f for f in features if f in FEATURES])
    data['query'] = quote(query)
    data['format'] = 'json'

    r = requests.get(API_URL.format(**data), timeout=timeout)
    results = json.loads(_unicode(r.content))
    return results

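# Call sketch (an added illustration; the key and feature names below are
# placeholders, not from the original source, and the call performs a live
# HTTP request against the module's API_URL; unknown features are filtered
# out by the `f in FEATURES` check above):
if __name__ == '__main__':
    results = request('0123456789abcdef', ['conditions', 'forecast'], 'CA/San_Francisco')
    print(sorted(results))
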
def update_metadata(self, loadbalancer, metadata, node=None):
    """
    Updates the existing metadata with the supplied dictionary. If
    'node' is supplied, the metadata for that node is updated instead
    of for the load balancer.
    """
    # Get the existing metadata
    md = self.get_metadata(loadbalancer, raw=True)
    id_lookup = dict([(itm["key"], itm["id"]) for itm in md])
    metadata_list = []
    # Updates must be done individually
    for key, val in metadata.items():
        try:
            meta_id = id_lookup[key]
            if node:
                uri = "/loadbalancers/%s/nodes/%s/metadata/%s" % (
                    utils.get_id(loadbalancer), utils.get_id(node),
                    meta_id)
            else:
                uri = "/loadbalancers/%s/metadata/%s" % (
                    utils.get_id(loadbalancer), meta_id)
            req_body = {"meta": {"value": val}}
            resp, body = self.api.method_put(uri, body=req_body)
        except KeyError:
            # Not an existing key; add to metadata_list
            metadata_list.append({"key": key, "value": val})
    if metadata_list:
        # New items; POST them
        if node:
            uri = "/loadbalancers/%s/nodes/%s/metadata" % (
                utils.get_id(loadbalancer), utils.get_id(node))
        else:
            uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
        req_body = {"metadata": metadata_list}
        resp, body = self.api.method_post(uri, body=req_body)

def scan_codes(code_types, image):
    """
    Get *code_type* codes from a PIL Image.

    *code_type* can be any of zbar supported code type [#zbar_symbologies]_:

    - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
    - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
      DataBar (`databar`) and DataBar Expanded (`databar-exp`)
    - **2D**: QR Code (`qrcode`)
    - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`

    .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html

    Args:
        code_types (list(str)): Code type(s) to search (see ``zbarlight.Symbologies`` for supported values).
        image (PIL.Image.Image): Image to scan

    returns:
        A list of *code_type* code values or None
    """
    if isinstance(code_types, str):
        code_types = [code_types]
        warnings.warn(
            'Using a str for code_types is deprecated, please use a list of str instead',
            DeprecationWarning,
        )

    # Translate symbologies
    symbologies = [
        Symbologies.get(code_type.upper())
        for code_type in set(code_types)
    ]

    # Check that all symbologies are known
    if None in symbologies:
        bad_code_types = [code_type for code_type in code_types if code_type.upper() not in Symbologies]
        raise UnknownSymbologieError('Unknown Symbologies: %s' % bad_code_types)

    # Convert the image to be used by c-extension
    if not Image.isImageType(image):
        raise RuntimeError('Bad or unknown image format')
    converted_image = image.convert('L')  # Convert image to gray scale (8 bits per pixel).
    raw = converted_image.tobytes()  # Get image data.
    width, height = converted_image.size  # Get image size.

    return zbar_code_scanner(symbologies, raw, width, height)

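# Usage sketch following zbarlight's documented README pattern (the image
# path below is a placeholder):
from PIL import Image
import zbarlight

with open('qrcode.png', 'rb') as image_file:
    image = Image.open(image_file)
    image.load()

print(zbarlight.scan_codes(['qrcode'], image))  # e.g. [b'...payload...'] or None
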
def relocate(self, destination):
    """Configure the virtual environment for another path.

    Args:
        destination (str): The target path of the virtual environment.

    Note:
        This does not actually move the virtual environment. It only
        rewrites the metadata required to support a move.
    """
    for activate in self.bin.activates:
        activate.vpath = destination
    for binfile in self.bin.files:
        if binfile.shebang and (
            'python' in binfile.shebang or 'pypy' in binfile.shebang
        ):
            binfile.shebang = '#!{0}'.format(
                os.path.join(destination, 'bin', 'python')
            )

def _resolve_looppart(parts, assign_path, context):
    """recursive function to resolve multiple assignments on loops"""
    assign_path = assign_path[:]
    index = assign_path.pop(0)
    for part in parts:
        if part is util.Uninferable:
            continue
        if not hasattr(part, "itered"):
            continue
        try:
            itered = part.itered()
        except TypeError:
            continue
        for stmt in itered:
            index_node = nodes.Const(index)
            try:
                assigned = stmt.getitem(index_node, context)
            except (
                AttributeError,
                exceptions.AstroidTypeError,
                exceptions.AstroidIndexError,
            ):
                continue
            if not assign_path:
                # we have fully resolved the assignment path;
                # don't infer the last part
                yield assigned
            elif assigned is util.Uninferable:
                break
            else:
                # we are not yet on the last part of the path
                # search on each possibly inferred value
                try:
                    yield from _resolve_looppart(
                        assigned.infer(context), assign_path, context
                    )
                except exceptions.InferenceError:
                    break

def send(self, packet, mac_addr=broadcast_addr):
    """place sent packets directly into the receiver's queues (as if they are connected by wire)"""
    if self.keep_listening:
        if mac_addr == self.broadcast_addr:
            for addr, recv_queue in self.inq.items():
                recv_queue.put(packet)
        else:
            self.inq[mac_addr].put(packet)
            self.inq[self.broadcast_addr].put(packet)
    else:
        self.log("is down.")

def _in_deferred_types(self, cls):
    """
    Check if the given class is specified in the deferred type registry.

    Returns the printer from the registry if it exists, and None if the
    class is not in the registry. Successful matches will be moved to the
    regular type registry for future use.
    """
    mod = getattr(cls, '__module__', None)
    name = getattr(cls, '__name__', None)
    key = (mod, name)
    printer = None
    if key in self.deferred_printers:
        # Move the printer over to the regular registry.
        printer = self.deferred_printers.pop(key)
        self.type_printers[cls] = printer
    return printer

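# Behavior sketch (an added illustration with a minimal stand-in formatter):
# a deferred printer keyed by (module, name) is promoted to the type registry.
import decimal

class FakeFormatter:
    def __init__(self):
        self.deferred_printers = {('decimal', 'Decimal'): repr}
        self.type_printers = {}

    _in_deferred_types = _in_deferred_types  # reuse the function above

fmt = FakeFormatter()
print(fmt._in_deferred_types(decimal.Decimal))  # -> <built-in function repr>
print(decimal.Decimal in fmt.type_printers)     # -> True (promoted)
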
def bind_switcher(cls):
    """
    Bind the switch checkbox to functions for switching between types of
    inputs.
    """
    def show_two_conspect():
        cls.is_twoconspect = True

        # search by class
        for el in cls.two_conspect_el:
            el.style.display = "block"
        cls.whole_conspect_subconspect_el.style.display = "none"

    def hide_two_conspect():
        cls.is_twoconspect = False

        for el in cls.two_conspect_el:
            el.style.display = "none"
        cls.whole_conspect_subconspect_el.style.display = "block"

    def show_or_hide_two_conspect(ev):
        val = cls.get()

        # check / uncheck both checkboxes
        for el in cls.switcher_els:
            el.checked = ev.target.checked

        if ev.target.checked:
            hide_two_conspect()
            cls.set(val)
            return

        show_two_conspect()
        cls.set(val)

    # bind both `conspect_switchers`
    for el in document.get(selector=".conspect_switcher"):
        el.bind("change", show_or_hide_two_conspect)

def fromJSON(value):
    """loads the GP object from a JSON string """
    j = json.loads(value)
    v = GPDataFile()
    if "defaultValue" in j:
        v.value = j['defaultValue']
    else:
        v.value = j['value']
    if 'paramName' in j:
        v.paramName = j['paramName']
    elif 'name' in j:
        v.paramName = j['name']
    return v

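# Round-trip sketch (an added illustration; the payload below is made up, and
# the module-level `json` import and `GPDataFile` class from the original
# source are assumed to be in scope):
gp = fromJSON('{"defaultValue": "http://example.com/data.csv", "paramName": "input_file"}')
print(gp.value, gp.paramName)  # -> http://example.com/data.csv input_file
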
def solve(self, solver_klass=None):
    """ Solves an optimal power flow and returns a results dictionary.
    """
    # Start the clock.
    t0 = time()

    # Build an OPF model with variables and constraints.
    om = self._construct_opf_model(self.case)
    if om is None:
        return {"converged": False, "output": {"message": "No Ref Bus."}}

    # Call the specific solver.
    # if self.opt["verbose"]:
    #     print '\nPYLON Version %s, %s', "0.4.2", "April 2010"
    if solver_klass is not None:
        result = solver_klass(om, opt=self.opt).solve()
    elif self.dc:
        # if self.opt["verbose"]:
        #     print ' -- DC Optimal Power Flow\n'
        result = DCOPFSolver(om, opt=self.opt).solve()
    else:
        # if self.opt["verbose"]:
        #     print ' -- AC Optimal Power Flow\n'
        result = PIPSSolver(om, opt=self.opt).solve()

    result["elapsed"] = time() - t0

    if self.opt.get("verbose"):
        logger.info("OPF completed in %.3fs." % result["elapsed"])

    return result

def delete(self, key):
    """Implementation of :meth:`~simplekv.KeyValueStore.delete`.

    If an exception occurs in either the cache or backing store, it is
    passed on.
    """
    self._dstore.delete(key)
    self.cache.delete(key)

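# Write-through sketch (an added illustration assuming simplekv's DictStore
# and CacheDecorator layout; treat the exact imports as an assumption):
from simplekv.memory import DictStore
from simplekv.cache import CacheDecorator

store = CacheDecorator(DictStore(), DictStore())  # (cache, store)
store.put('key', b'value')
store.delete('key')        # backing store first, then cache
print(list(store.keys()))  # -> []
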
def BinarySigmoid(self, func):
    '''
    Currently, caffe2 does not support this function.
    '''
    n = onnx.helper.make_node(
        'HardSigmoid',
        func.input,
        func.output,
        alpha=1.0,
        beta=0.0
    )
    return [n]

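# Semantics sketch (an added illustration): with alpha=1.0 and beta=0.0, ONNX
# HardSigmoid computes clip(1.0 * x + 0.0, 0, 1), which only approximates a
# hard binary threshold.
from onnx import helper

node = helper.make_node('HardSigmoid', ['x'], ['y'], alpha=1.0, beta=0.0)
print(node)
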
def biasFromLocations(locs, preferOrigin=True):
    """
    Find the vector that translates the whole system to the origin.
    """
    dims = {}
    locs.sort()
    for l in locs:
        for d in l.keys():
            if d not in dims:
                dims[d] = []
            v = l[d]
            if type(v) == tuple:
                dims[d].append(v[0])
                dims[d].append(v[1])
            else:
                dims[d].append(v)
    candidate = Location()
    for k in dims.keys():
        dims[k].sort()
        v = mostCommon(dims[k])
        if dims[k].count(v) > 1:
            # add the dimension with two or more hits
            candidate[k] = mostCommon(dims[k])
    matches = []
    # 1. do we have an exact match?
    for l in locs:
        if candidate == l:
            return l
    # 2. find a location that matches candidate (but has more dimensions)
    for l in locs:
        ok = True
        for k, v in candidate.items():
            if l.get(k) != v:
                ok = False
                break
        if ok:
            if l not in matches:
                matches.append(l)
    matches.sort()
    if len(matches) > 0:
        if preferOrigin:
            for c in matches:
                if c.isOrigin():
                    return c
        return matches[0]
    # 3. no matches. Find the best from the available locations
    results = {}
    for bias in locs:
        rel = []
        for l in locs:
            rel.append((l - bias).isOnAxis())
        c = rel.count(False)
        if c not in results:
            results[c] = []
        results[c].append(bias)
    if results:
        candidates = results[min(results.keys())]
        if preferOrigin:
            for c in candidates:
                if c.isOrigin():
                    return c
        candidates.sort()
        return candidates[0]
    return Location()

def _proper_namespace(self, owner=None, app=None, sharing=None):
    """Produce a namespace sans wildcards for use in entity requests.

    This method tries to fill in the fields of the namespace which are `None`
    or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
    the service's namespace.

    :param owner:
    :param app:
    :param sharing:
    :return:
    """
    if owner is None and app is None and sharing is None:  # No namespace provided
        if self._state is not None and 'access' in self._state:
            return (self._state.access.owner,
                    self._state.access.app,
                    self._state.access.sharing)
        else:
            return (self.service.namespace['owner'],
                    self.service.namespace['app'],
                    self.service.namespace['sharing'])
    else:
        return (owner, app, sharing)

def deserialize_data(self, buffer=bytes(), byte_order=None):
    """ De-serializes the :attr:`data` object referenced by the `Pointer`
    field from the byte *buffer* by mapping the bytes to the
    :attr:`~Field.value` for each :class:`Field` in the :attr:`data` object
    in accordance with the decoding *byte order* for the de-serialization
    and the decoding :attr:`byte_order` of each :class:`Field` in the
    :attr:`data` object.

    A specific decoding :attr:`byte_order` of a :class:`Field` in
    the :attr:`data` object overrules the decoding *byte order* for the
    de-serialization.

    Returns the :class:`Index` of the *buffer* after the last de-serialized
    :class:`Field` in the :attr:`data` object.

    :param bytes buffer: byte stream. Default is the internal
        :attr:`bytestream` of the `Pointer` field.
    :keyword byte_order: decoding byte order for the de-serialization.
        Default is the :attr:`data_byte_order` of the `Pointer` field.
    :type byte_order: :class:`Byteorder`, :class:`str`
    """
    index = Index(0, 0, self.address, self.base_address, False)
    if self._data:
        if byte_order not in ('little', 'big', Byteorder.little, Byteorder.big):
            byte_order = self.data_byte_order
        index = self._data.deserialize(buffer or self._data_stream,
                                       index,
                                       nested=False,
                                       byte_order=byte_order)
    return index

def update(self,
           stats,
           duration=3,
           cs_status=None,
           return_to_browser=False):
    """Update the screen.

    INPUT
    stats: Stats database to display
    duration: duration of the loop
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exit, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Other cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    # If the duration is < 0 (update + export time > refresh_time)
    # Then display the interface and log a message
    if duration <= 0:
        logger.warning('Update and export time higher than refresh_time.')
        duration = 0.1

    # Wait duration (in s) time
    exitkey = False
    countdown = Timer(duration)
    # Set the default timeout (in ms) for the getch method
    self.term_window.timeout(int(duration * 1000))
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # Is it an exit key ?
        exitkey = (pressedkey == ord('\x1b') or pressedkey == ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats, cs_status=cs_status)
            # Overwrite the timeout with the countdown
            self.term_window.timeout(int(countdown.get() * 1000))

    return exitkey

def _final_redis_call(self, final_set, sort_options):
    """
    The final redis call to obtain the values to return from the "final_set"
    with some sort options.
    """
    conn = self.cls.get_connection()
    if sort_options is not None:
        # a sort, or values, call the SORT command on the set
        return conn.sort(final_set, **sort_options)
    else:
        # no sort, nor values, simply return the full set
        return conn.smembers(final_set)

def action_object(self, obj, **kwargs):
    """
    Stream of most recent actions where obj is the action_object.
    Keyword arguments will be passed to Action.objects.filter
    """
    check(obj)
    return obj.action_object_actions.public(**kwargs)

def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If a loopback device is not yet mapped to the file, a new one will be
    created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for d, f in six.iteritems(loopback_devices()):
        if f == path:
            return d

    if not os.path.exists(path):
        cmd = ['truncate', '--size', size, path]
        check_call(cmd)

    return create_loopback(path)

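# Usage sketch (an added illustration; requires root on Linux, and the path
# and size below are placeholders):
import os

if os.geteuid() == 0:
    print(ensure_loopback_device('/srv/test.img', '10G'))  # e.g. /dev/loop0
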
def _plot2d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : integer, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_colorbar : Boolean, optional
Adds colorbar to axis
add_labels : Boolean, optional
Use xarray metadata to label axes
norm : ``matplotlib.colors.Normalize`` instance, optional
If the ``norm`` has vmin or vmax specified, the corresponding kwarg
must be None.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
setting one of these values will fix the other by symmetry around
``center``. Setting both values prevents use of a diverging colormap.
If discrete levels are provided as an explicit list, both of these
values are ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either be ``viridis`` (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
When `Seaborn` is installed, ``cmap`` may also be a `seaborn`
color palette. If ``cmap`` is seaborn color palette and the plot type
is not ``contour`` or ``contourf``, ``levels`` must also be specified.
colors : discrete colors to plot, optional
A single color or a list of colors. If the plot type is not ``contour``
or ``contourf``, the ``levels`` argument is required.
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap. Setting it to ``False`` prevents use of a
diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with 2nd and 98th percentiles instead of the extreme values.
extend : {'neither', 'both', 'min', 'max'}, optional
How to draw arrows extending the colorbar beyond its limits. If not
provided, extend is inferred from vmin, vmax and the data limits.
levels : int or list-like object, optional
Split the colormap (cmap) into discrete color intervals. If an integer
is provided, "nice" levels are chosen based on the data range: this can
imply that the final number of levels is not exactly the expected one.
Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
setting ``levels=np.linspace(vmin, vmax, N)``.
infer_intervals : bool, optional
Only applies to pcolormesh. If True, the coordinate intervals are
passed to pcolormesh. If False, the original coordinates are used
(this can be useful for certain map projections). The default is to
always infer intervals, unless the mesh is irregular and plotted on
a map projection.
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only applies
to FacetGrid plotting.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar.
cbar_kwargs : dict, optional
Dictionary of keyword arguments to pass to the colorbar.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = '%s\n%s' % (plotfunc.__doc__, commondoc)
@functools.wraps(plotfunc)
def newplotfunc(darray, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None,
col_wrap=None, xincrease=True, yincrease=True,
add_colorbar=None, add_labels=True, vmin=None, vmax=None,
cmap=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, colors=None,
subplot_kws=None, cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Decide on a default for the colorbar before facetgrids
if add_colorbar is None:
add_colorbar = plotfunc.__name__ != 'contour'
imshow_rgb = (
plotfunc.__name__ == 'imshow' and
darray.ndim == (3 + (row is not None) + (col is not None)))
if imshow_rgb:
# Don't add a colorbar when showing an image with explicit colors
add_colorbar = False
# Matplotlib does not support normalising RGB data, so do it here.
# See eg. https://github.com/matplotlib/matplotlib/pull/10220
if robust or vmax is not None or vmin is not None:
darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
vmin, vmax, robust = None, None, False
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.pop('imshow_rgb')
allargs.update(allargs.pop('kwargs'))
allargs.pop('darray')
# Need the decorated plotting function
allargs['plotfunc'] = globals()[plotfunc.__name__]
return _easy_facetgrid(darray, kind='dataarray', **allargs)
plt = import_matplotlib_pyplot()
rgb = kwargs.pop('rgb', None)
if rgb is not None and plotfunc.__name__ != 'imshow':
raise ValueError('The "rgb" keyword is only valid for imshow()')
elif rgb is not None and not imshow_rgb:
raise ValueError('The "rgb" keyword is only valid for imshow()'
' with a three-dimensional array (per facet)')
xlab, ylab = _infer_xy_labels(
darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)
# better to pass the ndarrays directly to plotting functions
xval = darray[xlab].values
yval = darray[ylab].values
# check if we need to broadcast one dimension
if xval.ndim < yval.ndim:
xval = np.broadcast_to(xval, yval.shape)
if yval.ndim < xval.ndim:
yval = np.broadcast_to(yval, xval.shape)
# May need to transpose for correct x, y labels
# xlab may be the name of a coord, we have to check for dim names
if imshow_rgb:
# For RGB[A] images, matplotlib requires the color dimension
# to be last. In Xarray the order should be unimportant, so
# we transpose to (y, x, color) to make this work.
yx_dims = (ylab, xlab)
dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)
if dims != darray.dims:
darray = darray.transpose(*dims)
elif darray[xlab].dims[-1] == darray.dims[0]:
darray = darray.transpose()
# Pass the data as a masked ndarray too
zval = darray.to_masked_array(copy=False)
# Replace pd.Intervals if contained in xval or yval.
xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)
_ensure_plottable(xplt, yplt)
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
plotfunc, locals(), zval.data)
if 'contour' in plotfunc.__name__:
# extend is a keyword argument only for contour and contourf, but
# passing it to the colorbar is sufficient for imshow and
# pcolormesh
kwargs['extend'] = cmap_params['extend']
kwargs['levels'] = cmap_params['levels']
# if colors == a single color, matplotlib draws dashed negative
# contours. we lose this feature if we pass cmap and not colors
if isinstance(colors, str):
cmap_params['cmap'] = None
kwargs['colors'] = colors
if 'pcolormesh' == plotfunc.__name__:
kwargs['infer_intervals'] = infer_intervals
if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):
# forbid usage of mpl strings
raise ValueError("plt.imshow's `aspect` kwarg is not available "
"in xarray")
ax = get_axis(figsize, size, aspect, ax)
primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],
vmin=cmap_params['vmin'],
vmax=cmap_params['vmax'],
norm=cmap_params['norm'],
**kwargs)
# Label the plot with metadata
if add_labels:
ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
ax.set_title(darray._title_for_slice())
if add_colorbar:
if add_labels and 'label' not in cbar_kwargs:
cbar_kwargs['label'] = label_from_attrs(darray)
cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs,
cmap_params)
elif (cbar_ax is not None or cbar_kwargs):
# inform the user about keywords which aren't used
raise ValueError("cbar_ax and cbar_kwargs can't be used with "
"add_colorbar=False.")
# origin kwarg overrides yincrease
if 'origin' in kwargs:
yincrease = None
_update_axes(ax, xincrease, yincrease, xscale, yscale,
xticks, yticks, xlim, ylim)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha('right')
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(_PlotMethods_obj, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None, col_wrap=None,
xincrease=True, yincrease=True, add_colorbar=None,
add_labels=True, vmin=None, vmax=None, cmap=None,
colors=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, subplot_kws=None,
cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs['darray'] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ['_PlotMethods_obj', 'newplotfunc', 'kwargs']:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc | Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods | Below is the instruction that describes the task:
### Input:
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
### Response:
def _plot2d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : integer, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_colorbar : Boolean, optional
Adds colorbar to axis
add_labels : Boolean, optional
Use xarray metadata to label axes
norm : ``matplotlib.colors.Normalize`` instance, optional
If the ``norm`` has vmin or vmax specified, the corresponding kwarg
must be None.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
setting one of these values will fix the other by symmetry around
``center``. Setting both values prevents use of a diverging colormap.
If discrete levels are provided as an explicit list, both of these
values are ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will either be ``viridis`` (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
When `Seaborn` is installed, ``cmap`` may also be a `seaborn`
color palette. If ``cmap`` is a seaborn color palette and the plot type
is not ``contour`` or ``contourf``, ``levels`` must also be specified.
colors : discrete colors to plot, optional
A single color or a list of colors. If the plot type is not ``contour``
or ``contourf``, the ``levels`` argument is required.
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap. Setting it to ``False`` prevents use of a
diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with 2nd and 98th percentiles instead of the extreme values.
extend : {'neither', 'both', 'min', 'max'}, optional
How to draw arrows extending the colorbar beyond its limits. If not
provided, extend is inferred from vmin, vmax and the data limits.
levels : int or list-like object, optional
Split the colormap (cmap) into discrete color intervals. If an integer
is provided, "nice" levels are chosen based on the data range: this can
imply that the final number of levels is not exactly the expected one.
Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
setting ``levels=np.linspace(vmin, vmax, N)``.
infer_intervals : bool, optional
Only applies to pcolormesh. If True, the coordinate intervals are
passed to pcolormesh. If False, the original coordinates are used
(this can be useful for certain map projections). The default is to
always infer intervals, unless the mesh is irregular and plotted on
a map projection.
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only applies
to FacetGrid plotting.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar.
cbar_kwargs : dict, optional
Dictionary of keyword arguments to pass to the colorbar.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = '%s\n%s' % (plotfunc.__doc__, commondoc)
@functools.wraps(plotfunc)
def newplotfunc(darray, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None,
col_wrap=None, xincrease=True, yincrease=True,
add_colorbar=None, add_labels=True, vmin=None, vmax=None,
cmap=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, colors=None,
subplot_kws=None, cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Decide on a default for the colorbar before facetgrids
if add_colorbar is None:
add_colorbar = plotfunc.__name__ != 'contour'
imshow_rgb = (
plotfunc.__name__ == 'imshow' and
darray.ndim == (3 + (row is not None) + (col is not None)))
if imshow_rgb:
# Don't add a colorbar when showing an image with explicit colors
add_colorbar = False
# Matplotlib does not support normalising RGB data, so do it here.
# See eg. https://github.com/matplotlib/matplotlib/pull/10220
if robust or vmax is not None or vmin is not None:
darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
vmin, vmax, robust = None, None, False
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.pop('imshow_rgb')
allargs.update(allargs.pop('kwargs'))
allargs.pop('darray')
# Need the decorated plotting function
allargs['plotfunc'] = globals()[plotfunc.__name__]
return _easy_facetgrid(darray, kind='dataarray', **allargs)
plt = import_matplotlib_pyplot()
rgb = kwargs.pop('rgb', None)
if rgb is not None and plotfunc.__name__ != 'imshow':
raise ValueError('The "rgb" keyword is only valid for imshow()')
elif rgb is not None and not imshow_rgb:
raise ValueError('The "rgb" keyword is only valid for imshow()'
' with a three-dimensional array (per facet)')
xlab, ylab = _infer_xy_labels(
darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)
# better to pass the ndarrays directly to plotting functions
xval = darray[xlab].values
yval = darray[ylab].values
# check if we need to broadcast one dimension
if xval.ndim < yval.ndim:
xval = np.broadcast_to(xval, yval.shape)
if yval.ndim < xval.ndim:
yval = np.broadcast_to(yval, xval.shape)
# May need to transpose for correct x, y labels
# xlab may be the name of a coord, we have to check for dim names
if imshow_rgb:
# For RGB[A] images, matplotlib requires the color dimension
# to be last. In Xarray the order should be unimportant, so
# we transpose to (y, x, color) to make this work.
yx_dims = (ylab, xlab)
dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)
if dims != darray.dims:
darray = darray.transpose(*dims)
elif darray[xlab].dims[-1] == darray.dims[0]:
darray = darray.transpose()
# Pass the data as a masked ndarray too
zval = darray.to_masked_array(copy=False)
# Replace pd.Intervals if contained in xval or yval.
xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)
_ensure_plottable(xplt, yplt)
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
plotfunc, locals(), zval.data)
if 'contour' in plotfunc.__name__:
# extend is a keyword argument only for contour and contourf, but
# passing it to the colorbar is sufficient for imshow and
# pcolormesh
kwargs['extend'] = cmap_params['extend']
kwargs['levels'] = cmap_params['levels']
# if colors == a single color, matplotlib draws dashed negative
# contours. we lose this feature if we pass cmap and not colors
if isinstance(colors, str):
cmap_params['cmap'] = None
kwargs['colors'] = colors
if 'pcolormesh' == plotfunc.__name__:
kwargs['infer_intervals'] = infer_intervals
if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):
# forbid usage of mpl strings
raise ValueError("plt.imshow's `aspect` kwarg is not available "
"in xarray")
ax = get_axis(figsize, size, aspect, ax)
primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],
vmin=cmap_params['vmin'],
vmax=cmap_params['vmax'],
norm=cmap_params['norm'],
**kwargs)
# Label the plot with metadata
if add_labels:
ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
ax.set_title(darray._title_for_slice())
if add_colorbar:
if add_labels and 'label' not in cbar_kwargs:
cbar_kwargs['label'] = label_from_attrs(darray)
cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs,
cmap_params)
elif (cbar_ax is not None or cbar_kwargs):
# inform the user about keywords which aren't used
raise ValueError("cbar_ax and cbar_kwargs can't be used with "
"add_colorbar=False.")
# origin kwarg overrides yincrease
if 'origin' in kwargs:
yincrease = None
_update_axes(ax, xincrease, yincrease, xscale, yscale,
xticks, yticks, xlim, ylim)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha('right')
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(_PlotMethods_obj, x=None, y=None, figsize=None, size=None,
aspect=None, ax=None, row=None, col=None, col_wrap=None,
xincrease=True, yincrease=True, add_colorbar=None,
add_labels=True, vmin=None, vmax=None, cmap=None,
colors=None, center=None, robust=False, extend=None,
levels=None, infer_intervals=None, subplot_kws=None,
cbar_ax=None, cbar_kwargs=None,
xscale=None, yscale=None, xticks=None, yticks=None,
xlim=None, ylim=None, norm=None, **kwargs):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs['darray'] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ['_PlotMethods_obj', 'newplotfunc', 'kwargs']:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc |
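To make the decorator's role concrete, here is a minimal sketch of how a plotting primitive might be wrapped with _plot2d as defined above; the function body is illustrative, not xarray's actual implementation:
@_plot2d
def contourf(x, y, z, ax, **kwargs):
    """Filled contour plot of a 2d DataArray (wrapped matplotlib call)."""
    # The decorator supplies cmap/vmin/vmax/norm via kwargs and handles
    # faceting, labels and colorbars; the wrapped function only needs to
    # invoke the matplotlib primitive and return its artist.
    return ax.contourf(x, y, z, **kwargs)
DataArray.plot.contourf(...) would then dispatch to the decorated function through the generated plotmethod.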
def add_service(self, zconf, typ, name):
""" Add a service to the collection. """
service = None
tries = 0
_LOGGER.debug("add_service %s, %s", typ, name)
while service is None and tries < 4:
try:
service = zconf.get_service_info(typ, name)
except IOError:
# If the zeroconf fails to receive the necessary data we abort
# adding the service
break
tries += 1
if not service:
_LOGGER.debug("add_service failed to add %s, %s", typ, name)
return
def get_value(key):
"""Retrieve value and decode to UTF-8."""
value = service.properties.get(key.encode('utf-8'))
if value is None or isinstance(value, str):
return value
return value.decode('utf-8')
ips = zconf.cache.entries_with_name(service.server.lower())
host = repr(ips[0]) if ips else service.server
model_name = get_value('md')
uuid = get_value('id')
friendly_name = get_value('fn')
if uuid:
uuid = UUID(uuid)
self.services[name] = (host, service.port, uuid, model_name,
friendly_name)
if self.add_callback:
self.add_callback(name) | Add a service to the collection. | Below is the instruction that describes the task:
### Input:
Add a service to the collection.
### Response:
def add_service(self, zconf, typ, name):
""" Add a service to the collection. """
service = None
tries = 0
_LOGGER.debug("add_service %s, %s", typ, name)
while service is None and tries < 4:
try:
service = zconf.get_service_info(typ, name)
except IOError:
# If the zeroconf fails to receive the necessary data we abort
# adding the service
break
tries += 1
if not service:
_LOGGER.debug("add_service failed to add %s, %s", typ, name)
return
def get_value(key):
"""Retrieve value and decode to UTF-8."""
value = service.properties.get(key.encode('utf-8'))
if value is None or isinstance(value, str):
return value
return value.decode('utf-8')
ips = zconf.cache.entries_with_name(service.server.lower())
host = repr(ips[0]) if ips else service.server
model_name = get_value('md')
uuid = get_value('id')
friendly_name = get_value('fn')
if uuid:
uuid = UUID(uuid)
self.services[name] = (host, service.port, uuid, model_name,
friendly_name)
if self.add_callback:
self.add_callback(name) |
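The get_value helper above deals with zeroconf TXT properties, which are keyed by bytes and may hold bytes values. A small self-contained sketch of that decoding pattern (the props dict below is fabricated to mimic service.properties):
props = {b'md': b'Chromecast', b'fn': b'Living Room TV'}

def get_value(key):
    # Keys are looked up as UTF-8 bytes; bytes values are decoded, while
    # str or missing values pass through unchanged.
    value = props.get(key.encode('utf-8'))
    if value is None or isinstance(value, str):
        return value
    return value.decode('utf-8')

print(get_value('md'))  # -> 'Chromecast'
print(get_value('id'))  # -> None (key absent)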
def _request(self, *args, **kwargs):
# type: (Any) -> Response
"""Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed."""
self._amend_request_kwargs(kwargs)
_response = self._requests_session.request(*args, **kwargs)
try:
_response.raise_for_status()
except HTTPError as e:
if e.response is not None:
raise_from(ConjureHTTPError(e), e)
raise e
return _response | Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed. | Below is the instruction that describes the task:
### Input:
Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed.
### Response:
def _request(self, *args, **kwargs):
# type: (Any) -> Response
"""Make requests using configured :class:`requests.Session`.
Any error details will be extracted to an :class:`HTTPError`
which will contain relevant error details when printed."""
self._amend_request_kwargs(kwargs)
_response = self._requests_session.request(*args, **kwargs)
try:
_response.raise_for_status()
except HTTPError as e:
if e.response is not None:
raise_from(ConjureHTTPError(e), e)
raise e
return _response |
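The try/except above is a wrap-and-chain pattern: the transport error is converted into a domain error while the original traceback is preserved (raise_from is likely six's portable spelling of Python 3's raise ... from). A standalone sketch of the same idea, with hypothetical names (fetch, ServiceError) that are not part of the client shown:
import requests

class ServiceError(Exception):
    """Domain-specific error that carries the HTTP failure details."""

def fetch(url):
    response = requests.get(url)
    try:
        response.raise_for_status()
    except requests.HTTPError as e:
        # Python 3 equivalent of six.raise_from(ServiceError(...), e)
        raise ServiceError(str(e)) from e
    return response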
def reduce_max(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape is None:
output_shape = Shape([])
if output_shape == x.shape:
return x
return ReduceOperation(
x, output_shape, "MAX", name=name or "reduce_max").outputs[0] | Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor | Below is the instruction that describes the task:
### Input:
Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
### Response:
def reduce_max(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape is None:
output_shape = Shape([])
if output_shape == x.shape:
return x
return ReduceOperation(
x, output_shape, "MAX", name=name or "reduce_max").outputs[0] |
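This appears to be Mesh TensorFlow's reduce_max; a hedged usage sketch follows. The dimension names are invented, and the calls are shown as comments because they require an mtf mesh/graph to actually run:
import mesh_tensorflow as mtf  # assumption: this function lives in mtf

# Given x with shape [batch, length]:
# length_dim = mtf.Dimension("length", 128)
# y = mtf.reduce_max(x, reduced_dim=length_dim)  # result shape: [batch]
# z = mtf.reduce_max(x)                          # result shape: [] (scalar)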
def reset(self):
"""
Stops the timer and resets its values to 0.
"""
self._elapsed = datetime.timedelta()
self._delta = datetime.timedelta()
self._starttime = datetime.datetime.now()
self.refresh() | Stops the timer and resets its values to 0. | Below is the instruction that describes the task:
### Input:
Stops the timer and resets its values to 0.
### Response:
def reset(self):
"""
Stops the timer and resets its values to 0.
"""
self._elapsed = datetime.timedelta()
self._delta = datetime.timedelta()
self._starttime = datetime.datetime.now()
self.refresh() |
def requestedFormat(request,acceptedFormat):
"""Return the response format requested by client
Client could specify requested format using:
(options are processed in this order)
- `format` field in http request
- `Accept` header in http request
Example:
requestedFormat(request, ['text/html','application/json'])
Args:
acceptedFormat: list containing all the accepted formats
Returns:
string: the user requested mime-type (if supported)
Raises:
ValueError: if the user requests a mime-type that is not supported
"""
if 'format' in request.args:
fieldFormat = request.args.get('format')
if fieldFormat not in acceptedFormat:
raise ValueError("requested format not supported: "+ fieldFormat)
return fieldFormat
else:
return request.accept_mimetypes.best_match(acceptedFormat) | Return the response format requested by client
Client could specify requested format using:
(options are processed in this order)
- `format` field in http request
- `Accept` header in http request
Example:
requestedFormat(request, ['text/html','application/json'])
Args:
acceptedFormat: list containing all the accepted formats
Returns:
string: the user requested mime-type (if supported)
Raises:
ValueError: if the user requests a mime-type that is not supported | Below is the instruction that describes the task:
### Input:
Return the response format requested by client
Client could specify requested format using:
(options are processed in this order)
- `format` field in http request
- `Accept` header in http request
Example:
requestedFormat(request, ['text/html','application/json'])
Args:
acceptedFormat: list containing all the accepted formats
Returns:
string: the user requested mime-type (if supported)
Raises:
ValueError: if the user requests a mime-type that is not supported
### Response:
def requestedFormat(request,acceptedFormat):
"""Return the response format requested by client
Client could specify requested format using:
(options are processed in this order)
- `format` field in http request
- `Accept` header in http request
Example:
requestedFormat(request, ['text/html','application/json'])
Args:
acceptedFormat: list containing all the accepted formats
Returns:
string: the user requested mime-type (if supported)
Raises:
ValueError: if the user requests a mime-type that is not supported
"""
if 'format' in request.args:
fieldFormat = request.args.get('format')
if fieldFormat not in acceptedFormat:
raise ValueError("requested format not supported: "+ fieldFormat)
return fieldFormat
else:
return request.accept_mimetypes.best_match(acceptedFormat) |
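A minimal Flask route using requestedFormat as defined above; the endpoint and payloads are hypothetical:
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/report')
def report():
    try:
        fmt = requestedFormat(request, ['text/html', 'application/json'])
    except ValueError as err:
        return str(err), 406  # Not Acceptable
    if fmt == 'application/json':
        return jsonify(status='ok')
    return '<p>ok</p>'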
def clone(kwargs=None, call=None):
'''
Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
'''
if call == 'action':
raise SaltCloudSystemExit(
'The clone function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
linode_id = kwargs.get('linode_id', None)
datacenter_id = kwargs.get('datacenter_id', None)
plan_id = kwargs.get('plan_id', None)
required_params = [linode_id, datacenter_id, plan_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The clone function requires a \'linode_id\', \'datacenter_id\', '
'and \'plan_id\' to be provided.'
)
clone_args = {
'LinodeID': linode_id,
'DatacenterID': datacenter_id,
'PlanID': plan_id
}
return _query('linode', 'clone', args=clone_args) | Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5 | Below is the instruction that describes the task:
### Input:
Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
### Response:
def clone(kwargs=None, call=None):
'''
Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
'''
if call == 'action':
raise SaltCloudSystemExit(
'The clone function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
linode_id = kwargs.get('linode_id', None)
datacenter_id = kwargs.get('datacenter_id', None)
plan_id = kwargs.get('plan_id', None)
required_params = [linode_id, datacenter_id, plan_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The clone function requires a \'linode_id\', \'datacenter_id\', '
'and \'plan_id\' to be provided.'
)
clone_args = {
'LinodeID': linode_id,
'DatacenterID': datacenter_id,
'PlanID': plan_id
}
return _query('linode', 'clone', args=clone_args) |
def database_remove_tags(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
"""
return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /database-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags | Below is the instruction that describes the task:
### Input:
Invokes the /database-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
### Response:
def database_remove_tags(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/removeTags API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FremoveTags
"""
return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs) |
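A hedged usage sketch: per the linked API documentation, removeTags takes an array of tag names, so input_params presumably carries a 'tags' key; the database ID below is fabricated:
result = database_remove_tags(
    'database-F8kyqj00bZg6Pk3x00000000',           # hypothetical object ID
    input_params={'tags': ['deprecated', 'tmp']},  # assumed payload shape
)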
def mkpart(device, part_type, fs_type=None, start=None, end=None):
'''
Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Examples:
.. code-block:: bash
salt '*' partition.mkpart /dev/sda primary fs_type=fat32 start=0 end=639
salt '*' partition.mkpart /dev/sda primary start=0 end=639
'''
if part_type not in set(['primary', 'logical', 'extended']):
raise CommandExecutionError(
'Invalid part_type passed to partition.mkpart'
)
if not _is_fstype(fs_type):
raise CommandExecutionError(
'Invalid fs_type passed to partition.mkpart'
)
if start is not None and end is not None:
_validate_partition_boundary(start)
_validate_partition_boundary(end)
if start is None:
start = ''
if end is None:
end = ''
if fs_type:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, fs_type, start, end)
else:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, start, end)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return out | Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Examples:
.. code-block:: bash
salt '*' partition.mkpart /dev/sda primary fs_type=fat32 start=0 end=639
salt '*' partition.mkpart /dev/sda primary start=0 end=639 | Below is the instruction that describes the task:
### Input:
Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Examples:
.. code-block:: bash
salt '*' partition.mkpart /dev/sda primary fs_type=fat32 start=0 end=639
salt '*' partition.mkpart /dev/sda primary start=0 end=639
### Response:
def mkpart(device, part_type, fs_type=None, start=None, end=None):
'''
Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Examples:
.. code-block:: bash
salt '*' partition.mkpart /dev/sda primary fs_type=fat32 start=0 end=639
salt '*' partition.mkpart /dev/sda primary start=0 end=639
'''
if part_type not in set(['primary', 'logical', 'extended']):
raise CommandExecutionError(
'Invalid part_type passed to partition.mkpart'
)
if not _is_fstype(fs_type):
raise CommandExecutionError(
'Invalid fs_type passed to partition.mkpart'
)
if start is not None and end is not None:
_validate_partition_boundary(start)
_validate_partition_boundary(end)
if start is None:
start = ''
if end is None:
end = ''
if fs_type:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, fs_type, start, end)
else:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, start, end)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return out |
def reset_term_stats(set_id, term_id, client_id, user_id, access_token):
"""Reset the stats of a term by deleting and re-creating it."""
found_sets = [user_set for user_set in get_user_sets(client_id, user_id)
if user_set.set_id == set_id]
if len(found_sets) != 1:
raise ValueError('{} set(s) found with id {}'.format(len(found_sets), set_id))
found_terms = [term for term in found_sets[0].terms if term.term_id == term_id]
if len(found_terms) != 1:
raise ValueError('{} term(s) found with id {}'.format(len(found_terms), term_id))
term = found_terms[0]
if term.image.url:
# Creating a term with an image requires an "image identifier", which you get by uploading
# an image via https://quizlet.com/api/2.0/docs/images , which can only be used by Quizlet
# PLUS members.
raise NotImplementedError('"{}" has an image and is thus not supported'.format(term))
print('Deleting "{}"...'.format(term))
delete_term(set_id, term_id, access_token)
print('Re-creating "{}"...'.format(term))
add_term(set_id, term, access_token)
print('Done') | Reset the stats of a term by deleting and re-creating it. | Below is the instruction that describes the task:
### Input:
Reset the stats of a term by deleting and re-creating it.
### Response:
def reset_term_stats(set_id, term_id, client_id, user_id, access_token):
"""Reset the stats of a term by deleting and re-creating it."""
found_sets = [user_set for user_set in get_user_sets(client_id, user_id)
if user_set.set_id == set_id]
if len(found_sets) != 1:
raise ValueError('{} set(s) found with id {}'.format(len(found_sets), set_id))
found_terms = [term for term in found_sets[0].terms if term.term_id == term_id]
if len(found_terms) != 1:
raise ValueError('{} term(s) found with id {}'.format(len(found_terms), term_id))
term = found_terms[0]
if term.image.url:
# Creating a term with an image requires an "image identifier", which you get by uploading
# an image via https://quizlet.com/api/2.0/docs/images , which can only be used by Quizlet
# PLUS members.
raise NotImplementedError('"{}" has an image and is thus not supported'.format(term))
print('Deleting "{}"...'.format(term))
delete_term(set_id, term_id, access_token)
print('Re-creating "{}"...'.format(term))
add_term(set_id, term, access_token)
print('Done') |
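An invocation sketch; all IDs and the token below are placeholders, not real Quizlet credentials:
reset_term_stats(
    set_id=123456789,
    term_id=987654321,
    client_id='MY_QUIZLET_CLIENT_ID',
    user_id='my_quizlet_username',
    access_token='MY_OAUTH_ACCESS_TOKEN',
)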
def error(self, argparser, target, message):
"""
This was used as part of the original non-recursive lookup for
the target parser.
"""
warnings.warn(
'Runtime.error is deprecated and will be removed by calmjs-4.0.0',
DeprecationWarning)
details = self.get_argparser_details(argparser)
argparser = details.subparsers[target] if details else self.argparser
argparser.error(message) | This was used as part of the original non-recursive lookup for
the target parser. | Below is the instruction that describes the task:
### Input:
This was used as part of the original non-recursive lookup for
the target parser.
### Response:
def error(self, argparser, target, message):
"""
This was used as part of the original non-recursive lookup for
the target parser.
"""
warnings.warn(
'Runtime.error is deprecated and will be removed by calmjs-4.0.0',
DeprecationWarning)
details = self.get_argparser_details(argparser)
argparser = details.subparsers[target] if details else self.argparser
argparser.error(message) |
def clean_up(files):
'''clean up will delete a list of files, only if they exist
'''
if not isinstance(files, list):
files = [files]
for f in files:
if os.path.exists(f):
bot.verbose3("Cleaning up %s" % f)
os.remove(f) | clean up will delete a list of files, only if they exist | Below is the instruction that describes the task:
### Input:
clean up will delete a list of files, only if they exist
### Response:
def clean_up(files):
'''clean up will delete a list of files, only if they exist
'''
if not isinstance(files, list):
files = [files]
for f in files:
if os.path.exists(f):
bot.verbose3("Cleaning up %s" % f)
os.remove(f) |
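clean_up normalizes a single path into a one-element list, so both call styles below work; this sketch assumes the module's bot logger is importable alongside the function:
import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
clean_up(path)                           # single string is wrapped in a list
clean_up([path, '/tmp/does-not-exist'])  # missing files are skipped quietly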
def get_assembly_names():
"""return list of available assemblies
>>> assy_names = get_assembly_names()
>>> 'GRCh37.p13' in assy_names
True
"""
return [
n.replace(".json.gz", "")
for n in pkg_resources.resource_listdir(__name__, _assy_dir)
if n.endswith(".json.gz")
] | return list of available assemblies
>>> assy_names = get_assembly_names()
>>> 'GRCh37.p13' in assy_names
True | Below is the instruction that describes the task:
### Input:
return list of available assemblies
>>> assy_names = get_assembly_names()
>>> 'GRCh37.p13' in assy_names
True
### Response:
def get_assembly_names():
"""return list of available assemblies
>>> assy_names = get_assembly_names()
>>> 'GRCh37.p13' in assy_names
True
"""
return [
n.replace(".json.gz", "")
for n in pkg_resources.resource_listdir(__name__, _assy_dir)
if n.endswith(".json.gz")
] |
def print_grains(self):
'''
Print out the grains
'''
grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts) | Print out the grains | Below is the the instruction that describes the task:
### Input:
Print out the grains
### Response:
def print_grains(self):
'''
Print out the grains
'''
grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts) | Print out the grains | Below is the instruction that describes the task:
def disassembler(co, lasti= -1):
"""Disassemble a code object.
:param co: code object
:param lasti: internal
:yields: Instructions.
"""
code = co.co_code
labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
i = 0
extended_arg = 0
lineno = 0
free = None
for i, op, oparg in _walk_ops(co):
if i in linestarts:
lineno = linestarts[i]
instr = Instruction(i=i, op=op, lineno=lineno)
instr.linestart = i in linestarts
if i == lasti:
instr.lasti = True
else:
instr.lasti = False
if i in labels:
instr.label = True
else:
instr.label = False
instr.oparg = oparg
extended_arg = 0
if op == dis.EXTENDED_ARG:
extended_arg = oparg * 65536
instr.extended_arg = extended_arg
if op >= dis.HAVE_ARGUMENT:
if op in dis.hasconst:
instr.arg = co.co_consts[oparg]
elif op in dis.hasname:
instr.arg = co.co_names[oparg]
elif op in dis.hasjrel:
instr.arg = i + oparg
elif op in dis.haslocal:
instr.arg = co.co_varnames[oparg]
elif op in dis.hascompare:
instr.arg = dis.cmp_op[oparg]
elif op in dis.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
instr.arg = free[oparg]
yield instr | Disassemble a code object.
:param co: code object
:param lasti: internal
:yields: Instructions. | Below is the instruction that describes the task:
### Input:
Disassemble a code object.
:param co: code object
:param lasti: internal
:yields: Instructions.
### Response:
def disassembler(co, lasti= -1):
"""Disassemble a code object.
:param co: code object
:param lasti: internal
:yields: Instructions.
"""
code = co.co_code
labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
i = 0
extended_arg = 0
lineno = 0
free = None
for i, op, oparg in _walk_ops(co):
if i in linestarts:
lineno = linestarts[i]
instr = Instruction(i=i, op=op, lineno=lineno)
instr.linestart = i in linestarts
if i == lasti:
instr.lasti = True
else:
instr.lasti = False
if i in labels:
instr.label = True
else:
instr.label = False
instr.oparg = oparg
extended_arg = 0
if op == dis.EXTENDED_ARG:
extended_arg = oparg * 65536
instr.extended_arg = extended_arg
if op >= dis.HAVE_ARGUMENT:
if op in dis.hasconst:
instr.arg = co.co_consts[oparg]
elif op in dis.hasname:
instr.arg = co.co_names[oparg]
elif op in dis.hasjrel:
instr.arg = i + oparg
elif op in dis.haslocal:
instr.arg = co.co_varnames[oparg]
elif op in dis.hascompare:
instr.arg = dis.cmp_op[oparg]
elif op in dis.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
instr.arg = free[oparg]
yield instr |
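disassembler yields Instruction objects but leans on module-private helpers (_walk_ops, Instruction) and Python-2-era opcode handling, so it cannot run standalone. On modern Python the standard library offers a comparable iterator, shown here as a runnable reference point rather than a drop-in replacement:
import dis

def f(a, b):
    return max(a, b) + 1

# Each entry carries the offset, opcode name and a printable argument,
# much like the Instruction objects produced above.
for instr in dis.get_instructions(f):
    print(instr.offset, instr.opname, instr.argrepr)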
def validate_wrap(self, value):
''' Validates the type and length of ``value`` '''
if not isinstance(value, basestring):
self._fail_validation_type(value, basestring)
if self.max is not None and len(value) > self.max:
self._fail_validation(value, 'Value too long (%d)' % len(value))
if self.min is not None and len(value) < self.min:
self._fail_validation(value, 'Value too short (%d)' % len(value)) | Validates the type and length of ``value`` | Below is the instruction that describes the task:
### Input:
Validates the type and length of ``value``
### Response:
def validate_wrap(self, value):
''' Validates the type and length of ``value`` '''
if not isinstance(value, basestring):
self._fail_validation_type(value, basestring)
if self.max is not None and len(value) > self.max:
self._fail_validation(value, 'Value too long (%d)' % len(value))
if self.min is not None and len(value) < self.min:
self._fail_validation(value, 'Value too short (%d)' % len(value)) |
def makeService(opt):
"""Return a service based on parsed command-line options
:param opt: dict-like object. Relevant keys are config, messages,
pid, frequency, threshold, killtime, minrestartdelay
and maxrestartdelay
:returns: service, {twisted.application.interfaces.IService}
"""
ret = get(config=opt['config'], messages=opt['messages'],
pidDir=opt['pid'], freq=opt['frequency'])
pm = ret.getServiceNamed("procmon")
pm.threshold = opt["threshold"]
pm.killTime = opt["killtime"]
pm.minRestartDelay = opt["minrestartdelay"]
pm.maxRestartDelay = opt["maxrestartdelay"]
return ret | Return a service based on parsed command-line options
:param opt: dict-like object. Relevant keys are config, messages,
pid, frequency, threshold, killtime, minrestartdelay
and maxrestartdelay
:returns: service, {twisted.application.interfaces.IService} | Below is the instruction that describes the task:
### Input:
Return a service based on parsed command-line options
:param opt: dict-like object. Relevant keys are config, messages,
pid, frequency, threshold, killtime, minrestartdelay
and maxrestartdelay
:returns: service, {twisted.application.interfaces.IService}
### Response:
def makeService(opt):
"""Return a service based on parsed command-line options
:param opt: dict-like object. Relevant keys are config, messages,
pid, frequency, threshold, killtime, minrestartdelay
and maxrestartdelay
:returns: service, {twisted.application.interfaces.IService}
"""
ret = get(config=opt['config'], messages=opt['messages'],
pidDir=opt['pid'], freq=opt['frequency'])
pm = ret.getServiceNamed("procmon")
pm.threshold = opt["threshold"]
pm.killTime = opt["killtime"]
pm.minRestartDelay = opt["minrestartdelay"]
pm.maxRestartDelay = opt["maxrestartdelay"]
return ret |
def finalize(self):
"""Disables redirection"""
if self._original_steam is not None and self._redirection:
sys.stdout = self._original_steam
print('Disabled redirection of `stdout`.')
self._redirection = False
self._original_steam = None | Disables redirection | Below is the the instruction that describes the task:
### Input:
Disables redirection
### Response:
def finalize(self):
"""Disables redirection"""
if self._original_steam is not None and self._redirection:
sys.stdout = self._original_steam
print('Disabled redirection of `stdout`.')
self._redirection = False
self._original_steam = None | Disables redirection | Below is the instruction that describes the task:
def forward_log_det_jacobian_fn(bijector):
"""Makes a function which applies a list of Bijectors' `log_det_jacobian`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts, event_ndims):
return sum([
b.forward_log_det_jacobian(sp, event_ndims=e)
for b, e, sp in zip(bijector, event_ndims, transformed_state_parts)
])
return fn | Makes a function which applies a list of Bijectors' `log_det_jacobian`s. | Below is the the instruction that describes the task:
### Input:
Makes a function which applies a list of Bijectors' `log_det_jacobian`s.
### Response:
def forward_log_det_jacobian_fn(bijector):
"""Makes a function which applies a list of Bijectors' `log_det_jacobian`s."""
if not mcmc_util.is_list_like(bijector):
bijector = [bijector]
def fn(transformed_state_parts, event_ndims):
return sum([
b.forward_log_det_jacobian(sp, event_ndims=e)
for b, e, sp in zip(bijector, event_ndims, transformed_state_parts)
])
return fn |
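A hedged sketch assuming this helper sits next to TensorFlow Probability's MCMC utilities (the mcmc_util reference suggests as much); the bijectors, shapes and event_ndims below are invented for illustration:
import tensorflow as tf
import tensorflow_probability as tfp

fldj_fn = forward_log_det_jacobian_fn(
    [tfp.bijectors.Exp(), tfp.bijectors.Softplus()])
parts = [tf.constant([0.5, 1.0]), tf.constant([0.2])]
# One event_ndims entry per state part; results are summed across parts.
total_fldj = fldj_fn(parts, event_ndims=[0, 0])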
def decode_randomness(randomness: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` is expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(randomness, 16)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
)) | Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` is expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII | Below is the instruction that describes the task:
### Input:
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` is expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
### Response:
def decode_randomness(randomness: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` is expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(randomness, 16)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
)) |
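A round-trip sketch, assuming this function lives in ulid-py's base32 module alongside a matching encode_randomness (an assumption based on the DECODING table and str_to_bytes helper it references):
raw = bytes(range(10))            # exactly 10 bytes of "randomness"
encoded = encode_randomness(raw)  # assumed: 16-char Crockford Base32 string
assert decode_randomness(encoded) == raw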
def uptodate(name,
software=True,
drivers=False,
skip_hidden=False,
skip_mandatory=False,
skip_reboot=True,
categories=None,
severities=None,):
'''
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
wua = salt.utils.win_update.WindowsUpdateAgent()
available_updates = wua.available(
skip_hidden=skip_hidden, skip_installed=True,
skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
software=software, drivers=drivers, categories=categories,
severities=severities)
# No updates found
if available_updates.count() == 0:
ret['comment'] = 'No updates found'
return ret
updates = list(available_updates.list().keys())
# Search for updates
install_list = wua.search(updates)
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsInstalled):
install.updates.Add(item)
# Return comment of changes if test.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Updates will be installed:'
for update in install.updates:
ret['comment'] += '\n'
ret['comment'] += ': '.join(
[update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# Verify the installation
for item in install.list():
if not salt.utils.data.is_true(post_info[item]['Installed']):
ret['changes']['failed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'KBs': post_info[item]['KBs']}
}
ret['result'] = False
else:
ret['changes']['installed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'NeedsReboot': post_info[item]['NeedsReboot'],
'KBs': post_info[item]['KBs']}
}
if ret['changes'].get('failed', False):
ret['comment'] = 'Updates failed'
else:
ret['comment'] = 'Updates installed successfully'
return ret | Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical | Below is the instruction that describes the task:
### Input:
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
### Response:
def uptodate(name,
software=True,
drivers=False,
skip_hidden=False,
skip_mandatory=False,
skip_reboot=True,
categories=None,
severities=None,):
'''
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
wua = salt.utils.win_update.WindowsUpdateAgent()
available_updates = wua.available(
skip_hidden=skip_hidden, skip_installed=True,
skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
software=software, drivers=drivers, categories=categories,
severities=severities)
# No updates found
if available_updates.count() == 0:
ret['comment'] = 'No updates found'
return ret
updates = list(available_updates.list().keys())
# Search for updates
install_list = wua.search(updates)
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.data.is_true(item.IsInstalled):
install.updates.Add(item)
# Return comment of changes if test.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Updates will be installed:'
for update in install.updates:
ret['comment'] += '\n'
ret['comment'] += ': '.join(
[update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# Verify the installation
for item in install.list():
if not salt.utils.data.is_true(post_info[item]['Installed']):
ret['changes']['failed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'KBs': post_info[item]['KBs']}
}
ret['result'] = False
else:
ret['changes']['installed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'NeedsReboot': post_info[item]['NeedsReboot'],
'KBs': post_info[item]['KBs']}
}
if ret['changes'].get('failed', False):
ret['comment'] = 'Updates failed'
else:
ret['comment'] = 'Updates installed successfully'
return ret |
def _retrieve_config_xml(config_xml, saltenv):
'''
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
'''
ret = __salt__['cp.cache_file'](config_xml, saltenv)
if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
return ret | Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path. | Below is the instruction that describes the task:
### Input:
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
### Response:
def _retrieve_config_xml(config_xml, saltenv):
'''
Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path.
'''
ret = __salt__['cp.cache_file'](config_xml, saltenv)
if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
return ret |
def reconnect(self):
"""Reconnect after the connection drops."""
self.closed = True
self.connect()
if self.debug:
print("reconnect to {}".format((self.hostname, self.port))) | Reconnect after the connection drops. | Below is the instruction that describes the task:
### Input:
Reconnect after the connection drops.
### Response:
def reconnect(self):
"""Reconnect after the connection drops."""
self.closed = True
self.connect()
if self.debug:
print("reconnect to {}".format((self.hostname, self.port))) |
def parse(self, contents):
"""Parse the document.
:param contents: The text contents of the document.
:rtype: a *generator* of tokenized text.
"""
i = 0
for text in contents.split(self.delim):
if not len(text.strip()):
continue
words = text.split()
char_offsets = [0] + [
int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
]
text = " ".join(words)
yield {
"text": text,
"words": words,
"pos_tags": [""] * len(words),
"ner_tags": [""] * len(words),
"lemmas": [""] * len(words),
"dep_parents": [0] * len(words),
"dep_labels": [""] * len(words),
"char_offsets": char_offsets,
"abs_char_offsets": char_offsets,
}
i += 1 | Parse the document.
:param contents: The text contents of the document.
:rtype: a *generator* of tokenized text. | Below is the instruction that describes the task:
### Input:
Parse the document.
:param contents: The text contents of the document.
:rtype: a *generator* of tokenized text.
### Response:
def parse(self, contents):
"""Parse the document.
:param contents: The text contents of the document.
:rtype: a *generator* of tokenized text.
"""
i = 0
for text in contents.split(self.delim):
if not len(text.strip()):
continue
words = text.split()
char_offsets = [0] + [
int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
]
text = " ".join(words)
yield {
"text": text,
"words": words,
"pos_tags": [""] * len(words),
"ner_tags": [""] * len(words),
"lemmas": [""] * len(words),
"dep_parents": [0] * len(words),
"dep_labels": [""] * len(words),
"char_offsets": char_offsets,
"abs_char_offsets": char_offsets,
}
i += 1 |
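A minimal smoke test for the tokenizer above, assuming the `parse` function is importable at module scope and numpy is installed; the `WhitespaceParser` host class here is made up for illustration:

import numpy as np  # parse() uses np.cumsum to build the character offsets

class WhitespaceParser:      # hypothetical host class for the method above
    delim = "\n"             # treat each line as one document
    parse = parse            # bind the function shown above as a method

for sent in WhitespaceParser().parse("Hello world\nGood bye"):
    print(sent["words"], sent["char_offsets"])
# ['Hello', 'world'] [0, 6]
# ['Good', 'bye'] [0, 5]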
def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) | Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'. | Below is the instruction that describes the task:
### Input:
Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
### Response:
def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) |
def onThemeColor(self, color, item):
"""pass theme colors to bottom panel"""
bconf = self.panel_bot.conf
if item == 'grid':
bconf.set_gridcolor(color)
elif item == 'bg':
bconf.set_bgcolor(color)
elif item == 'frame':
bconf.set_framecolor(color)
elif item == 'text':
bconf.set_textcolor(color)
bconf.canvas.draw() | pass theme colors to bottom panel | Below is the instruction that describes the task:
### Input:
pass theme colors to bottom panel
### Response:
def onThemeColor(self, color, item):
"""pass theme colors to bottom panel"""
bconf = self.panel_bot.conf
if item == 'grid':
bconf.set_gridcolor(color)
elif item == 'bg':
bconf.set_bgcolor(color)
elif item == 'frame':
bconf.set_framecolor(color)
elif item == 'text':
bconf.set_textcolor(color)
bconf.canvas.draw() |
def from_dict(data, ctx):
"""
Instantiate a new QuoteHomeConversionFactors from a dict (generally
from loading a JSON response). The data used to instantiate the
QuoteHomeConversionFactors is a shallow copy of the dict passed in,
with any complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('positiveUnits') is not None:
data['positiveUnits'] = ctx.convert_decimal_number(
data.get('positiveUnits')
)
if data.get('negativeUnits') is not None:
data['negativeUnits'] = ctx.convert_decimal_number(
data.get('negativeUnits')
)
return QuoteHomeConversionFactors(**data) | Instantiate a new QuoteHomeConversionFactors from a dict (generally
from loading a JSON response). The data used to instantiate the
QuoteHomeConversionFactors is a shallow copy of the dict passed in,
with any complex child types instantiated appropriately. | Below is the instruction that describes the task:
### Input:
Instantiate a new QuoteHomeConversionFactors from a dict (generally
from loading a JSON response). The data used to instantiate the
QuoteHomeConversionFactors is a shallow copy of the dict passed in,
with any complex child types instantiated appropriately.
### Response:
def from_dict(data, ctx):
"""
Instantiate a new QuoteHomeConversionFactors from a dict (generally
from loading a JSON response). The data used to instantiate the
QuoteHomeConversionFactors is a shallow copy of the dict passed in,
with any complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('positiveUnits') is not None:
data['positiveUnits'] = ctx.convert_decimal_number(
data.get('positiveUnits')
)
if data.get('negativeUnits') is not None:
data['negativeUnits'] = ctx.convert_decimal_number(
data.get('negativeUnits')
)
return QuoteHomeConversionFactors(**data) |
def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`SetOptions`.
"""
def assert_option_array(x):
if x is None:
return []
if not isinstance(x, list):
return [x]
return x
if self.inflation_dest is not None:
inflation_dest = [account_xdr_object(self.inflation_dest)]
else:
inflation_dest = []
self.clear_flags = assert_option_array(self.clear_flags)
self.set_flags = assert_option_array(self.set_flags)
self.master_weight = assert_option_array(self.master_weight)
self.low_threshold = assert_option_array(self.low_threshold)
self.med_threshold = assert_option_array(self.med_threshold)
self.high_threshold = assert_option_array(self.high_threshold)
self.home_domain = assert_option_array(self.home_domain)
req_signer_fields = (self.signer_address, self.signer_type,
self.signer_weight)
if all(signer_field is not None for signer_field in req_signer_fields):
signer = [
Xdr.types.Signer(
signer_key_xdr_object(self.signer_type,
self.signer_address),
self.signer_weight)
]
else:
signer = []
set_options_op = Xdr.types.SetOptionsOp(
inflation_dest, self.clear_flags, self.set_flags,
self.master_weight, self.low_threshold, self.med_threshold,
self.high_threshold, self.home_domain, signer)
self.body.type = Xdr.const.SET_OPTIONS
self.body.setOptionsOp = set_options_op
return super(SetOptions, self).to_xdr_object() | Creates an XDR Operation object that represents this
:class:`SetOptions`. | Below is the instruction that describes the task:
### Input:
Creates an XDR Operation object that represents this
:class:`SetOptions`.
### Response:
def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`SetOptions`.
"""
def assert_option_array(x):
if x is None:
return []
if not isinstance(x, list):
return [x]
return x
if self.inflation_dest is not None:
inflation_dest = [account_xdr_object(self.inflation_dest)]
else:
inflation_dest = []
self.clear_flags = assert_option_array(self.clear_flags)
self.set_flags = assert_option_array(self.set_flags)
self.master_weight = assert_option_array(self.master_weight)
self.low_threshold = assert_option_array(self.low_threshold)
self.med_threshold = assert_option_array(self.med_threshold)
self.high_threshold = assert_option_array(self.high_threshold)
self.home_domain = assert_option_array(self.home_domain)
req_signer_fields = (self.signer_address, self.signer_type,
self.signer_weight)
if all(signer_field is not None for signer_field in req_signer_fields):
signer = [
Xdr.types.Signer(
signer_key_xdr_object(self.signer_type,
self.signer_address),
self.signer_weight)
]
else:
signer = []
set_options_op = Xdr.types.SetOptionsOp(
inflation_dest, self.clear_flags, self.set_flags,
self.master_weight, self.low_threshold, self.med_threshold,
self.high_threshold, self.home_domain, signer)
self.body.type = Xdr.const.SET_OPTIONS
self.body.setOptionsOp = set_options_op
return super(SetOptions, self).to_xdr_object() |
def destroy(self):
"""Destroy an app and all its add-ons"""
result = self._result(
["heroku", "apps:destroy", "--app", self.name, "--confirm", self.name]
)
return result | Destroy an app and all its add-ons | Below is the instruction that describes the task:
### Input:
Destroy an app and all its add-ons
### Response:
def destroy(self):
"""Destroy an app and all its add-ons"""
result = self._result(
["heroku", "apps:destroy", "--app", self.name, "--confirm", self.name]
)
return result |
def set_logging(self, log_level=logging.ERROR, file_path_name=None):
"""
This function allows you to change the logging backend, using either output or a file as the backend.
It also allows you to set the logging level (whether to display only critical/error/info/debug).
for example::
yag = yagmail.SMTP()
yag.set_logging(yagmail.logging.DEBUG) # to see everything
and::
yagmail.set_logging(yagmail.logging.DEBUG, 'somelocalfile.log')
lastly, a log_level of :py:class:`None` will make sure there is no I/O.
"""
self.log = get_logger(log_level, file_path_name) | This function allows you to change the logging backend, using either output or a file as the backend.
It also allows you to set the logging level (whether to display only critical/error/info/debug).
for example::
yag = yagmail.SMTP()
yag.set_logging(yagmail.logging.DEBUG) # to see everything
and::
yagmail.set_logging(yagmail.logging.DEBUG, 'somelocalfile.log')
lastly, a log_level of :py:class:`None` will make sure there is no I/O. | Below is the instruction that describes the task:
### Input:
This function allows you to change the logging backend, using either output or a file as the backend.
It also allows you to set the logging level (whether to display only critical/error/info/debug).
for example::
yag = yagmail.SMTP()
yag.set_logging(yagmail.logging.DEBUG) # to see everything
and::
yagmail.set_logging(yagmail.logging.DEBUG, 'somelocalfile.log')
lastly, a log_level of :py:class:`None` will make sure there is no I/O.
### Response:
def set_logging(self, log_level=logging.ERROR, file_path_name=None):
"""
This function allows you to change the logging backend, using either output or a file as the backend.
It also allows you to set the logging level (whether to display only critical/error/info/debug).
for example::
yag = yagmail.SMTP()
yag.set_logging(yagmail.logging.DEBUG) # to see everything
and::
yagmail.set_logging(yagmail.logging.DEBUG, 'somelocalfile.log')
lastly, a log_level of :py:class:`None` will make sure there is no I/O.
"""
self.log = get_logger(log_level, file_path_name) |
def _previous(self):
"""Get the previous summary and present it."""
self.summaries.rotate()
current_summary = self.summaries[0]
self._update_summary(current_summary) | Get the previous summary and present it. | Below is the instruction that describes the task:
### Input:
Get the previous summary and present it.
### Response:
def _previous(self):
"""Get the previous summary and present it."""
self.summaries.rotate()
current_summary = self.summaries[0]
self._update_summary(current_summary) |
def _init_id2gos(assoc_fn): ##, no_top=False):
"""
Reads a gene id go term association file. The format of the file
is as follows:
AAR1 GO:0005575;GO:0003674;GO:0006970;GO:0006970;GO:0040029
AAR2 GO:0005575;GO:0003674;GO:0040029;GO:0009845
ACD5 GO:0005575;GO:0003674;GO:0008219
ACL1 GO:0005575;GO:0003674;GO:0009965;GO:0010073
ACL2 GO:0005575;GO:0003674;GO:0009826
ACL3 GO:0005575;GO:0003674;GO:0009826;GO:0009965
Also, the following format is accepted (gene ids are repeated):
AAR1 GO:0005575
AAR1 GO:0003674
AAR1 GO:0006970
AAR2 GO:0005575
AAR2 GO:0003674
AAR2 GO:0040029
:param assoc_fn: file name of the association
:return: dictionary having keys: gene id, values set of GO terms
"""
assoc = cx.defaultdict(set)
## top_terms = set(['GO:0008150', 'GO:0003674', 'GO:0005575']) # BP, MF, CC
for row in open(assoc_fn, 'r'):
atoms = row.split()
if len(atoms) == 2:
gene_id, go_terms = atoms
elif len(atoms) > 2 and row.count('\t') == 1:
gene_id, go_terms = row.split("\t")
else:
continue
gos = set(go_terms.split(";"))
## if no_top:
## gos = gos.difference(top_terms)
assoc[gene_id] |= gos
return assoc | Reads a gene id go term association file. The format of the file
is as follows:
AAR1 GO:0005575;GO:0003674;GO:0006970;GO:0006970;GO:0040029
AAR2 GO:0005575;GO:0003674;GO:0040029;GO:0009845
ACD5 GO:0005575;GO:0003674;GO:0008219
ACL1 GO:0005575;GO:0003674;GO:0009965;GO:0010073
ACL2 GO:0005575;GO:0003674;GO:0009826
ACL3 GO:0005575;GO:0003674;GO:0009826;GO:0009965
Also, the following format is accepted (gene ids are repeated):
AAR1 GO:0005575
AAR1 GO:0003674
AAR1 GO:0006970
AAR2 GO:0005575
AAR2 GO:0003674
AAR2 GO:0040029
:param assoc_fn: file name of the association
:return: dictionary having keys: gene id, values set of GO terms | Below is the instruction that describes the task:
### Input:
Reads a gene id go term association file. The format of the file
is as follows:
AAR1 GO:0005575;GO:0003674;GO:0006970;GO:0006970;GO:0040029
AAR2 GO:0005575;GO:0003674;GO:0040029;GO:0009845
ACD5 GO:0005575;GO:0003674;GO:0008219
ACL1 GO:0005575;GO:0003674;GO:0009965;GO:0010073
ACL2 GO:0005575;GO:0003674;GO:0009826
ACL3 GO:0005575;GO:0003674;GO:0009826;GO:0009965
Also, the following format is accepted (gene ids are repeated):
AAR1 GO:0005575
AAR1 GO:0003674
AAR1 GO:0006970
AAR2 GO:0005575
AAR2 GO:0003674
AAR2 GO:0040029
:param assoc_fn: file name of the association
:return: dictionary having keys: gene id, values set of GO terms
### Response:
def _init_id2gos(assoc_fn): ##, no_top=False):
"""
Reads a gene id go term association file. The format of the file
is as follows:
AAR1 GO:0005575;GO:0003674;GO:0006970;GO:0006970;GO:0040029
AAR2 GO:0005575;GO:0003674;GO:0040029;GO:0009845
ACD5 GO:0005575;GO:0003674;GO:0008219
ACL1 GO:0005575;GO:0003674;GO:0009965;GO:0010073
ACL2 GO:0005575;GO:0003674;GO:0009826
ACL3 GO:0005575;GO:0003674;GO:0009826;GO:0009965
Also, the following format is accepted (gene ids are repeated):
AAR1 GO:0005575
AAR1 GO:0003674
AAR1 GO:0006970
AAR2 GO:0005575
AAR2 GO:0003674
AAR2 GO:0040029
:param assoc_fn: file name of the association
:return: dictionary having keys: gene id, values set of GO terms
"""
assoc = cx.defaultdict(set)
## top_terms = set(['GO:0008150', 'GO:0003674', 'GO:0005575']) # BP, MF, CC
for row in open(assoc_fn, 'r'):
atoms = row.split()
if len(atoms) == 2:
gene_id, go_terms = atoms
elif len(atoms) > 2 and row.count('\t') == 1:
gene_id, go_terms = row.split("\t")
else:
continue
gos = set(go_terms.split(";"))
## if no_top:
## gos = gos.difference(top_terms)
assoc[gene_id] |= gos
return assoc |
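A quick sanity check for the reader, assuming the function above is importable and `cx` is `collections` imported under that alias; the file path and gene ids are throwaway test values:

import os, tempfile

with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as fh:
    fh.write("AAR1 GO:0005575;GO:0003674\n")  # space-separated layout
    fh.write("AAR2\tGO:0005575\n")            # tab-separated layout
    path = fh.name

assoc = _init_id2gos(path)
print(sorted(assoc["AAR1"]))  # ['GO:0003674', 'GO:0005575']
print(assoc["AAR2"])          # {'GO:0005575'}
os.remove(path)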
def request_args(self):
'''
Returns the arguments passed with the request in a dictionary.
Returns both URL resolved arguments and query string arguments.
'''
kwargs = {}
kwargs.update(self.request.match_info.items())
kwargs.update(self.request.query.items())
return kwargs | Returns the arguments passed with the request in a dictionary.
Returns both URL resolved arguments and query string arguments. | Below is the instruction that describes the task:
### Input:
Returns the arguments passed with the request in a dictionary.
Returns both URL resolved arguments and query string arguments.
### Response:
def request_args(self):
'''
Returns the arguments passed with the request in a dictionary.
Returns both URL resolved arguments and query string arguments.
'''
kwargs = {}
kwargs.update(self.request.match_info.items())
kwargs.update(self.request.query.items())
return kwargs |
def _batches(iterable, size):
"""
Take an iterator and yield its contents in groups of `size` items.
"""
sourceiter = iter(iterable)
while True:
try:
batchiter = islice(sourceiter, size)
yield chain([next(batchiter)], batchiter)
except StopIteration:
return | Take an iterator and yield its contents in groups of `size` items. | Below is the instruction that describes the task:
### Input:
Take an iterator and yield its contents in groups of `size` items.
### Response:
def _batches(iterable, size):
"""
Take an iterator and yield its contents in groups of `size` items.
"""
sourceiter = iter(iterable)
while True:
try:
batchiter = islice(sourceiter, size)
yield chain([next(batchiter)], batchiter)
except StopIteration:
return |
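Each yielded batch is a lazy slice over the shared source iterator, so it must be consumed before the next batch is requested; a short sketch (including the itertools imports the helper itself relies on):

from itertools import chain, islice

for batch in _batches(range(10), 4):
    print(list(batch))  # consume the batch before advancing
# [0, 1, 2, 3]
# [4, 5, 6, 7]
# [8, 9]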
def click(self, selector, btn=0):
"""
Click the targeted element.
:param selector: A CSS3 selector to targeted element.
:param btn: The number of mouse button.
0 - left button,
1 - middle button,
2 - right button
"""
return self.evaluate("""
(function () {{
var element = document.querySelector({0});
var evt = document.createEvent("MouseEvents");
evt.initMouseEvent("click", true, true, window, 1, 1, 1, 1, 1,
false, false, false, false, {1}, element);
return element.dispatchEvent(evt);
}})();
""".format(repr(selector), repr(btn))) | Click the targeted element.
:param selector: A CSS3 selector to targeted element.
:param btn: The number of mouse button.
0 - left button,
1 - middle button,
2 - right button | Below is the instruction that describes the task:
### Input:
Click the targeted element.
:param selector: A CSS3 selector to targeted element.
:param btn: The number of mouse button.
0 - left button,
1 - middle button,
2 - right button
### Response:
def click(self, selector, btn=0):
"""
Click the targeted element.
:param selector: A CSS3 selector to targeted element.
:param btn: The number of mouse button.
0 - left button,
1 - middle button,
2 - right button
"""
return self.evaluate("""
(function () {{
var element = document.querySelector({0});
var evt = document.createEvent("MouseEvents");
evt.initMouseEvent("click", true, true, window, 1, 1, 1, 1, 1,
false, false, false, false, {1}, element);
return element.dispatchEvent(evt);
}})();
""".format(repr(selector), repr(btn))) |
def display_event(div, attributes=[]):
"""
Function to build a suitable CustomJS to display the current event
in the div model.
"""
style = 'float: left; clear: left; font-size: 10pt'
return CustomJS(args=dict(div=div), code="""
var attrs = %s;
var args = [];
for (var i = 0; i<attrs.length; i++ ) {
var val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {
return val.toFixed ? Number(val.toFixed(2)) : val;
})
args.push(attrs[i] + '=' + val)
}
var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n";
var text = div.text.concat(line);
var lines = text.split("\\n")
if (lines.length > 35)
lines.shift();
div.text = lines.join("\\n");
""" % (attributes, style)) | Function to build a suitable CustomJS to display the current event
in the div model. | Below is the instruction that describes the task:
### Input:
Function to build a suitable CustomJS to display the current event
in the div model.
### Response:
def display_event(div, attributes=[]):
"""
Function to build a suitable CustomJS to display the current event
in the div model.
"""
style = 'float: left; clear: left; font-size: 10pt'
return CustomJS(args=dict(div=div), code="""
var attrs = %s;
var args = [];
for (var i = 0; i<attrs.length; i++ ) {
var val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {
return val.toFixed ? Number(val.toFixed(2)) : val;
})
args.push(attrs[i] + '=' + val)
}
var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n";
var text = div.text.concat(line);
var lines = text.split("\\n")
if (lines.length > 35)
lines.shift();
div.text = lines.join("\\n");
""" % (attributes, style)) |
def fitPlaneLSQ(XYZ):
"""Fit a plane to input point data using LSQ
"""
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff | Fit a plane to input point data using LSQ | Below is the instruction that describes the task:
### Input:
Fit a plane to input point data using LSQ
### Response:
def fitPlaneLSQ(XYZ):
"""Fit a plane to input point data using LSQ
"""
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff |
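A small self-check, assuming numpy is imported as np: noise-free points sampled from z = 2x + 3y + 1 should recover the coefficients in the column order of G (x, y, constant):

import numpy as np

xy = np.random.rand(50, 2)
z = 2 * xy[:, 0] + 3 * xy[:, 1] + 1
coeff = fitPlaneLSQ(np.column_stack([xy, z]))
print(np.round(coeff, 6))  # [2. 3. 1.]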
def default_vrf_unicast_address_family(self, **kwargs):
"""Create default address family (ipv4/ipv6) under router bgp.
Args:
afi (str): Address family to configure. (ipv4, ipv6)
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the redistribute connected under default vrf
if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'afi' is not expected
AttributeError: if 'afi' is not in ipv4,ipv6.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... device.bgp.default_vrf_unicast_address_family(delete=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(get=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(
... rbridge_id='4', afi='ipv6')
"""
afi = kwargs.pop('afi', 'ipv4')
rbridge_id = kwargs.pop('rbridge_id', '1')
delete = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
addr_family = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family'
'_{0}_{0}_unicast_default_vrf_default_vrf_'
'selected'.format(afi))
neighbor_args = dict(rbridge_id=rbridge_id)
config = addr_family(**neighbor_args)
result = False
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
if output.data.findall('.//{*}default-vrf-selected') != []:
result = True
elif delete:
config.find('.//*af-vrf').set('operation', 'delete')
result = callback(config)
else:
result = callback(config)
return result | Create default address family (ipv4/ipv6) under router bgp.
Args:
afi (str): Address family to configure. (ipv4, ipv6)
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the redistribute connected under default vrf
if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'afi' is not expected
AttributeError: if 'afi' is not in ipv4,ipv6.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... device.bgp.default_vrf_unicast_address_family(delete=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(get=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(
... rbridge_id='4', afi='ipv6') | Below is the instruction that describes the task:
### Input:
Create default address family (ipv4/ipv6) under router bgp.
Args:
afi (str): Address family to configure. (ipv4, ipv6)
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the redistribute connected under default vrf
if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'afi' is not expected
AttributeError: if 'afi' is not in ipv4,ipv6.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... device.bgp.default_vrf_unicast_address_family(delete=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(get=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(
... rbridge_id='4', afi='ipv6')
### Response:
def default_vrf_unicast_address_family(self, **kwargs):
"""Create default address family (ipv4/ipv6) under router bgp.
Args:
afi (str): Address family to configure. (ipv4, ipv6)
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the redistribute connected under default vrf
if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'afi' is not expected
AttributeError: if 'afi' is not in ipv4,ipv6.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... device.bgp.default_vrf_unicast_address_family(delete=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(get=True,
... rbridge_id='4')
... device.bgp.default_vrf_unicast_address_family(
... rbridge_id='4', afi='ipv6')
"""
afi = kwargs.pop('afi', 'ipv4')
rbridge_id = kwargs.pop('rbridge_id', '1')
delete = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
if afi not in ['ipv4', 'ipv6']:
raise AttributeError('Invalid AFI.')
addr_family = getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family'
'_{0}_{0}_unicast_default_vrf_default_vrf_'
'selected'.format(afi))
neighbor_args = dict(rbridge_id=rbridge_id)
config = addr_family(**neighbor_args)
result = False
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
if output.data.findall('.//{*}default-vrf-selected') != []:
result = True
elif delete:
config.find('.//*af-vrf').set('operation', 'delete')
result = callback(config)
else:
result = callback(config)
return result |
def showActionToolTip(self):
"""
Shows the tool tip of the action that is currently being hovered over.
:param action | <QAction>
"""
if ( not self.isVisible() ):
return
geom = self.actionGeometry(self._toolTipAction)
pos = self.mapToGlobal(QPoint(geom.left(), geom.top()))
pos.setY(pos.y() + geom.height())
tip = nativestring(self._toolTipAction.toolTip()).strip().strip('.')
text = nativestring(self._toolTipAction.text()).strip().strip('.')
# don't waste time showing the user what they already see
if ( tip == text ):
return
QToolTip.showText(pos, self._toolTipAction.toolTip()) | Shows the tool tip of the action that is currently being hovered over.
:param action | <QAction> | Below is the instruction that describes the task:
### Input:
Shows the tool tip of the action that is currently being hovered over.
:param action | <QAction>
### Response:
def showActionToolTip(self):
"""
Shows the tool tip of the action that is currently being hovered over.
:param action | <QAction>
"""
if ( not self.isVisible() ):
return
geom = self.actionGeometry(self._toolTipAction)
pos = self.mapToGlobal(QPoint(geom.left(), geom.top()))
pos.setY(pos.y() + geom.height())
tip = nativestring(self._toolTipAction.toolTip()).strip().strip('.')
text = nativestring(self._toolTipAction.text()).strip().strip('.')
# don't waste time showing the user what they already see
if ( tip == text ):
return
QToolTip.showText(pos, self._toolTipAction.toolTip()) |
def column_exists(cr, table, column):
""" Check whether a certain column exists """
cr.execute(
'SELECT count(attname) FROM pg_attribute '
'WHERE attrelid = '
'( SELECT oid FROM pg_class WHERE relname = %s ) '
'AND attname = %s',
(table, column))
return cr.fetchone()[0] == 1 | Check whether a certain column exists | Below is the instruction that describes the task:
### Input:
Check whether a certain column exists
### Response:
def column_exists(cr, table, column):
""" Check whether a certain column exists """
cr.execute(
'SELECT count(attname) FROM pg_attribute '
'WHERE attrelid = '
'( SELECT oid FROM pg_class WHERE relname = %s ) '
'AND attname = %s',
(table, column))
return cr.fetchone()[0] == 1 |
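A hypothetical migration-style usage against a live PostgreSQL connection; the DSN and the table/column names below are placeholders, not from the original source:

import psycopg2

conn = psycopg2.connect(dbname="mydb")  # placeholder connection details
cr = conn.cursor()
if not column_exists(cr, "res_partner", "legacy_ref"):
    cr.execute("ALTER TABLE res_partner ADD COLUMN legacy_ref varchar")
conn.commit()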
def parse_file(self, filename: str, entry: str=None) -> parsing.Node:
"""Parse filename using the grammar"""
self.from_string = False
import os.path
with open(filename, 'r') as f:
self.parsed_stream(f.read(), os.path.abspath(filename))
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry) | Parse filename using the grammar | Below is the instruction that describes the task:
### Input:
Parse filename using the grammar
### Response:
def parse_file(self, filename: str, entry: str=None) -> parsing.Node:
"""Parse filename using the grammar"""
self.from_string = False
import os.path
with open(filename, 'r') as f:
self.parsed_stream(f.read(), os.path.abspath(filename))
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry) |
def getState(self):
"""See comments in base class."""
return dict(_position = self._position,
position = self.getPosition(),
velocity = self._velocity,
bestPosition = self._bestPosition,
bestResult = self._bestResult) | See comments in base class. | Below is the instruction that describes the task:
### Input:
See comments in base class.
### Response:
def getState(self):
"""See comments in base class."""
return dict(_position = self._position,
position = self.getPosition(),
velocity = self._velocity,
bestPosition = self._bestPosition,
bestResult = self._bestResult) |
def humanize(number):
""" Return a human-readable string for number. """
# units = ('bytes', 'KB', 'MB', 'GB', 'TB')
# base = 1000
units = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB')
base = 1024
if number is None:
return None
pow = int(math.log(number, base)) if number > 0 else 0
pow = min(pow, len(units) - 1)
mantissa = number / (base ** pow)
return "%.4g %s" % (mantissa, units[pow]) | Return a human-readable string for number. | Below is the instruction that describes the task:
### Input:
Return a human-readable string for number.
### Response:
def humanize(number):
""" Return a human-readable string for number. """
# units = ('bytes', 'KB', 'MB', 'GB', 'TB')
# base = 1000
units = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB')
base = 1024
if number is None:
return None
pow = int(math.log(number, base)) if number > 0 else 0
pow = min(pow, len(units) - 1)
mantissa = number / (base ** pow)
return "%.4g %s" % (mantissa, units[pow]) |
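A few spot checks of the binary-unit formatting, assuming the function above is importable (it needs `math` in scope):

print(humanize(0))        # 0 bytes
print(humanize(1536))     # 1.5 KiB
print(humanize(10 ** 9))  # 953.7 MiB
print(humanize(None))     # None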
def get(path, objectType, user=None):
'''
Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.get c:\temp directory
'''
ret = {'Path': path,
'ACLs': []}
sidRet = _getUserSid(user)
if path and objectType:
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
tdacl = _get_dacl(path, objectTypeBit)
if tdacl:
for counter in range(0, tdacl.GetAceCount()):
tAce = tdacl.GetAce(counter)
if not sidRet['sid'] or (tAce[2] == sidRet['sid']):
ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
return ret | Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.get c:\temp directory | Below is the instruction that describes the task:
### Input:
Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.get c:\temp directory
### Response:
def get(path, objectType, user=None):
'''
Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.get c:\temp directory
'''
ret = {'Path': path,
'ACLs': []}
sidRet = _getUserSid(user)
if path and objectType:
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
tdacl = _get_dacl(path, objectTypeBit)
if tdacl:
for counter in range(0, tdacl.GetAceCount()):
tAce = tdacl.GetAce(counter)
if not sidRet['sid'] or (tAce[2] == sidRet['sid']):
ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
return ret |
def _pyfftw_rfftn_empty_aligned(shape, axes, dtype, order='C', n=None):
"""Patched version of :func:`sporco.linalg.pyfftw_rfftn_empty_aligned`.
"""
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order) | Patched version of :func:`sporco.linalg.pyfftw_rfftn_empty_aligned`. | Below is the instruction that describes the task:
### Input:
Patched version of :func:`sporco.linalg.pyfftw_rfftn_empty_aligned`.
### Response:
def _pyfftw_rfftn_empty_aligned(shape, axes, dtype, order='C', n=None):
"""Patched version of :func:`sporco.linalg.pyfftw_rfftn_empty_aligned`.
"""
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order) |
def trim_prefix(text, nchr):
"""Trim characters off of the beginnings of text lines.
Parameters
----------
text : str
The text to be trimmed, with newlines (\n) separating lines
nchr: int
The number of spaces to trim off the beginning of a line if
it starts with that many spaces
Returns
-------
text : str
The trimmed text
"""
res = []
for line in text.split('\n'):
if line.startswith(' ' * nchr):
line = line[nchr:]
res.append(line)
return '\n'.join(res) | Trim characters off of the beginnings of text lines.
Parameters
----------
text : str
The text to be trimmed, with newlines (\n) separating lines
nchr: int
The number of spaces to trim off the beginning of a line if
it starts with that many spaces
Returns
-------
text : str
The trimmed text | Below is the instruction that describes the task:
### Input:
Trim characters off of the beginnings of text lines.
Parameters
----------
text : str
The text to be trimmed, with newlines (\n) separating lines
nchr: int
The number of spaces to trim off the beginning of a line if
it starts with that many spaces
Returns
-------
text : str
The trimmed text
### Response:
def trim_prefix(text, nchr):
"""Trim characters off of the beginnings of text lines.
Parameters
----------
text : str
The text to be trimmed, with newlines (\n) separating lines
nchr: int
The number of spaces to trim off the beginning of a line if
it starts with that many spaces
Returns
-------
text : str
The trimmed text
"""
res = []
for line in text.split('\n'):
if line.startswith(' ' * nchr):
line = line[nchr:]
res.append(line)
return '\n'.join(res) |
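For example, trimming four leading spaces leaves shorter-indented lines untouched; a quick sketch assuming the function above is importable:

text = "    indented\n  less indented\nflush"
print(trim_prefix(text, 4))
# indented
#   less indented
# flush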
def sync_local_to_changes(org, syncer, fetches, deleted_fetches, progress_callback=None):
"""
Sync local instances against iterators which return fetches of changed and deleted remote objects.
:param * org: the org
:param * syncer: the local model syncer
:param * fetches: an iterator returning fetches of modified remote objects
:param * deleted_fetches: an iterator returning fetches of deleted remote objects
:param * progress_callback: callable for tracking progress - called for each fetch with number of contacts fetched
:return: tuple containing counts of created, updated and deleted local instances
"""
num_synced = 0
outcome_counts = defaultdict(int)
for fetch in fetches:
for remote in fetch:
outcome = sync_from_remote(org, syncer, remote)
outcome_counts[outcome] += 1
num_synced += len(fetch)
if progress_callback:
progress_callback(num_synced)
# any item that has been deleted remotely should also be released locally
for deleted_fetch in deleted_fetches:
for deleted_remote in deleted_fetch:
identity = syncer.identify_remote(deleted_remote)
with syncer.lock(org, identity):
existing = syncer.fetch_local(org, identity)
if existing:
syncer.delete_local(existing)
outcome_counts[SyncOutcome.deleted] += 1
num_synced += len(deleted_fetch)
if progress_callback:
progress_callback(num_synced)
return (
outcome_counts[SyncOutcome.created],
outcome_counts[SyncOutcome.updated],
outcome_counts[SyncOutcome.deleted],
outcome_counts[SyncOutcome.ignored],
) | Sync local instances against iterators which return fetches of changed and deleted remote objects.
:param * org: the org
:param * syncer: the local model syncer
:param * fetches: an iterator returning fetches of modified remote objects
:param * deleted_fetches: an iterator returning fetches of deleted remote objects
:param * progress_callback: callable for tracking progress - called for each fetch with number of contacts fetched
:return: tuple containing counts of created, updated and deleted local instances | Below is the instruction that describes the task:
### Input:
Sync local instances against iterators which return fetches of changed and deleted remote objects.
:param * org: the org
:param * syncer: the local model syncer
:param * fetches: an iterator returning fetches of modified remote objects
:param * deleted_fetches: an iterator returning fetches of deleted remote objects
:param * progress_callback: callable for tracking progress - called for each fetch with number of contacts fetched
:return: tuple containing counts of created, updated and deleted local instances
### Response:
def sync_local_to_changes(org, syncer, fetches, deleted_fetches, progress_callback=None):
"""
Sync local instances against iterators which return fetches of changed and deleted remote objects.
:param * org: the org
:param * syncer: the local model syncer
:param * fetches: an iterator returning fetches of modified remote objects
:param * deleted_fetches: an iterator returning fetches of deleted remote objects
:param * progress_callback: callable for tracking progress - called for each fetch with number of contacts fetched
:return: tuple containing counts of created, updated and deleted local instances
"""
num_synced = 0
outcome_counts = defaultdict(int)
for fetch in fetches:
for remote in fetch:
outcome = sync_from_remote(org, syncer, remote)
outcome_counts[outcome] += 1
num_synced += len(fetch)
if progress_callback:
progress_callback(num_synced)
# any item that has been deleted remotely should also be released locally
for deleted_fetch in deleted_fetches:
for deleted_remote in deleted_fetch:
identity = syncer.identify_remote(deleted_remote)
with syncer.lock(org, identity):
existing = syncer.fetch_local(org, identity)
if existing:
syncer.delete_local(existing)
outcome_counts[SyncOutcome.deleted] += 1
num_synced += len(deleted_fetch)
if progress_callback:
progress_callback(num_synced)
return (
outcome_counts[SyncOutcome.created],
outcome_counts[SyncOutcome.updated],
outcome_counts[SyncOutcome.deleted],
outcome_counts[SyncOutcome.ignored],
) |
def get_socket(host, port, timeout=None):
"""
Return a socket.
:param str host: the hostname to connect to
:param int port: the port number to connect to
:param timeout: if specified, set the socket timeout
"""
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not None:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except error:
if sock is not None:
sock.close()
raise error | Return a socket.
:param str host: the hostname to connect to
:param int port: the port number to connect to
:param timeout: if specified, set the socket timeout | Below is the instruction that describes the task:
### Input:
Return a socket.
:param str host: the hostname to connect to
:param int port: the port number to connect to
:param timeout: if specified, set the socket timeout
### Response:
def get_socket(host, port, timeout=None):
"""
Return a socket.
:param str host: the hostname to connect to
:param int port: the port number to connect to
:param timeout: if specified, set the socket timeout
"""
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not None:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except error:
if sock is not None:
sock.close()
raise error |
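A hedged usage sketch probing a local HTTP service; the host, port, and request bytes are placeholders, and the helper itself needs the `socket` module names (getaddrinfo, socket, SOCK_STREAM, error) in scope:

sock = get_socket("localhost", 80, timeout=5.0)
try:
    sock.sendall(b"HEAD / HTTP/1.0\r\n\r\n")
    print(sock.recv(64))
finally:
    sock.close()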
def _grow(growth, walls, target, i, j, steps, new_steps, res):
'''
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
'''
# clean array:
growth[:] = 0
if target[i, j]:
# pixel is in target
res[0] = 1
res[1] = i
res[2] = j
return
step = 1
s0, s1 = growth.shape
step_len = 1
new_step_ind = 0
steps[new_step_ind, 0] = i
steps[new_step_ind, 1] = j
growth[i, j] = 1
while True:
for n in range(step_len):
i, j = steps[n]
for ii, jj in DIRECT_NEIGHBOURS:
pi = i + ii
pj = j + jj
# if in image:
if 0 <= pi < s0 and 0 <= pj < s1:
# if the growth array is empty and there are no walls:
# fill growth with current step
if growth[pi, pj] == 0 and not walls[pi, pj]:
growth[pi, pj] = step
if target[pi, pj]:
# found destination
res[0] = 1
res[1] = pi
res[2] = pj
return
new_steps[new_step_ind, 0] = pi
new_steps[new_step_ind, 1] = pj
new_step_ind += 1
if new_step_ind == 0:
# couldn't populate any more because growth is full
# and all possible steps are gone
res[0] = 0
return
step += 1
steps, new_steps = new_steps, steps
step_len = new_step_ind
new_step_ind = 0 | fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process | Below is the instruction that describes the task:
### Input:
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
### Response:
def _grow(growth, walls, target, i, j, steps, new_steps, res):
'''
fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process
'''
# clean array:
growth[:] = 0
if target[i, j]:
# pixel is in target
res[0] = 1
res[1] = i
res[2] = j
return
step = 1
s0, s1 = growth.shape
step_len = 1
new_step_ind = 0
steps[new_step_ind, 0] = i
steps[new_step_ind, 1] = j
growth[i, j] = 1
while True:
for n in range(step_len):
i, j = steps[n]
for ii, jj in DIRECT_NEIGHBOURS:
pi = i + ii
pj = j + jj
# if in image:
if 0 <= pi < s0 and 0 <= pj < s1:
# if the growth array is empty and there are no walls:
# fill growth with current step
if growth[pi, pj] == 0 and not walls[pi, pj]:
growth[pi, pj] = step
if target[pi, pj]:
# found destination
res[0] = 1
res[1] = pi
res[2] = pj
return
new_steps[new_step_ind, 0] = pi
new_steps[new_step_ind, 1] = pj
new_step_ind += 1
if new_step_ind == 0:
# couldn't populate any more because growth is full
# and all possible steps are gone
res[0] = 0
return
step += 1
steps, new_steps = new_steps, steps
step_len = new_step_ind
new_step_ind = 0 |
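A toy run on a 5x5 grid with no walls, assuming `DIRECT_NEIGHBOURS` is a module-level table of 4-connected offsets such as ((1, 0), (-1, 0), (0, 1), (0, -1)); note that `res[0]` behaves as a found flag in the code above:

import numpy as np

growth = np.zeros((5, 5), np.int32)
walls = np.zeros((5, 5), bool)
target = np.zeros((5, 5), bool)
target[4, 4] = True
steps = np.empty((25, 2), np.int32)      # scratch space for BFS fronts
new_steps = np.empty((25, 2), np.int32)
res = np.empty(3, np.int32)

_grow(growth, walls, target, 0, 0, steps, new_steps, res)
print(res)  # [1 4 4] -> target reached at (4, 4)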
def write_file(
task: Task,
filename: str,
content: str,
append: bool = False,
dry_run: Optional[bool] = None,
) -> Result:
"""
Write contents to a file (locally)
Arguments:
dry_run: Whether to apply changes or not
filename: file you want to write into
content: content you want to write
append: whether you want to replace the contents or append to it
Returns:
Result object with the following attributes set:
* changed (``bool``):
* diff (``str``): unified diff
"""
diff = _generate_diff(filename, content, append)
if not task.is_dry_run(dry_run):
mode = "a+" if append else "w+"
with open(filename, mode=mode) as f:
f.write(content)
return Result(host=task.host, diff=diff, changed=bool(diff)) | Write contents to a file (locally)
Arguments:
dry_run: Whether to apply changes or not
filename: file you want to write into
content: content you want to write
append: whether you want to replace the contents or append to it
Returns:
Result object with the following attributes set:
* changed (``bool``):
* diff (``str``): unified diff | Below is the instruction that describes the task:
### Input:
Write contents to a file (locally)
Arguments:
dry_run: Whether to apply changes or not
filename: file you want to write into
content: content you want to write
append: whether you want to replace the contents or append to it
Returns:
Result object with the following attributes set:
* changed (``bool``):
* diff (``str``): unified diff
### Response:
def write_file(
task: Task,
filename: str,
content: str,
append: bool = False,
dry_run: Optional[bool] = None,
) -> Result:
"""
Write contents to a file (locally)
Arguments:
dry_run: Whether to apply changes or not
filename: file you want to write into
content: content you want to write
append: whether you want to replace the contents or append to it
Returns:
Result object with the following attributes set:
* changed (``bool``):
* diff (``str``): unified diff
"""
diff = _generate_diff(filename, content, append)
if not task.is_dry_run(dry_run):
mode = "a+" if append else "w+"
with open(filename, mode=mode) as f:
f.write(content)
return Result(host=task.host, diff=diff, changed=bool(diff)) |
def create_app(self, args):
"""Create an application
Creates a new application in the specified region, owned by the current requester.
Args:
- args: request parameters (json), see http://kirk-docs.qiniu.com/apidocs/
Returns:
- result: the created application info on success, or None on failure
- ResponseInfo: the Response info of the request
"""
url = '{0}/v3/apps'.format(self.host)
return http._post_with_qiniu_mac(url, args, self.auth) | Create an application
Creates a new application in the specified region, owned by the current requester.
Args:
- args: request parameters (json), see http://kirk-docs.qiniu.com/apidocs/
Returns:
- result: the created application info on success, or None on failure
- ResponseInfo: the Response info of the request | Below is the instruction that describes the task:
### Input:
Create an application
Creates a new application in the specified region, owned by the current requester.
Args:
- args: request parameters (json), see http://kirk-docs.qiniu.com/apidocs/
Returns:
- result: the created application info on success, or None on failure
- ResponseInfo: the Response info of the request
### Response:
def create_app(self, args):
"""Create an application
Creates a new application in the specified region, owned by the current requester.
Args:
- args: request parameters (json), see http://kirk-docs.qiniu.com/apidocs/
Returns:
- result: the created application info on success, or None on failure
- ResponseInfo: the Response info of the request
"""
url = '{0}/v3/apps'.format(self.host)
return http._post_with_qiniu_mac(url, args, self.auth) |
def build_response(
self,
status=NOT_SET,
error="",
data=None):
"""build_response
:param status: status code
:param error: error message
:param data: dictionary to send back
"""
res_node = {
"status": status,
"error": error,
"data": data
}
return res_node | build_response
:param status: status code
:param error: error message
:param data: dictionary to send back | Below is the instruction that describes the task:
### Input:
build_response
:param status: status code
:param error: error message
:param data: dictionary to send back
### Response:
def build_response(
self,
status=NOT_SET,
error="",
data=None):
"""build_response
:param status: status code
:param error: error message
:param data: dictionary to send back
"""
res_node = {
"status": status,
"error": error,
"data": data
}
return res_node |
def _run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect indels with pindel in tumor/[normal] analysis.
Only attempts to detect small insertion/deletions and not larger structural events.
:param align_bam: (list) bam files
:param items: (dict) information from yaml
:param ref_file: (str) genome in fasta format
:param assoc_file: (dict) files for annotation
:param region: (str or tuple) region to analyze
:param out_file: (str) final vcf file
:returns: (str) final vcf file
"""
config = items[0]["config"]
paired = get_paired_bams(align_bams, items)
if out_file is None:
out_file = "%s-indels.vcf" % os.path.splitext(align_bams[0])[0]
paired_bam = [paired.tumor_bam]
paired_name = [paired.tumor_name]
if paired.normal_bam:
paired_bam.append(paired.normal_bam)
paired_name.append(paired.normal_name)
if not utils.file_exists(out_file):
with tx_tmpdir(config) as tmp_path:
for align_bam in align_bams:
bam.index(align_bam, config)
root_pindel = os.path.join(tmp_path, "pindelroot")
pindel = config_utils.get_program("pindel", config)
opts = _pindel_options(items, config, out_file, region, tmp_path)
tmp_input = _create_tmp_input(paired_bam, paired_name, tmp_path, config)
cmd = ("{pindel} -f {ref_file} -i {tmp_input} -o {root_pindel} " +
"{opts} --max_range_index 2 --IndelCorrection "
"--report_breakpoints false --report_interchromosomal_events false")
do.run(cmd.format(**locals()), "Genotyping with pindel", {})
out_file = _create_vcf(root_pindel, out_file, ref_file,
items, paired)
return out_file | Detect indels with pindel in tumor/[normal] analysis.
Only attempts to detect small insertion/deletions and not larger structural events.
:param align_bam: (list) bam files
:param items: (dict) information from yaml
:param ref_file: (str) genome in fasta format
:param assoc_file: (dict) files for annotation
:param region: (str or tuple) region to analyze
:param out_file: (str) final vcf file
:returns: (str) final vcf file | Below is the instruction that describes the task:
### Input:
Detect indels with pindel in tumor/[normal] analysis.
Only attempts to detect small insertion/deletions and not larger structural events.
:param align_bam: (list) bam files
:param items: (dict) information from yaml
:param ref_file: (str) genome in fasta format
:param assoc_file: (dict) files for annotation
:param region: (str or tuple) region to analyze
:param out_file: (str) final vcf file
:returns: (str) final vcf file
### Response:
def _run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect indels with pindel in tumor/[normal] analysis.
Only attempts to detect small insertion/deletions and not larger structural events.
:param align_bams: (list) bam files
:param items: (dict) information from yaml
:param ref_file: (str) genome in fasta format
:param assoc_files: (dict) files for annotation
:param region: (str or tuple) region to analyze
:param out_file: (str) final vcf file
:returns: (str) final vcf file
"""
config = items[0]["config"]
paired = get_paired_bams(align_bams, items)
if out_file is None:
out_file = "%s-indels.vcf" % os.path.splitext(align_bams[0])[0]
paired_bam = [paired.tumor_bam]
paired_name = [paired.tumor_name]
if paired.normal_bam:
paired_bam.append(paired.normal_bam)
paired_name.append(paired.normal_name)
if not utils.file_exists(out_file):
with tx_tmpdir(config) as tmp_path:
for align_bam in align_bams:
bam.index(align_bam, config)
root_pindel = os.path.join(tmp_path, "pindelroot")
pindel = config_utils.get_program("pindel", config)
opts = _pindel_options(items, config, out_file, region, tmp_path)
tmp_input = _create_tmp_input(paired_bam, paired_name, tmp_path, config)
cmd = ("{pindel} -f {ref_file} -i {tmp_input} -o {root_pindel} " +
"{opts} --max_range_index 2 --IndelCorrection "
"--report_breakpoints false --report_interchromosomal_events false")
do.run(cmd.format(**locals()), "Genotyping with pindel", {})
out_file = _create_vcf(root_pindel, out_file, ref_file,
items, paired)
return out_file |
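The default output name is derived from the first BAM path; a small runnable sketch of just that naming rule (the path is illustrative):

import os

align_bams = ["tumor_sample.bam"]
out_file = "%s-indels.vcf" % os.path.splitext(align_bams[0])[0]
print(out_file)  # tumor_sample-indels.vcf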
def _get_el_attributes(lxml_el, ns=None, nsmap=None):
"""Return the XML attributes of lxml ``Element`` instance lxml_el as a dict
where namespaced attributes are represented via colon-delimiting and using
snake case.
"""
attrs = {}
for attr, val in lxml_el.items():
attr = _to_colon_ns(attr, default_ns=ns, nsmap=nsmap)
attrs[attr] = val
return attrs | Return the XML attributes of lxml ``Element`` instance lxml_el as a dict
where namespaced attributes are represented via colon-delimiting and using
snake case. | Below is the instruction that describes the task:
### Input:
Return the XML attributes of lxml ``Element`` instance lxml_el as a dict
where namespaced attributes are represented via colon-delimiting and using
snake case.
### Response:
def _get_el_attributes(lxml_el, ns=None, nsmap=None):
"""Return the XML attributes of lxml ``Element`` instance lxml_el as a dict
where namespaced attributes are represented via colon-delimiting and using
snake case.
"""
attrs = {}
for attr, val in lxml_el.items():
attr = _to_colon_ns(attr, default_ns=ns, nsmap=nsmap)
attrs[attr] = val
return attrs |
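A runnable sketch of the underlying lxml iteration, with a stub standing in for _to_colon_ns (the real helper's namespace mapping is not shown in this row):

from lxml import etree

def _to_colon_ns(attr, default_ns=None, nsmap=None):
    # Stub for illustration: the real helper maps '{uri}name' to 'prefix:name'.
    return attr

el = etree.fromstring('<item id="42" status="open"/>')
attrs = {_to_colon_ns(a): v for a, v in el.items()}
print(attrs)  # {'id': '42', 'status': 'open'}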
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj) | resource can be a URL or a ResourceLocator | Below is the instruction that describes the task:
### Input:
resource can be a URL or a ResourceLocator
### Response:
def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj) |
def make_fileitem_username(file_owner, condition='is', negate=False, preserve_case=False):
"""
Create a node for FileItem/Username
:return: An IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/Username'
content_type = 'string'
content = file_owner
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | Create a node for FileItem/Username
:return: An IndicatorItem represented as an Element node | Below is the instruction that describes the task:
### Input:
Create a node for FileItem/Username
:return: An IndicatorItem represented as an Element node
### Response:
def make_fileitem_username(file_owner, condition='is', negate=False, preserve_case=False):
"""
Create a node for FileItem/Username
:return: An IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/Username'
content_type = 'string'
content = file_owner
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node |
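A call sketch, assuming the ioc_writer package is installed so that ioc_api resolves; the username is a placeholder:

# Sketch only; requires ioc_api and the definition above.
node = make_fileitem_username("CORP\\jdoe", condition="is")
# node is an lxml Element encoding an IndicatorItem for FileItem/Username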
def to_mesh(self):
"""
Return a copy of the Primitive object as a Trimesh object.
"""
result = Trimesh(vertices=self.vertices.copy(),
faces=self.faces.copy(),
face_normals=self.face_normals.copy(),
process=False)
return result | Return a copy of the Primitive object as a Trimesh object. | Below is the instruction that describes the task:
### Input:
Return a copy of the Primitive object as a Trimesh object.
### Response:
def to_mesh(self):
"""
Return a copy of the Primitive object as a Trimesh object.
"""
result = Trimesh(vertices=self.vertices.copy(),
faces=self.faces.copy(),
face_normals=self.face_normals.copy(),
process=False)
return result |
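A hedged usage example with the trimesh library (assuming it is installed; Box is one of its primitives):

import trimesh

box = trimesh.primitives.Box(extents=(1.0, 2.0, 3.0))
mesh = box.to_mesh()         # plain Trimesh copy, detached from the primitive
print(type(mesh).__name__)   # Trimesh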
def json_encode(func):
"""
Decorator used to change the return value from PJFFactory.fuzzed; it makes the structure printable
"""
def func_wrapper(self, indent, utf8):
if utf8:
encoding = "\\x%02x"
else:
encoding = "\\u%04x"
hex_regex = re.compile(r"(\\\\x[a-fA-F0-9]{2})")
unicode_regex = re.compile(r"(\\u[a-fA-F0-9]{4})")
def encode_decode_all(d, _decode=True):
if type(d) == dict:
for k in d:
if type(d[k]) in [dict, list]:
if _decode:
d[k] = encode_decode_all(d[k])
else:
d[k] = encode_decode_all(d[k], _decode=False)
elif type(d[k]) == str:
if _decode:
d[k] = decode(d[k])
else:
d[k] = encode(d[k])
elif type(d) == list:
arr = []
for e in d:
if type(e) == str:
if _decode:
arr.append(decode(e))
else:
arr.append(encode(e))
elif type(e) in [dict, list]:
if _decode:
arr.append(encode_decode_all(e))
else:
arr.append(encode_decode_all(e, _decode=False))
else:
arr.append(e)
return arr
else:
if _decode:
return decode(d)
else:
return encode(d)
return d
def decode(x):
tmp = "".join(encoding % ord(c) if c not in p else c for c in x)
if sys.version_info >= (3, 0):
return str(tmp)
else:
for encoded in unicode_regex.findall(tmp):
tmp = tmp.replace(encoded, encoded.decode("unicode_escape"))
return unicode(tmp)
def encode(x):
for encoded in hex_regex.findall(x):
if sys.version_info >= (3, 0):
x = x.replace(encoded, bytes(str(encoded).replace("\\\\x", "\\x"),"utf-8").decode("unicode_escape"))
else:
x = x.replace(encoded, str(encoded).replace("\\\\x", "\\x").decode("string_escape"))
return x
if indent:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)), indent=5)),
_decode=False)
else:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)))), _decode=False)
return func_wrapper | Decorator used to change the return value from PJFFactory.fuzzed; it makes the structure printable | Below is the instruction that describes the task:
### Input:
Decorator used to change the return value from PJFFactory.fuzzed; it makes the structure printable
### Response:
def json_encode(func):
"""
Decorator used to change the return value from PJFFactory.fuzzed; it makes the structure printable
"""
def func_wrapper(self, indent, utf8):
if utf8:
encoding = "\\x%02x"
else:
encoding = "\\u%04x"
hex_regex = re.compile(r"(\\\\x[a-fA-F0-9]{2})")
unicode_regex = re.compile(r"(\\u[a-fA-F0-9]{4})")
def encode_decode_all(d, _decode=True):
if type(d) == dict:
for k in d:
if type(d[k]) in [dict, list]:
if _decode:
d[k] = encode_decode_all(d[k])
else:
d[k] = encode_decode_all(d[k], _decode=False)
elif type(d[k]) == str:
if _decode:
d[k] = decode(d[k])
else:
d[k] = encode(d[k])
elif type(d) == list:
arr = []
for e in d:
if type(e) == str:
if _decode:
arr.append(decode(e))
else:
arr.append(encode(e))
elif type(e) in [dict, list]:
if _decode:
arr.append(encode_decode_all(e))
else:
arr.append(encode_decode_all(e, _decode=False))
else:
arr.append(e)
return arr
else:
if _decode:
return decode(d)
else:
return encode(d)
return d
def decode(x):
tmp = "".join(encoding % ord(c) if c not in p else c for c in x)
if sys.version_info >= (3, 0):
return str(tmp)
else:
for encoded in unicode_regex.findall(tmp):
tmp = tmp.replace(encoded, encoded.decode("unicode_escape"))
return unicode(tmp)
def encode(x):
for encoded in hex_regex.findall(x):
if sys.version_info >= (3, 0):
x = x.replace(encoded, bytes(str(encoded).replace("\\\\x", "\\x"),"utf-8").decode("unicode_escape"))
else:
x = x.replace(encoded, str(encoded).replace("\\\\x", "\\x").decode("string_escape"))
return x
if indent:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)), indent=5)),
_decode=False)
else:
return encode_decode_all("{0}".format(json.dumps(encode_decode_all(func(self)))), _decode=False)
return func_wrapper |
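A minimal application sketch, assuming the json_encode definition above together with its module-level imports (json, re, sys) and the module-level p it reads; Fuzzer is a stand-in for PJFFactory:

import string
p = string.printable  # assumed module-level set of printable characters

class Fuzzer:
    @json_encode
    def fuzzed(self):
        return {"key": "va\x01lue"}

print(Fuzzer().fuzzed(indent=False, utf8=True))  # exercises the escape/unescape round trip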
def usage(self, callback=None, errback=None, **kwargs):
"""
Return the current usage information for this zone
:rtype: dict
:return: usage information
"""
stats = Stats(self.config)
return stats.usage(zone=self.zone, callback=callback, errback=errback,
**kwargs) | Return the current usage information for this zone
:rtype: dict
:return: usage information | Below is the instruction that describes the task:
### Input:
Return the current usage information for this zone
:rtype: dict
:return: usage information
### Response:
def usage(self, callback=None, errback=None, **kwargs):
"""
Return the current usage information for this zone
:rtype: dict
:return: usage information
"""
stats = Stats(self.config)
return stats.usage(zone=self.zone, callback=callback, errback=errback,
**kwargs) |
def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
in_block = False
changed = True
elif line.strip() == settings.START_TOKEN:
in_block = True
else:
new_file.append(line)
if changed:
with open(settings.HOSTS_FILE, "w") as hosts_file:
hosts_file.write("".join(new_file))
reset_network("Concentration is now lost :(.") | Enables access to websites that are defined as 'distractors' | Below is the instruction that describes the task:
### Input:
Enables access to websites that are defined as 'distractors'
### Response:
def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
in_block = False
changed = True
elif line.strip() == settings.START_TOKEN:
in_block = True
else:
new_file.append(line)
if changed:
with open(settings.HOSTS_FILE, "w") as hosts_file:
hosts_file.write("".join(new_file))
reset_network("Concentration is now lost :(.") |
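In isolation, the stripping loop behaves like this runnable sketch; the token values are hypothetical, the real ones come from settings:

START_TOKEN, END_TOKEN = "## concentration-start", "## concentration-end"  # assumed values
lines = ["127.0.0.1 localhost", START_TOKEN, "127.0.0.1 twitter.com", END_TOKEN]
kept, in_block = [], False
for line in lines:
    if in_block:
        in_block = line.strip() != END_TOKEN  # leave the block at the end token
    elif line.strip() == START_TOKEN:
        in_block = True
    else:
        kept.append(line)
print(kept)  # ['127.0.0.1 localhost'] -- the blocked entries are dropped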
def items(self):
"Returns all elements as a list in (key,value) format."
return list(zip(list(self.keys()), list(self.values()))) | Returns all elements as a list in (key,value) format. | Below is the instruction that describes the task:
### Input:
Returns all elements as a list in (key,value) format.
### Response:
def items(self):
"Returns all elements as a list in (key,value) format."
return list(zip(list(self.keys()), list(self.values()))) |
def set_runtime_value_int(self, ihcid: int, value: int) -> bool:
""" Set integer runtime value with re-authenticate if needed"""
if self.client.set_runtime_value_int(ihcid, value):
return True
self.re_authenticate()
return self.client.set_runtime_value_int(ihcid, value) | Set integer runtime value with re-authenticate if needed | Below is the instruction that describes the task:
### Input:
Set integer runtime value with re-authenticate if needed
### Response:
def set_runtime_value_int(self, ihcid: int, value: int) -> bool:
""" Set integer runtime value with re-authenticate if needed"""
if self.client.set_runtime_value_int(ihcid, value):
return True
self.re_authenticate()
return self.client.set_runtime_value_int(ihcid, value) |
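The call/re-authenticate/retry idiom in isolation, as a generic runnable sketch (all names are stand-ins):

def call_with_reauth(call, re_authenticate, *args):
    # Try once; on a falsy result, re-authenticate and retry exactly once.
    if call(*args):
        return True
    re_authenticate()
    return call(*args)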
def refresh_pillar(**kwargs):
'''
Signal the minion to refresh the pillar data.
.. versionchanged:: Neon
The ``async`` argument has been added. The default value is True.
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
salt '*' saltutil.refresh_pillar async=False
'''
asynchronous = bool(kwargs.get('async', True))
try:
if asynchronous:
# If we're going to block, first setup a listener
ret = __salt__['event.fire']({}, 'pillar_refresh')
else:
eventer = salt.utils.event.get_event(
'minion', opts=__opts__, listen=True)
ret = __salt__['event.fire']({'notify': True}, 'pillar_refresh')
# Wait for the finish event to fire
log.trace('refresh_pillar waiting for pillar refresh to complete')
# Blocks until we hear this event or until the timeout expires
eventer.get_event(
tag=salt.defaults.events.MINION_PILLAR_COMPLETE, wait=30)
except KeyError:
log.error('Event module not available. Pillar refresh failed.')
ret = False # Effectively a no-op, since we can't really return without an event system
return ret | Signal the minion to refresh the pillar data.
.. versionchanged:: Neon
The ``async`` argument has been added. The default value is True.
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
salt '*' saltutil.refresh_pillar async=False | Below is the instruction that describes the task:
### Input:
Signal the minion to refresh the pillar data.
.. versionchanged:: Neon
The ``async`` argument has been added. The default value is True.
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
salt '*' saltutil.refresh_pillar async=False
### Response:
def refresh_pillar(**kwargs):
'''
Signal the minion to refresh the pillar data.
.. versionchanged:: Neon
The ``async`` argument has been added. The default value is True.
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
salt '*' saltutil.refresh_pillar async=False
'''
asynchronous = bool(kwargs.get('async', True))
try:
if asynchronous:
# If we're going to block, first setup a listener
ret = __salt__['event.fire']({}, 'pillar_refresh')
else:
eventer = salt.utils.event.get_event(
'minion', opts=__opts__, listen=True)
ret = __salt__['event.fire']({'notify': True}, 'pillar_refresh')
# Wait for the finish event to fire
log.trace('refresh_pillar waiting for pillar refresh to complete')
# Blocks until we hear this event or until the timeout expires
eventer.get_event(
tag=salt.defaults.events.MINION_PILLAR_COMPLETE, wait=30)
except KeyError:
log.error('Event module not available. Pillar refresh failed.')
ret = False # Effectively a no-op, since we can't really return without an event system
return ret |
def compile(self, session=None):
"""
Before calling the standard compile function, check to see if the size
of the data has changed and add parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
"""
if not self.num_data == self.X.shape[0]:
self.num_data = self.X.shape[0]
self.V = Parameter(np.zeros((self.num_data, self.num_latent)))
self.V.prior = Gaussian(0., 1.)
return super(GPMC, self).compile(session=session) | Before calling the standard compile function, check to see if the size
of the data has changed and add parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data. | Below is the instruction that describes the task:
### Input:
Before calling the standard compile function, check to see if the size
of the data has changed and add parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
### Response:
def compile(self, session=None):
"""
Before calling the standard compile function, check to see if the size
of the data has changed and add parameters appropriately.
This is necessary because the shape of the parameters depends on the
shape of the data.
"""
if not self.num_data == self.X.shape[0]:
self.num_data = self.X.shape[0]
self.V = Parameter(np.zeros((self.num_data, self.num_latent)))
self.V.prior = Gaussian(0., 1.)
return super(GPMC, self).compile(session=session) |
def merge_peptides(fns, ns):
"""Loops peptides from multiple files, fetches PSMs from
sequence:PSM map, outputs correctly PSM mapped peptides"""
peptides_to_map = reader.generate_peptides_multiple_fractions(fns, ns)
psmmap = create_merge_psm_map(peptides_to_map, ns)
peptides = reader.generate_peptides_multiple_fractions(fns, ns)
for peptide in peptides:
seq = reader.get_peptide_seq(peptide, ns)
psm_ids = reader.get_psm_ids_from_peptide(peptide, ns)
# remove current psm ids, repopulate with stored ones
psm_ids.clear()
for new_psm_id in psmmap[seq]:
etree.SubElement(psm_ids, 'psm_id').text = new_psm_id
yield formatting.string_and_clear(peptide, ns) | Loops peptides from multiple files, fetches PSMs from
sequence:PSM map, outputs correctly PSM mapped peptides | Below is the instruction that describes the task:
### Input:
Loops peptides from multiple files, fetches PSMs from
sequence:PSM map, outputs correctly PSM mapped peptides
### Response:
def merge_peptides(fns, ns):
"""Loops peptides from multiple files, fetches PSMs from
sequence:PSM map, outputs correctly PSM mapped peptides"""
peptides_to_map = reader.generate_peptides_multiple_fractions(fns, ns)
psmmap = create_merge_psm_map(peptides_to_map, ns)
peptides = reader.generate_peptides_multiple_fractions(fns, ns)
for peptide in peptides:
seq = reader.get_peptide_seq(peptide, ns)
psm_ids = reader.get_psm_ids_from_peptide(peptide, ns)
# remove current psm ids, repopulate with stored ones
psm_ids.clear()
for new_psm_id in psmmap[seq]:
etree.SubElement(psm_ids, 'psm_id').text = new_psm_id
yield formatting.string_and_clear(peptide, ns) |
def tofile(self, fobj, format):
"""Write data to hex or bin file. Preferred method over tobin or tohex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin")
"""
if format == 'hex':
self.write_hex_file(fobj)
elif format == 'bin':
self.tobinfile(fobj)
else:
raise ValueError('format should be either "hex" or "bin";'
' got %r instead' % format) | Write data to hex or bin file. Preferred method over tobin or tohex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin") | Below is the instruction that describes the task:
### Input:
Write data to hex or bin file. Preferred method over tobin or tohex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin")
### Response:
def tofile(self, fobj, format):
"""Write data to hex or bin file. Preferred method over tobin or tohex.
@param fobj file name or file-like object
@param format file format ("hex" or "bin")
"""
if format == 'hex':
self.write_hex_file(fobj)
elif format == 'bin':
self.tobinfile(fobj)
else:
raise ValueError('format should be either "hex" or "bin";'
' got %r instead' % format) |
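A usage sketch assuming the intelhex package, where this method lives:

from intelhex import IntelHex

ih = IntelHex()
ih.puts(0x0000, b"\x01\x02\x03\x04")     # place four bytes at address 0
ih.tofile("firmware.hex", format="hex")  # or format="bin" for raw output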
def replace_strings_in_list(array_of_strigs, replace_with_strings):
"A value in replace_with_strings can be either a single string or a list of strings"
potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strigs]
return list(flatten(potentially_nested_list)) | A value in replace_with_strings can be either a single string or a list of strings | Below is the instruction that describes the task:
### Input:
A value in replace_with_strings can be either a single string or a list of strings
### Response:
def replace_strings_in_list(array_of_strigs, replace_with_strings):
"A value in replace_with_strings can be either a single string or a list of strings"
potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strigs]
return list(flatten(potentially_nested_list)) |
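A worked example, with a minimal stand-in for the flatten helper the snippet relies on:

def flatten(nested):
    # Minimal stand-in: yields items, recursing into nested lists.
    for item in nested:
        if isinstance(item, list):
            for sub in flatten(item):
                yield sub
        else:
            yield item

mapping = {"b": ["x", "y"]}
result = [mapping.get(s) or s for s in ["a", "b", "c"]]
print(list(flatten(result)))  # ['a', 'x', 'y', 'c']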
def getParameterArrayCount(self, name, index):
"""Default implementation that returns the length of the attribute.
This default implementation goes hand in hand with
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.
If you override one of them in your subclass, you should probably override
both of them.
The implementation prevents accessing parameters names that start with
``_``. It may be better to enforce this convention at the node spec level.
:param name: (string) name of requested parameter
:param index: (int) index of node inside the region (if relevant)
:raises: Exception if parameter starts with ``_``.
"""
if name.startswith('_'):
raise Exception('Parameter name must not start with an underscore')
return len(self.parameters[name]) | Default implementation that returns the length of the attribute.
This default implementation goes hand in hand with
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.
If you override one of them in your subclass, you should probably override
both of them.
The implementation prevents accessing parameters names that start with
``_``. It may be better to enforce this convention at the node spec level.
:param name: (string) name of requested parameter
:param index: (int) index of node inside the region (if relevant)
:raises: Exception if parameter starts with ``_``. | Below is the instruction that describes the task:
### Input:
Default implementation that returns the length of the attribute.
This default implementation goes hand in hand with
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.
If you override one of them in your subclass, you should probably override
both of them.
The implementation prevents accessing parameters names that start with
``_``. It may be better to enforce this convention at the node spec level.
:param name: (string) name of requested parameter
:param index: (int) index of node inside the region (if relevant)
:raises: Exception if parameter starts with ``_``.
### Response:
def getParameterArrayCount(self, name, index):
"""Default implementation that returns the length of the attribute.
This default implementation goes hand in hand with
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.
If you override one of them in your subclass, you should probably override
both of them.
The implementation prevents accessing parameters names that start with
``_``. It may be better to enforce this convention at the node spec level.
:param name: (string) name of requested parameter
:param index: (int) index of node inside the region (if relevant)
:raises: Exception if parameter starts with ``_``.
"""
if name.startswith('_'):
raise Exception('Parameter name must not start with an underscore')
return len(self.parameters[name]) |
def sendPREMISEvent(webRoot, eventType, agentIdentifier, eventDetail,
eventOutcome, eventOutcomeDetail=None, linkObjectList=[],
eventDate=None, debug=False, eventIdentifier=None):
"""
A function to format an event to be uploaded and send it to a particular CODA server
in order to register it
"""
atomID = uuid.uuid1().hex
eventXML = createPREMISEventXML(
eventType=eventType,
agentIdentifier=agentIdentifier,
eventDetail=eventDetail,
eventOutcome=eventOutcome,
outcomeDetail=eventOutcomeDetail,
eventIdentifier=eventIdentifier,
eventDate=eventDate,
linkObjectList=linkObjectList
)
atomXML = bagatom.wrapAtom(eventXML, id=atomID, title=atomID)
atomXMLText = '<?xml version="1.0"?>\n%s' % etree.tostring(
atomXML, pretty_print=True
)
if debug:
print "Uploading XML to %s\n---\n%s\n---\n" % (webRoot, atomXMLText)
response = None
try:
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
except urllib2.URLError:
pass
if not response:
waitForURL(webRoot, 60)
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
if response.code != 201:
if debug:
tempdir = tempfile.gettempdir()
tfPath = os.path.join(
tempdir, "premis_upload_%s.html" % uuid.uuid1().hex
)
tf = open(tfPath, "w")
tf.write(content)
tf.close()
sys.stderr.write(
"Output from webserver available at %s. Response code %s" % (
tf.name, response.code
)
)
raise Exception(
"Error uploading PREMIS Event to %s. Response code is %s" % (
webRoot, response.code
)
)
return (response, content) | A function to format an event to be uploaded and send it to a particular CODA server
in order to register it | Below is the instruction that describes the task:
### Input:
A function to format an event to be uploaded and send it to a particular CODA server
in order to register it
### Response:
def sendPREMISEvent(webRoot, eventType, agentIdentifier, eventDetail,
eventOutcome, eventOutcomeDetail=None, linkObjectList=[],
eventDate=None, debug=False, eventIdentifier=None):
"""
A function to format an event to be uploaded and send it to a particular CODA server
in order to register it
"""
atomID = uuid.uuid1().hex
eventXML = createPREMISEventXML(
eventType=eventType,
agentIdentifier=agentIdentifier,
eventDetail=eventDetail,
eventOutcome=eventOutcome,
outcomeDetail=eventOutcomeDetail,
eventIdentifier=eventIdentifier,
eventDate=eventDate,
linkObjectList=linkObjectList
)
atomXML = bagatom.wrapAtom(eventXML, id=atomID, title=atomID)
atomXMLText = '<?xml version="1.0"?>\n%s' % etree.tostring(
atomXML, pretty_print=True
)
if debug:
print "Uploading XML to %s\n---\n%s\n---\n" % (webRoot, atomXMLText)
response = None
try:
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
except urllib2.URLError:
pass
if not response:
waitForURL(webRoot, 60)
response, content = doWebRequest(webRoot, "POST", data=atomXMLText)
if response.code != 201:
if debug:
tempdir = tempfile.gettempdir()
tfPath = os.path.join(
tempdir, "premis_upload_%s.html" % uuid.uuid1().hex
)
tf = open(tfPath, "w")
tf.write(content)
tf.close()
sys.stderr.write(
"Output from webserver available at %s. Response code %s" % (
tf.name, response.code
)
)
raise Exception(
"Error uploading PREMIS Event to %s. Response code is %s" % (
webRoot, response.code
)
)
return (response, content) |
def to_string(self, verbose=0):
"""String representation."""
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines) | String representation. | Below is the instruction that describes the task:
### Input:
String representation.
### Response:
def to_string(self, verbose=0):
"""String representation."""
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines) |
def scheduled_status_update(self, id, scheduled_at):
"""
Update the scheduled time of a scheduled status.
New time must be at least 5 minutes into the future.
Returns a `scheduled toot dict`_
"""
scheduled_at = self.__consistent_isoformat_utc(scheduled_at)
id = self.__unpack_id(id)
params = self.__generate_params(locals(), ['id'])
url = '/api/v1/scheduled_statuses/{0}'.format(str(id))
return self.__api_request('PUT', url, params) | Update the scheduled time of a scheduled status.
New time must be at least 5 minutes into the future.
Returns a `scheduled toot dict`_ | Below is the instruction that describes the task:
### Input:
Update the scheduled time of a scheduled status.
New time must be at least 5 minutes into the future.
Returns a `scheduled toot dict`_
### Response:
def scheduled_status_update(self, id, scheduled_at):
"""
Update the scheduled time of a scheduled status.
New time must be at least 5 minutes into the future.
Returns a `scheduled toot dict`_
"""
scheduled_at = self.__consistent_isoformat_utc(scheduled_at)
id = self.__unpack_id(id)
params = self.__generate_params(locals(), ['id'])
url = '/api/v1/scheduled_statuses/{0}'.format(str(id))
return self.__api_request('PUT', url, params) |
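A hedged usage sketch with Mastodon.py; the token and instance URL are placeholders:

from datetime import datetime, timedelta, timezone
from mastodon import Mastodon

api = Mastodon(access_token="TOKEN", api_base_url="https://example.social")
new_time = datetime.now(timezone.utc) + timedelta(minutes=10)  # must be >= 5 minutes out
api.scheduled_status_update(id="12345", scheduled_at=new_time)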
def read_release_version():
"""Read version information from VERSION file"""
try:
with open(VERSION_FILE, "r") as infile:
version = str(infile.read().strip())
if len(version) == 0:
version = None
return version
except IOError:
return None | Read version information from VERSION file | Below is the instruction that describes the task:
### Input:
Read version information from VERSION file
### Response:
def read_release_version():
"""Read version information from VERSION file"""
try:
with open(VERSION_FILE, "r") as infile:
version = str(infile.read().strip())
if len(version) == 0:
version = None
return version
except IOError:
return None |
def read_dirs(path, folder):
'''
Fetches names of all files in path in long form, and labels associated by extrapolation of directory names.
'''
lbls, fnames, all_lbls = [], [], []
full_path = os.path.join(path, folder)
for lbl in sorted(os.listdir(full_path)):
if lbl not in ('.ipynb_checkpoints','.DS_Store'):
all_lbls.append(lbl)
for fname in os.listdir(os.path.join(full_path, lbl)):
if fname not in ('.DS_Store',):
fnames.append(os.path.join(folder, lbl, fname))
lbls.append(lbl)
return fnames, lbls, all_lbls | Fetches names of all files in path in long form, and labels associated by extrapolation of directory names. | Below is the instruction that describes the task:
### Input:
Fetches names of all files in path in long form, and labels associated by extrapolation of directory names.
### Response:
def read_dirs(path, folder):
'''
Fetches names of all files in path in long form, and labels associated by extrapolation of directory names.
'''
lbls, fnames, all_lbls = [], [], []
full_path = os.path.join(path, folder)
for lbl in sorted(os.listdir(full_path)):
if lbl not in ('.ipynb_checkpoints','.DS_Store'):
all_lbls.append(lbl)
for fname in os.listdir(os.path.join(full_path, lbl)):
if fname not in ('.DS_Store',):
fnames.append(os.path.join(folder, lbl, fname))
lbls.append(lbl)
return fnames, lbls, all_lbls |
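Given a layout like the one sketched in the comments, a call would behave as follows (paths are illustrative and the definition above is assumed):

# data/train/cats/a.jpg
# data/train/dogs/b.jpg
fnames, lbls, all_lbls = read_dirs("data", "train")
# fnames   -> ['train/cats/a.jpg', 'train/dogs/b.jpg']
# lbls     -> ['cats', 'dogs']
# all_lbls -> ['cats', 'dogs']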
def get_phonetic_info(self, lang):
"""For a specified language (lang), it returns the matrix and the vector
containing specifications of the characters.
"""
phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data
phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors
return phonetic_data, phonetic_vectors | For a specified language (lang), it returns the matrix and the vector
containing specifications of the characters. | Below is the instruction that describes the task:
### Input:
For a specified language (lang), it returns the matrix and the vector
containing specifications of the characters.
### Response:
def get_phonetic_info(self, lang):
"""For a specified language (lang), it returns the matrix and the vector
containing specifications of the characters.
"""
phonetic_data = self.all_phonetic_data if lang != LC_TA else self.tamil_phonetic_data
phonetic_vectors = self.all_phonetic_vectors if lang != LC_TA else self.tamil_phonetic_vectors
return phonetic_data, phonetic_vectors |