code | docstring | text
---|---|---|
def load(self, callback=None, errback=None, reload=False):
"""
Load record data from the API.
"""
if not reload and self.data:
raise RecordException('record already loaded')
def success(result, *args):
self._parseModel(result)
if callback:
return callback(self)
else:
return self
return self._rest.retrieve(self.parentZone.zone,
self.domain, self.type,
callback=success, errback=errback) | Load record data from the API. | Below is the instruction that describes the task:
### Input:
Load record data from the API.
### Response:
def load(self, callback=None, errback=None, reload=False):
"""
Load record data from the API.
"""
if not reload and self.data:
raise RecordException('record already loaded')
def success(result, *args):
self._parseModel(result)
if callback:
return callback(self)
else:
return self
return self._rest.retrieve(self.parentZone.zone,
self.domain, self.type,
callback=success, errback=errback) |
def parse_args(argString=None):
"""Parses the command line options and arguments.
:returns: A :py:class:`argparse.Namespace` object created by the
:py:mod:`argparse` module. It contains the values of the
different options.
====================== ====== ================================
Options Type Description
====================== ====== ================================
``--evec`` string The EVEC file from EIGENSOFT
``--scree-plot-title`` string The main title of the scree plot
``--out`` string The name of the output file
====================== ====== ================================
.. note::
No option check is done here (except for the one automatically done by
:py:mod:`argparse`). Those need to be done elsewhere (see
:py:func:`checkArgs`).
"""
args = None
if argString is None:
args = parser.parse_args()
else:
args = parser.parse_args(argString)
return args | Parses the command line options and arguments.
:returns: A :py:class:`argparse.Namespace` object created by the
:py:mod:`argparse` module. It contains the values of the
different options.
====================== ====== ================================
Options Type Description
====================== ====== ================================
``--evec`` string The EVEC file from EIGENSOFT
``--scree-plot-title`` string The main title of the scree plot
``--out`` string The name of the output file
====================== ====== ================================
.. note::
No option check is done here (except for the one automatically done by
:py:mod:`argparse`). Those need to be done elsewhere (see
:py:func:`checkArgs`). | Below is the instruction that describes the task:
### Input:
Parses the command line options and arguments.
:returns: A :py:class:`argparse.Namespace` object created by the
:py:mod:`argparse` module. It contains the values of the
different options.
====================== ====== ================================
Options Type Description
====================== ====== ================================
``--evec`` string The EVEC file from EIGENSOFT
``--scree-plot-title`` string The main title of the scree plot
``--out`` string The name of the output file
====================== ====== ================================
.. note::
No option check is done here (except for the one automatically done by
:py:mod:`argparse`). Those need to be done elsewhere (see
:py:func:`checkArgs`).
### Response:
def parse_args(argString=None):
"""Parses the command line options and arguments.
:returns: A :py:class:`argparse.Namespace` object created by the
:py:mod:`argparse` module. It contains the values of the
different options.
====================== ====== ================================
Options Type Description
====================== ====== ================================
``--evec`` string The EVEC file from EIGENSOFT
``--scree-plot-title`` string The main title of the scree plot
``--out`` string The name of the output file
====================== ====== ================================
.. note::
No option check is done here (except for the one automatically done by
:py:mod:`argparse`). Those need to be done elsewhere (see
:py:func:`checkArgs`).
"""
args = None
if argString is None:
args = parser.parse_args()
else:
args = parser.parse_args(argString)
return args |
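The options table in the docstring above maps directly onto an argparse setup. The sketch below is a hypothetical reconstruction of the module-level `parser` this function relies on (option names are taken from the table; the description and defaults are assumptions), and it also shows why accepting `argString` as a list makes the function easy to unit test.

```python
import argparse

# Hypothetical reconstruction of the module-level parser used by parse_args();
# option names come from the docstring table, defaults are illustrative.
parser = argparse.ArgumentParser(description="Plot a scree plot from EIGENSOFT results.")
parser.add_argument("--evec", type=str, required=True,
                    help="The EVEC file from EIGENSOFT")
parser.add_argument("--scree-plot-title", type=str, default="Scree Plot",
                    help="The main title of the scree plot")
parser.add_argument("--out", type=str, default="scree_plot.png",
                    help="The name of the output file")

# Passing a list parses that list instead of sys.argv, which is what makes
# parse_args(argString) convenient to call from tests.
args = parser.parse_args(["--evec", "data.evec", "--out", "plot.png"])
print(args.evec, args.scree_plot_title, args.out)
```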
def replace_product_by_id(cls, product_id, product, **kwargs):
"""Replace Product
Replace all attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to replace (required)
:param Product product: Attributes of product to replace (required)
:return: Product
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
else:
(data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
return data | Replace Product
Replace all attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to replace (required)
:param Product product: Attributes of product to replace (required)
:return: Product
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Replace Product
Replace all attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to replace (required)
:param Product product: Attributes of product to replace (required)
:return: Product
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_product_by_id(cls, product_id, product, **kwargs):
"""Replace Product
Replace all attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to replace (required)
:param Product product: Attributes of product to replace (required)
:return: Product
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
else:
(data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
return data |
def compact_elements(elements):
"""
Create a string (with ranges) given a list of element numbers
For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne"
"""
if len(elements) == 0:
return
# We have to convert to integers for this function
elements = [int(el) for el in elements]
# Just to be safe, sort the list
el = sorted(set(elements))
ranges = []
i = 0
while i < len(el):
start_el = el[i]
end_el = start_el
i += 1
while i < len(el):
if el[i] != end_el + 1:
break
end_el += 1
i += 1
if start_el == end_el:
ranges.append([start_el])
else:
ranges.append([start_el, end_el])
# Convert to elemental symbols
range_strs = []
for r in ranges:
sym = lut.element_sym_from_Z(r[0], True)
if len(r) == 1:
range_strs.append(sym)
elif len(r) == 2 and r[1] == r[0] + 1:
sym2 = lut.element_sym_from_Z(r[1], True)
range_strs.append(sym + "," + sym2)
else:
sym2 = lut.element_sym_from_Z(r[1], True)
range_strs.append(sym + "-" + sym2)
return ",".join(range_strs) | Create a string (with ranges) given a list of element numbers
For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne" | Below is the instruction that describes the task:
### Input:
Create a string (with ranges) given a list of element numbers
For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne"
### Response:
def compact_elements(elements):
"""
Create a string (with ranges) given a list of element numbers
For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne"
"""
if len(elements) == 0:
return
# We have to convert to integers for this function
elements = [int(el) for el in elements]
# Just to be safe, sort the list
el = sorted(set(elements))
ranges = []
i = 0
while i < len(el):
start_el = el[i]
end_el = start_el
i += 1
while i < len(el):
if el[i] != end_el + 1:
break
end_el += 1
i += 1
if start_el == end_el:
ranges.append([start_el])
else:
ranges.append([start_el, end_el])
# Convert to elemental symbols
range_strs = []
for r in ranges:
sym = lut.element_sym_from_Z(r[0], True)
if len(r) == 1:
range_strs.append(sym)
elif len(r) == 2 and r[1] == r[0] + 1:
sym2 = lut.element_sym_from_Z(r[1], True)
range_strs.append(sym + "," + sym2)
else:
sym2 = lut.element_sym_from_Z(r[1], True)
range_strs.append(sym + "-" + sym2)
return ",".join(range_strs) |
def lint(cls, document, is_saved, flags=''):
"""Plugin interface to pyls linter.
Args:
document: The document to be linted.
is_saved: Whether or not the file has been saved to disk.
flags: Additional flags to pass to pylint. Not exposed to
pyls_lint, but used for testing.
Returns:
A list of dicts with the following format:
{
'source': 'pylint',
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
}
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
}
"""
if not is_saved:
# Pylint can only be run on files that have been saved to disk.
# Rather than return nothing, return the previous list of
# diagnostics. If we return an empty list, any diagnostics we'd
# previously shown will be cleared until the next save. Instead,
# continue showing (possibly stale) diagnostics until the next
# save.
return cls.last_diags[document.path]
# py_run will call shlex.split on its arguments, and shlex.split does
# not handle Windows paths (it will try to perform escaping). Turn
# backslashes into forward slashes first to avoid this issue.
path = document.path
if sys.platform.startswith('win'):
path = path.replace('\\', '/')
out, _err = py_run(
'{} -f json {}'.format(path, flags), return_std=True
)
# pylint prints nothing rather than [] when there are no diagnostics.
# json.loads will not parse an empty string, so just return.
json_str = out.getvalue()
if not json_str.strip():
cls.last_diags[document.path] = []
return []
# Pylint's JSON output is a list of objects with the following format.
#
# {
# "obj": "main",
# "path": "foo.py",
# "message": "Missing function docstring",
# "message-id": "C0111",
# "symbol": "missing-docstring",
# "column": 0,
# "type": "convention",
# "line": 5,
# "module": "foo"
# }
#
# The type can be any of:
#
# * convention
# * error
# * fatal
# * refactor
# * warning
diagnostics = []
for diag in json.loads(json_str):
# pylint lines index from 1, pyls lines index from 0
line = diag['line'] - 1
# But both index columns from 0
col = diag['column']
# It's possible that we're linting an empty file. Even an empty
# file might fail linting if it isn't named properly.
end_col = len(document.lines[line]) if document.lines else 0
err_range = {
'start': {
'line': line,
'character': col,
},
'end': {
'line': line,
'character': end_col,
},
}
if diag['type'] == 'convention':
severity = lsp.DiagnosticSeverity.Information
elif diag['type'] == 'error':
severity = lsp.DiagnosticSeverity.Error
elif diag['type'] == 'fatal':
severity = lsp.DiagnosticSeverity.Error
elif diag['type'] == 'refactor':
severity = lsp.DiagnosticSeverity.Hint
elif diag['type'] == 'warning':
severity = lsp.DiagnosticSeverity.Warning
diagnostics.append({
'source': 'pylint',
'range': err_range,
'message': '[{}] {}'.format(diag['symbol'], diag['message']),
'severity': severity,
'code': diag['message-id']
})
cls.last_diags[document.path] = diagnostics
return diagnostics | Plugin interface to pyls linter.
Args:
document: The document to be linted.
is_saved: Whether or not the file has been saved to disk.
flags: Additional flags to pass to pylint. Not exposed to
pyls_lint, but used for testing.
Returns:
A list of dicts with the following format:
{
'source': 'pylint',
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
}
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
} | Below is the instruction that describes the task:
### Input:
Plugin interface to pyls linter.
Args:
document: The document to be linted.
is_saved: Whether or not the file has been saved to disk.
flags: Additional flags to pass to pylint. Not exposed to
pyls_lint, but used for testing.
Returns:
A list of dicts with the following format:
{
'source': 'pylint',
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
}
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
}
### Response:
def lint(cls, document, is_saved, flags=''):
"""Plugin interface to pyls linter.
Args:
document: The document to be linted.
is_saved: Whether or not the file has been saved to disk.
flags: Additional flags to pass to pylint. Not exposed to
pyls_lint, but used for testing.
Returns:
A list of dicts with the following format:
{
'source': 'pylint',
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
}
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
}
"""
if not is_saved:
# Pylint can only be run on files that have been saved to disk.
# Rather than return nothing, return the previous list of
# diagnostics. If we return an empty list, any diagnostics we'd
# previously shown will be cleared until the next save. Instead,
# continue showing (possibly stale) diagnostics until the next
# save.
return cls.last_diags[document.path]
# py_run will call shlex.split on its arguments, and shlex.split does
# not handle Windows paths (it will try to perform escaping). Turn
# backslashes into forward slashes first to avoid this issue.
path = document.path
if sys.platform.startswith('win'):
path = path.replace('\\', '/')
out, _err = py_run(
'{} -f json {}'.format(path, flags), return_std=True
)
# pylint prints nothing rather than [] when there are no diagnostics.
# json.loads will not parse an empty string, so just return.
json_str = out.getvalue()
if not json_str.strip():
cls.last_diags[document.path] = []
return []
# Pylint's JSON output is a list of objects with the following format.
#
# {
# "obj": "main",
# "path": "foo.py",
# "message": "Missing function docstring",
# "message-id": "C0111",
# "symbol": "missing-docstring",
# "column": 0,
# "type": "convention",
# "line": 5,
# "module": "foo"
# }
#
# The type can be any of:
#
# * convention
# * error
# * fatal
# * refactor
# * warning
diagnostics = []
for diag in json.loads(json_str):
# pylint lines index from 1, pyls lines index from 0
line = diag['line'] - 1
# But both index columns from 0
col = diag['column']
# It's possible that we're linting an empty file. Even an empty
# file might fail linting if it isn't named properly.
end_col = len(document.lines[line]) if document.lines else 0
err_range = {
'start': {
'line': line,
'character': col,
},
'end': {
'line': line,
'character': end_col,
},
}
if diag['type'] == 'convention':
severity = lsp.DiagnosticSeverity.Information
elif diag['type'] == 'error':
severity = lsp.DiagnosticSeverity.Error
elif diag['type'] == 'fatal':
severity = lsp.DiagnosticSeverity.Error
elif diag['type'] == 'refactor':
severity = lsp.DiagnosticSeverity.Hint
elif diag['type'] == 'warning':
severity = lsp.DiagnosticSeverity.Warning
diagnostics.append({
'source': 'pylint',
'range': err_range,
'message': '[{}] {}'.format(diag['symbol'], diag['message']),
'severity': severity,
'code': diag['message-id']
})
cls.last_diags[document.path] = diagnostics
return diagnostics |
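The heart of the loop above is turning one pylint JSON record into the diagnostic dict described in the docstring. A self-contained sketch of that mapping follows, with the LSP severities written as their numeric values (1=Error, 2=Warning, 3=Information, 4=Hint) and an invented sample record; it is illustrative rather than a drop-in replacement for the plugin.

```python
# Map pylint message types to LSP DiagnosticSeverity values
# (1=Error, 2=Warning, 3=Information, 4=Hint).
_SEVERITY = {"convention": 3, "error": 1, "fatal": 1, "refactor": 4, "warning": 2}

def to_diagnostic(diag, line_text=""):
    line = diag["line"] - 1      # pylint counts lines from 1, LSP from 0
    return {
        "source": "pylint",
        "range": {
            "start": {"line": line, "character": diag["column"]},
            "end": {"line": line, "character": len(line_text)},
        },
        "message": "[{}] {}".format(diag["symbol"], diag["message"]),
        "severity": _SEVERITY.get(diag["type"], 2),
        "code": diag["message-id"],
    }

sample = {"line": 5, "column": 0, "type": "convention",
          "symbol": "missing-docstring", "message": "Missing function docstring",
          "message-id": "C0111"}
print(to_diagnostic(sample, "def foo():"))
```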
def _find_resource_id_from_path(path):
"""
Get a folder id from a path on the server.
Warning: This is NOT efficient at all.
The schema for this path is:
path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/}
name := <firstname> , "_" , <lastname>
:param path: The virtual path on the server.
:type path: string
:returns: a tuple indicating True or False about whether the resource is an
item and id of the resource i.e. (True, item_id) or (False, folder_id)
:rtype: (bool, int | long)
"""
session.token = verify_credentials()
parsed_path = path.split('/')
if parsed_path[-1] == '':
parsed_path.pop()
if path.startswith('/users/'):
parsed_path.pop(0) # remove '' before /
parsed_path.pop(0) # remove 'users'
name = parsed_path.pop(0) # remove '<firstname>_<lastname>'
firstname, lastname = name.split('_')
end = parsed_path.pop()
user = session.communicator.get_user_by_name(firstname, lastname)
leaf_folder_id = _descend_folder_for_id(parsed_path, user['folder_id'])
return _search_folder_for_item_or_folder(end, leaf_folder_id)
elif path.startswith('/communities/'):
print(parsed_path)
parsed_path.pop(0) # remove '' before /
parsed_path.pop(0) # remove 'communities'
community_name = parsed_path.pop(0) # remove '<community>'
end = parsed_path.pop()
community = session.communicator.get_community_by_name(community_name)
leaf_folder_id = _descend_folder_for_id(parsed_path,
community['folder_id'])
return _search_folder_for_item_or_folder(end, leaf_folder_id)
else:
return False, -1 | Get a folder id from a path on the server.
Warning: This is NOT efficient at all.
The schema for this path is:
path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/}
name := <firstname> , "_" , <lastname>
:param path: The virtual path on the server.
:type path: string
:returns: a tuple indicating True or False about whether the resource is an
item and id of the resource i.e. (True, item_id) or (False, folder_id)
:rtype: (bool, int | long) | Below is the instruction that describes the task:
### Input:
Get a folder id from a path on the server.
Warning: This is NOT efficient at all.
The schema for this path is:
path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/}
name := <firstname> , "_" , <lastname>
:param path: The virtual path on the server.
:type path: string
:returns: a tuple indicating True or False about whether the resource is an
item and id of the resource i.e. (True, item_id) or (False, folder_id)
:rtype: (bool, int | long)
### Response:
def _find_resource_id_from_path(path):
"""
Get a folder id from a path on the server.
Warning: This is NOT efficient at all.
The schema for this path is:
path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/}
name := <firstname> , "_" , <lastname>
:param path: The virtual path on the server.
:type path: string
:returns: a tuple indicating True or False about whether the resource is an
item and id of the resource i.e. (True, item_id) or (False, folder_id)
:rtype: (bool, int | long)
"""
session.token = verify_credentials()
parsed_path = path.split('/')
if parsed_path[-1] == '':
parsed_path.pop()
if path.startswith('/users/'):
parsed_path.pop(0) # remove '' before /
parsed_path.pop(0) # remove 'users'
name = parsed_path.pop(0) # remove '<firstname>_<lastname>'
firstname, lastname = name.split('_')
end = parsed_path.pop()
user = session.communicator.get_user_by_name(firstname, lastname)
leaf_folder_id = _descend_folder_for_id(parsed_path, user['folder_id'])
return _search_folder_for_item_or_folder(end, leaf_folder_id)
elif path.startswith('/communities/'):
print(parsed_path)
parsed_path.pop(0) # remove '' before /
parsed_path.pop(0) # remove 'communities'
community_name = parsed_path.pop(0) # remove '<community>'
end = parsed_path.pop()
community = session.communicator.get_community_by_name(community_name)
leaf_folder_id = _descend_folder_for_id(parsed_path,
community['folder_id'])
return _search_folder_for_item_or_folder(end, leaf_folder_id)
else:
return False, -1 |
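The path grammar in the docstring is easiest to see with a small standalone parser. The helper below is purely illustrative (it is not part of the real API); it just splits a virtual path into its resource kind, owner, intermediate folders, and leaf name.

```python
def split_virtual_path(path):
    # Illustrative only: split a server path per the grammar in the docstring above.
    parts = [p for p in path.split("/") if p]
    if len(parts) < 2 or parts[0] not in ("users", "communities"):
        return None
    kind, owner = parts[0], parts[1]
    rest = parts[2:]
    subfolders, leaf = rest[:-1], (rest[-1] if rest else None)
    return kind, owner, subfolders, leaf

print(split_virtual_path("/users/Jane_Doe/Public/scans/image.tif"))
# -> ('users', 'Jane_Doe', ['Public', 'scans'], 'image.tif')
```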
def range_request(numbytes):
"""Streams n random bytes generated with given seed, at given chunk size per packet.
---
tags:
- Dynamic data
parameters:
- in: path
name: numbytes
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
"""
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(
headers={"ETag": "range%d" % numbytes, "Accept-Ranges": "bytes"}
)
response.status_code = 404
response.data = "number of bytes must be in the range (0, 102400]"
return response
params = CaseInsensitiveDict(request.args.items())
if "chunk_size" in params:
chunk_size = max(1, int(params["chunk_size"]))
else:
chunk_size = 10 * 1024
duration = float(params.get("duration", 0))
pause_per_byte = duration / numbytes
request_headers = get_headers()
first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)
range_length = (last_byte_pos + 1) - first_byte_pos
if (
first_byte_pos > last_byte_pos
or first_byte_pos not in xrange(0, numbytes)
or last_byte_pos not in xrange(0, numbytes)
):
response = Response(
headers={
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Range": "bytes */%d" % numbytes,
"Content-Length": "0",
}
)
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord("a") + (i % 26))
if len(chunks) == chunk_size:
yield (bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield (bytes(chunks))
content_range = "bytes %d-%d/%d" % (first_byte_pos, last_byte_pos, numbytes)
response_headers = {
"Content-Type": "application/octet-stream",
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Length": str(range_length),
"Content-Range": content_range,
}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response | Streams n random bytes generated with given seed, at given chunk size per packet.
---
tags:
- Dynamic data
parameters:
- in: path
name: numbytes
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes. | Below is the instruction that describes the task:
### Input:
Streams n random bytes generated with given seed, at given chunk size per packet.
---
tags:
- Dynamic data
parameters:
- in: path
name: numbytes
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
### Response:
def range_request(numbytes):
"""Streams n random bytes generated with given seed, at given chunk size per packet.
---
tags:
- Dynamic data
parameters:
- in: path
name: numbytes
type: int
produces:
- application/octet-stream
responses:
200:
description: Bytes.
"""
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(
headers={"ETag": "range%d" % numbytes, "Accept-Ranges": "bytes"}
)
response.status_code = 404
response.data = "number of bytes must be in the range (0, 102400]"
return response
params = CaseInsensitiveDict(request.args.items())
if "chunk_size" in params:
chunk_size = max(1, int(params["chunk_size"]))
else:
chunk_size = 10 * 1024
duration = float(params.get("duration", 0))
pause_per_byte = duration / numbytes
request_headers = get_headers()
first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes)
range_length = (last_byte_pos + 1) - first_byte_pos
if (
first_byte_pos > last_byte_pos
or first_byte_pos not in xrange(0, numbytes)
or last_byte_pos not in xrange(0, numbytes)
):
response = Response(
headers={
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Range": "bytes */%d" % numbytes,
"Content-Length": "0",
}
)
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord("a") + (i % 26))
if len(chunks) == chunk_size:
yield (bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield (bytes(chunks))
content_range = "bytes %d-%d/%d" % (first_byte_pos, last_byte_pos, numbytes)
response_headers = {
"Content-Type": "application/octet-stream",
"ETag": "range%d" % numbytes,
"Accept-Ranges": "bytes",
"Content-Length": str(range_length),
"Content-Range": content_range,
}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response |
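Because byte ``i`` of the virtual resource is always ``ord('a') + (i % 26)``, two requests over the same offsets always return identical content, which is what makes partial responses from this endpoint verifiable. The sketch below is standalone and uses a hand-rolled parser that only handles the plain ``bytes=first-last`` form (the endpoint itself relies on the fuller ``get_request_range`` helper).

```python
def parse_simple_range(header, numbytes):
    # Only the simple "bytes=first-last" form; open-ended forms are out of scope here.
    first, _, last = header.split("=", 1)[1].partition("-")
    first = int(first) if first else 0
    last = int(last) if last else numbytes - 1
    return first, last

def body_for_range(first, last):
    # Byte i is always the same letter, so ranges are reproducible across requests.
    return bytes(ord("a") + (i % 26) for i in range(first, last + 1))

numbytes = 128
first, last = parse_simple_range("bytes=10-19", numbytes)
print(body_for_range(first, last))                       # b'klmnopqrst'
print("Content-Range: bytes %d-%d/%d" % (first, last, numbytes))
```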
def create_report(self, report_type, account_id, term_id=None, params={}):
"""
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
"""
if term_id is not None:
params["enrollment_term_id"] = term_id
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
data["account_id"] = account_id
return Report(data=data) | Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create | Below is the instruction that describes the task:
### Input:
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
### Response:
def create_report(self, report_type, account_id, term_id=None, params={}):
"""
Generates a report instance for the canvas account id.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
"""
if term_id is not None:
params["enrollment_term_id"] = term_id
url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(
report_type)
body = {"parameters": params}
data = self._post_resource(url, body)
data["account_id"] = account_id
return Report(data=data) |
def make_error_response(self, validation_error, expose_errors):
""" Return an appropriate ``HttpResponse`` on authentication failure.
In case of an error, the specification only details the inclusion of the
``WWW-Authenticate`` header. Additionally, when allowed by the
specification, we respond with error details formatted in JSON in the body
of the response. For more information, read the specification:
http://tools.ietf.org/html/rfc6750#section-3.1 .
:param validation_error: A
:py:class:`djoauth2.access_token.AuthenticationError` raised by the
:py:meth:`validate` method.
:param expose_errors: A boolean describing whether or not to expose error
information in the error response, as described by the section of the
specification linked to above.
:rtype: a Django ``HttpResponse``.
"""
authenticate_header = ['Bearer realm="{}"'.format(settings.DJOAUTH2_REALM)]
if not expose_errors:
response = HttpResponse(status=400)
response['WWW-Authenticate'] = ', '.join(authenticate_header)
return response
status_code = 401
error_details = get_error_details(validation_error)
if isinstance(validation_error, InvalidRequest):
status_code = 400
elif isinstance(validation_error, InvalidToken):
status_code = 401
elif isinstance(validation_error, InsufficientScope):
error_details['scope'] = ' '.join(self.required_scope_names)
status_code = 403
# TODO(peter): should we return response details as JSON? This is not
# touched upon by the spec and may limit use of this library. Many
# programmers use other transport languages such as YAML or XML. All of the
# error information is already included in the headers.
response = HttpResponse(content=json.dumps(error_details),
content_type='application/json',
status=status_code)
for key, value in error_details.iteritems():
authenticate_header.append('{}="{}"'.format(key, value))
response['WWW-Authenticate'] = ', '.join(authenticate_header)
return response | Return an appropriate ``HttpResponse`` on authentication failure.
In case of an error, the specification only details the inclusion of the
``WWW-Authenticate`` header. Additionally, when allowed by the
specification, we respond with error details formatted in JSON in the body
of the response. For more information, read the specification:
http://tools.ietf.org/html/rfc6750#section-3.1 .
:param validation_error: A
:py:class:`djoauth2.access_token.AuthenticationError` raised by the
:py:meth:`validate` method.
:param expose_errors: A boolean describing whether or not to expose error
information in the error response, as described by the section of the
specification linked to above.
:rtype: a Django ``HttpResponse``. | Below is the instruction that describes the task:
### Input:
Return an appropriate ``HttpResponse`` on authentication failure.
In case of an error, the specification only details the inclusion of the
``WWW-Authenticate`` header. Additionally, when allowed by the
specification, we respond with error details formatted in JSON in the body
of the response. For more information, read the specification:
http://tools.ietf.org/html/rfc6750#section-3.1 .
:param validation_error: A
:py:class:`djoauth2.access_token.AuthenticationError` raised by the
:py:meth:`validate` method.
:param expose_errors: A boolean describing whether or not to expose error
information in the error response, as described by the section of the
specification linked to above.
:rtype: a Django ``HttpResponse``.
### Response:
def make_error_response(self, validation_error, expose_errors):
""" Return an appropriate ``HttpResponse`` on authentication failure.
In case of an error, the specification only details the inclusion of the
``WWW-Authenticate`` header. Additionally, when allowed by the
specification, we respond with error details formatted in JSON in the body
of the response. For more information, read the specification:
http://tools.ietf.org/html/rfc6750#section-3.1 .
:param validation_error: A
:py:class:`djoauth2.access_token.AuthenticationError` raised by the
:py:meth:`validate` method.
:param expose_errors: A boolean describing whether or not to expose error
information in the error response, as described by the section of the
specification linked to above.
:rtype: a Django ``HttpResponse``.
"""
authenticate_header = ['Bearer realm="{}"'.format(settings.DJOAUTH2_REALM)]
if not expose_errors:
response = HttpResponse(status=400)
response['WWW-Authenticate'] = ', '.join(authenticate_header)
return response
status_code = 401
error_details = get_error_details(validation_error)
if isinstance(validation_error, InvalidRequest):
status_code = 400
elif isinstance(validation_error, InvalidToken):
status_code = 401
elif isinstance(validation_error, InsufficientScope):
error_details['scope'] = ' '.join(self.required_scope_names)
status_code = 403
# TODO(peter): should we return response details as JSON? This is not
# touched upon by the spec and may limit use of this library. Many
# programmers use other transport languages such as YAML or XML. All of the
# error information is already included in the headers.
response = HttpResponse(content=json.dumps(error_details),
content_type='application/json',
status=status_code)
for key, value in error_details.iteritems():
authenticate_header.append('{}="{}"'.format(key, value))
response['WWW-Authenticate'] = ', '.join(authenticate_header)
return response |
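The shape of the ``WWW-Authenticate`` challenge assembled above is a ``Bearer realm`` entry followed by ``key="value"`` pairs for any exposed error details. A minimal sketch of just that assembly (illustrative only; it uses ``dict.items()`` rather than the Python 2 ``iteritems`` seen in the method):

```python
def build_www_authenticate(realm, error_details=None):
    # Start with the realm, then append one key="value" pair per error detail.
    parts = ['Bearer realm="{}"'.format(realm)]
    for key, value in (error_details or {}).items():
        parts.append('{}="{}"'.format(key, value))
    return ", ".join(parts)

print(build_www_authenticate("example",
                             {"error": "insufficient_scope", "scope": "read write"}))
# Bearer realm="example", error="insufficient_scope", scope="read write"
```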
def check(self, item_id):
"""Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not.
"""
response = self._request("tasks/view/{id}".format(id=item_id))
if response.status_code == 404:
# probably an unknown task id
return False
try:
content = json.loads(response.content.decode('utf-8'))
status = content['task']["status"]
if status == 'completed' or status == "reported":
return True
except ValueError as e:
raise sandboxapi.SandboxError(e)
return False | Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not. | Below is the instruction that describes the task:
### Input:
Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not.
### Response:
def check(self, item_id):
"""Check if an analysis is complete
:type item_id: int
:param item_id: task_id to check.
:rtype: bool
:return: Boolean indicating if a report is done or not.
"""
response = self._request("tasks/view/{id}".format(id=item_id))
if response.status_code == 404:
# probably an unknown task id
return False
try:
content = json.loads(response.content.decode('utf-8'))
status = content['task']["status"]
if status == 'completed' or status == "reported":
return True
except ValueError as e:
raise sandboxapi.SandboxError(e)
return False |
def write_all(filename, jobjects):
"""
Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list
"""
array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object"))
for i in xrange(len(jobjects)):
obj = jobjects[i]
if isinstance(obj, JavaObject):
obj = obj.jobject
javabridge.get_env().set_object_array_element(array, i, obj)
javabridge.static_call(
"Lweka/core/SerializationHelper;", "writeAll",
"(Ljava/lang/String;[Ljava/lang/Object;)V",
filename, array) | Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list | Below is the instruction that describes the task:
### Input:
Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list
### Response:
def write_all(filename, jobjects):
"""
Serializes the list of objects to disk. JavaObject instances get automatically unwrapped.
:param filename: the file to serialize the object to
:type filename: str
:param jobjects: the list of objects to serialize
:type jobjects: list
"""
array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object"))
for i in xrange(len(jobjects)):
obj = jobjects[i]
if isinstance(obj, JavaObject):
obj = obj.jobject
javabridge.get_env().set_object_array_element(array, i, obj)
javabridge.static_call(
"Lweka/core/SerializationHelper;", "writeAll",
"(Ljava/lang/String;[Ljava/lang/Object;)V",
filename, array) |
def get_digital_channels(channel_list):
"""Goes through channel list and returns digital channels with ids
Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30."""
dig_ids = digital_channel_ids()
dig_channels = []
for ln in dig_ids:
for ch in channel_list:
if ch.dct['id'] == ln:
dig_channels.append(ch)
break
return dig_channels | Goes through channel list and returns digital channels with ids
Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30. | Below is the instruction that describes the task:
### Input:
Goes through channel list and returns digital channels with ids
Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.
### Response:
def get_digital_channels(channel_list):
"""Goes through channel list and returns digital channels with ids
Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30."""
dig_ids = digital_channel_ids()
dig_channels = []
for ln in dig_ids:
for ch in channel_list:
if ch.dct['id'] == ln:
dig_channels.append(ch)
break
return dig_channels |
def platform(self, with_prompt, platforms=None):
"""Return the platform name based on the prompt matching."""
if platforms is None:
platforms = self._dict['generic']['prompt_detection']
for platform in platforms:
pattern = self.pattern(platform, 'prompt')
result = re.search(pattern, with_prompt)
if result:
return platform
return None | Return the platform name based on the prompt matching. | Below is the the instruction that describes the task:
### Input:
Return the platform name based on the prompt matching.
### Response:
def platform(self, with_prompt, platforms=None):
"""Return the platform name based on the prompt matching."""
if platforms is None:
platforms = self._dict['generic']['prompt_detection']
for platform in platforms:
pattern = self.pattern(platform, 'prompt')
result = re.search(pattern, with_prompt)
if result:
return platform
return None |
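Prompt-based detection amounts to trying each platform's prompt regex against the captured prompt text and returning the first match. A standalone sketch with two hypothetical patterns (the real patterns live in ``self._dict`` and are returned by ``self.pattern``):

```python
import re

# Hypothetical prompt patterns; the real ones are loaded from the device dictionary.
PROMPT_PATTERNS = {
    "ios": r"^[\w\-.]+[>#]\s*$",
    "junos": r"^[\w\-.@]+>\s*$",
}

def detect_platform(prompt, patterns=PROMPT_PATTERNS):
    for name, pattern in patterns.items():
        if re.search(pattern, prompt):
            return name
    return None

print(detect_platform("edge-router1#"))  # -> ios
print(detect_platform("user@edge1>"))    # -> junos
```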
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
stores = match.get('Stores', {})
for volume_name, volume in iter(stores.items()):
datetime_value = volume.get('CreationDate', None)
if not datetime_value:
continue
partial_path = volume['PartialPath']
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
volume_name, partial_path)
event_data.key = ''
event_data.root = '/Stores'
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. | Below is the instruction that describes the task:
### Input:
Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
### Response:
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
"""Extracts relevant Volume Configuration Spotlight entries.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
"""
stores = match.get('Stores', {})
for volume_name, volume in iter(stores.items()):
datetime_value = volume.get('CreationDate', None)
if not datetime_value:
continue
partial_path = volume['PartialPath']
event_data = plist_event.PlistTimeEventData()
event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
volume_name, partial_path)
event_data.key = ''
event_data.root = '/Stores'
event = time_events.PythonDatetimeEvent(
datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def extract_object_names_from_docs(filename):
"""Add matches from the text blocks (must be full names!)"""
text = split_code_and_text_blocks(filename)[1]
text = '\n'.join(t[1] for t in text if t[0] == 'text')
regex = re.compile(r':(?:'
r'func(?:tion)?|'
r'meth(?:od)?|'
r'attr(?:ibute)?|'
r'obj(?:ect)?|'
r'class):`(\S*)`'
)
return [(x, x) for x in re.findall(regex, text)] | Add matches from the text blocks (must be full names!) | Below is the the instruction that describes the task:
### Input:
Add matches from the text blocks (must be full names!)
### Response:
def extract_object_names_from_docs(filename):
"""Add matches from the text blocks (must be full names!)"""
text = split_code_and_text_blocks(filename)[1]
text = '\n'.join(t[1] for t in text if t[0] == 'text')
regex = re.compile(r':(?:'
r'func(?:tion)?|'
r'meth(?:od)?|'
r'attr(?:ibute)?|'
r'obj(?:ect)?|'
r'class):`(\S*)`'
)
return [(x, x) for x in re.findall(regex, text)] |
def get_presig(self, target, source, env):
"""Return the signature contents of this action list.
Simple concatenation of the signatures of the elements.
"""
return b"".join([bytes(x.get_contents(target, source, env)) for x in self.list]) | Return the signature contents of this action list.
Simple concatenation of the signatures of the elements. | Below is the instruction that describes the task:
### Input:
Return the signature contents of this action list.
Simple concatenation of the signatures of the elements.
### Response:
def get_presig(self, target, source, env):
"""Return the signature contents of this action list.
Simple concatenation of the signatures of the elements.
"""
return b"".join([bytes(x.get_contents(target, source, env)) for x in self.list]) |
def select_catalogue(self, valid_id):
'''
Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class
'''
if not np.any(valid_id):
# No events selected - create clean instance of class
output = Catalogue()
output.processes = self.catalogue.processes
elif np.all(valid_id):
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
else:
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
output.purge_catalogue(valid_id)
return output | Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class | Below is the instruction that describes the task:
### Input:
Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class
### Response:
def select_catalogue(self, valid_id):
'''
Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class
'''
if not np.any(valid_id):
# No events selected - create clean instance of class
output = Catalogue()
output.processes = self.catalogue.processes
elif np.all(valid_id):
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
else:
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
output.purge_catalogue(valid_id)
return output |
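The three branches hinge on ``np.any``/``np.all`` over the boolean selection vector, so only the mixed case pays for copying and purging. A tiny standalone illustration of that branching:

```python
import numpy as np

valid_id = np.array([True, False, True, True])

if not np.any(valid_id):
    print("no events selected -> return an empty catalogue")
elif np.all(valid_id):
    print("all events selected -> the catalogue can be returned unchanged")
else:
    kept = np.arange(valid_id.size)[valid_id]
    print("mixed selection -> purge, keeping event indices", kept)  # [0 2 3]
```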
def open(self, mode=MODE_READ):
"""
Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour.
"""
if mode not in [MODE_READ, MODE_WRITE]:
error = "Open mode must be '{}' or '{}'".format(
MODE_READ, MODE_WRITE)
raise ValueError(error)
self._openMode = mode
if mode == MODE_READ:
self.assertExists()
if mode == MODE_READ:
# This is part of the transitional behaviour where
# we load the whole DB into memory to get access to
# the data model.
self.load() | Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour. | Below is the instruction that describes the task:
### Input:
Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour.
### Response:
def open(self, mode=MODE_READ):
"""
Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour.
"""
if mode not in [MODE_READ, MODE_WRITE]:
error = "Open mode must be '{}' or '{}'".format(
MODE_READ, MODE_WRITE)
raise ValueError(error)
self._openMode = mode
if mode == MODE_READ:
self.assertExists()
if mode == MODE_READ:
# This is part of the transitional behaviour where
# we load the whole DB into memory to get access to
# the data model.
self.load() |
def get_current_hgnc_id(hgnc_name):
"""Return the HGNC ID(s) corresponding to a current or outdate HGNC symbol.
Parameters
----------
hgnc_name : str
The HGNC symbol to be converted, possibly an outdated symbol.
Returns
-------
str or list of str or None
If there is a single HGNC ID corresponding to the given current or
outdated HGNC symbol, that ID is returned as a string. If the symbol
is outdated and maps to multiple current IDs, a list of these
IDs is returned. If the given name doesn't correspond to either
a current or an outdated HGNC symbol, None is returned.
"""
hgnc_id = get_hgnc_id(hgnc_name)
if hgnc_id:
return hgnc_id
hgnc_id = prev_sym_map.get(hgnc_name)
return hgnc_id | Return the HGNC ID(s) corresponding to a current or outdated HGNC symbol.
Parameters
----------
hgnc_name : str
The HGNC symbol to be converted, possibly an outdated symbol.
Returns
-------
str or list of str or None
If there is a single HGNC ID corresponding to the given current or
outdated HGNC symbol, that ID is returned as a string. If the symbol
is outdated and maps to multiple current IDs, a list of these
IDs is returned. If the given name doesn't correspond to either
a current or an outdated HGNC symbol, None is returned. | Below is the instruction that describes the task:
### Input:
Return the HGNC ID(s) corresponding to a current or outdated HGNC symbol.
Parameters
----------
hgnc_name : str
The HGNC symbol to be converted, possibly an outdated symbol.
Returns
-------
str or list of str or None
If there is a single HGNC ID corresponding to the given current or
outdated HGNC symbol, that ID is returned as a string. If the symbol
is outdated and maps to multiple current IDs, a list of these
IDs is returned. If the given name doesn't correspond to either
a current or an outdated HGNC symbol, None is returned.
### Response:
def get_current_hgnc_id(hgnc_name):
"""Return the HGNC ID(s) corresponding to a current or outdate HGNC symbol.
Parameters
----------
hgnc_name : str
The HGNC symbol to be converted, possibly an outdated symbol.
Returns
-------
str or list of str or None
If there is a single HGNC ID corresponding to the given current or
outdated HGNC symbol, that ID is returned as a string. If the symbol
is outdated and maps to multiple current IDs, a list of these
IDs is returned. If the given name doesn't correspond to either
a current or an outdated HGNC symbol, None is returned.
"""
hgnc_id = get_hgnc_id(hgnc_name)
if hgnc_id:
return hgnc_id
hgnc_id = prev_sym_map.get(hgnc_name)
return hgnc_id |
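The lookup is a two-stage fall-through: the current-symbol table first, then the previous-symbol map, which is what lets outdated names still resolve. A sketch with made-up toy tables (the real ones are built from the HGNC resource file):

```python
# Toy tables with illustrative entries only.
CURRENT = {"GENE1": "1001", "GENE2": "1002"}
PREVIOUS = {"OLDGENE1": "1001", "SPLITGENE": ["1001", "1002"]}

def current_hgnc_id(symbol):
    # Prefer the current-symbol table; fall back to the previous-symbol map,
    # which can yield a single ID or a list of IDs for a split symbol.
    hgnc_id = CURRENT.get(symbol)
    if hgnc_id:
        return hgnc_id
    return PREVIOUS.get(symbol)

print(current_hgnc_id("GENE1"))      # '1001'
print(current_hgnc_id("OLDGENE1"))   # '1001'
print(current_hgnc_id("SPLITGENE"))  # ['1001', '1002']
print(current_hgnc_id("UNKNOWN"))    # None
```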
def get_edited_color_scheme(self):
"""
Get the values of the last edited color scheme to be used in an instant
preview in the preview editor, without using `apply`.
"""
color_scheme = {}
scheme_name = self.last_used_scheme
for key in self.widgets[scheme_name]:
items = self.widgets[scheme_name][key]
if len(items) == 1:
# ColorLayout
value = items[0].text()
else:
# ColorLayout + checkboxes
value = (items[0].text(), items[1].isChecked(),
items[2].isChecked())
color_scheme[key] = value
return color_scheme | Get the values of the last edited color scheme to be used in an instant
preview in the preview editor, without using `apply`. | Below is the instruction that describes the task:
### Input:
Get the values of the last edited color scheme to be used in an instant
preview in the preview editor, without using `apply`.
### Response:
def get_edited_color_scheme(self):
"""
Get the values of the last edited color scheme to be used in an instant
preview in the preview editor, without using `apply`.
"""
color_scheme = {}
scheme_name = self.last_used_scheme
for key in self.widgets[scheme_name]:
items = self.widgets[scheme_name][key]
if len(items) == 1:
# ColorLayout
value = items[0].text()
else:
# ColorLayout + checkboxes
value = (items[0].text(), items[1].isChecked(),
items[2].isChecked())
color_scheme[key] = value
return color_scheme |
def _set_params(self, v, load=False):
"""
Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_params is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_params() directly.
YANG Description: Configure BFD parameters for the tunnels to the remote site.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """params must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
})
self.__params = t
if hasattr(self, '_set'):
self._set() | Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_params is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_params() directly.
YANG Description: Configure BFD parameters for the tunnels to the remote site. | Below is the instruction that describes the task:
### Input:
Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_params is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_params() directly.
YANG Description: Configure BFD parameters for the tunnels to the remote site.
### Response:
def _set_params(self, v, load=False):
"""
Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_params is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_params() directly.
YANG Description: Configure BFD parameters for the tunnels to the remote site.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """params must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
})
self.__params = t
if hasattr(self, '_set'):
self._set() |
def parsed(self):
"""Get the code object which represents the compiled Python file.
This property is cached and only parses the content once.
"""
if not self._parsed:
self._parsed = compile(self.content, self.path, 'exec')
return self._parsed | Get the code object which represents the compiled Python file.
This property is cached and only parses the content once. | Below is the instruction that describes the task:
### Input:
Get the code object which represents the compiled Python file.
This property is cached and only parses the content once.
### Response:
def parsed(self):
"""Get the code object which represents the compiled Python file.
This property is cached and only parses the content once.
"""
if not self._parsed:
self._parsed = compile(self.content, self.path, 'exec')
return self._parsed |
def recommendations(self, **kwargs):
"""
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API. | Below is the instruction that describes the task:
### Input:
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
### Response:
def recommendations(self, **kwargs):
"""
Get a list of recommended movies for a movie.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def _patch_static_handler(handler):
"""Patch in support for static files serving if supported and enabled.
"""
if django.VERSION[:2] < (1, 3):
return
from django.contrib.staticfiles.handlers import StaticFilesHandler
return StaticFilesHandler(handler) | Patch in support for static files serving if supported and enabled. | Below is the the instruction that describes the task:
### Input:
Patch in support for static files serving if supported and enabled.
### Response:
def _patch_static_handler(handler):
"""Patch in support for static files serving if supported and enabled.
"""
if django.VERSION[:2] < (1, 3):
return
from django.contrib.staticfiles.handlers import StaticFilesHandler
return StaticFilesHandler(handler) |
def get_instance_by_bin_uuid(model, bin_uuid):
"""Get an instance by binary uuid.
:param model: a string, model name in rio.models.
:param bin_uuid: a 16-bytes binary string.
:return: None or a SQLAlchemy instance.
"""
try:
model = get_model(model)
except ImportError:
return None
return model.query.filter_by(**{'bin_uuid': bin_uuid}).first() | Get an instance by binary uuid.
:param model: a string, model name in rio.models.
:param bin_uuid: a 16-bytes binary string.
:return: None or a SQLAlchemy instance. | Below is the instruction that describes the task:
### Input:
Get an instance by binary uuid.
:param model: a string, model name in rio.models.
:param bin_uuid: a 16-bytes binary string.
:return: None or a SQLAlchemy instance.
### Response:
def get_instance_by_bin_uuid(model, bin_uuid):
"""Get an instance by binary uuid.
:param model: a string, model name in rio.models.
:param bin_uuid: a 16-bytes binary string.
:return: None or a SQLAlchemy instance.
"""
try:
model = get_model(model)
except ImportError:
return None
return model.query.filter_by(**{'bin_uuid': bin_uuid}).first() |
def getInferenceTypeFromLabel(cls, label):
"""
Extracts the PredictionKind (temporal vs. nontemporal) from the given
metric label.
:param label: (string) for a metric spec generated by
:meth:`getMetricLabel`
:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
"""
infType, _, _= label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
return None
return infType | Extracts the PredictionKind (temporal vs. nontemporal) from the given
metric label.
:param label: (string) for a metric spec generated by
:meth:`getMetricLabel`
:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`) | Below is the the instruction that describes the task:
### Input:
Extracts the PredictionKind (temporal vs. nontemporal) from the given
metric label.
:param label: (string) for a metric spec generated by
:meth:`getMetricLabel`
:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
### Response:
def getInferenceTypeFromLabel(cls, label):
"""
Extracts the PredictionKind (temporal vs. nontemporal) from the given
metric label.
:param label: (string) for a metric spec generated by
:meth:`getMetricLabel`
:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
"""
infType, _, _= label.partition(cls._LABEL_SEPARATOR)
if not InferenceType.validate(infType):
return None
return infType |
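A small illustration of the partition-based prefix extraction the method relies on; the ':' separator and the example label are assumptions rather than values taken from the library.
label = "TemporalMultiStep:aae:window=1000:field=consumption"  # hypothetical label
inf_type, _, rest = label.partition(":")   # assumed _LABEL_SEPARATOR of ":"
print(inf_type)   # -> "TemporalMultiStep"
print(rest)       # -> "aae:window=1000:field=consumption"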
def unsubscribe(self, code_list, subtype_list):
"""
Unsubscribe
:param code_list: list of stock codes to unsubscribe from
:param subtype_list: subscription types to cancel, see SubType
:return: (ret, err_message)
ret == RET_OK: err_message is None
ret != RET_OK: err_message is an error description string
"""
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
SubscriptionQuery.unpack_unsubscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
"conn_id": self.get_sync_conn_id()
}
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
continue
code_set = self._ctx_subscribe[subtype]
for code in code_list:
if code not in code_set:
continue
code_set.remove(code)
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(unpush_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None | Unsubscribe
:param code_list: list of stock codes to unsubscribe from
:param subtype_list: subscription types to cancel, see SubType
:return: (ret, err_message)
ret == RET_OK: err_message is None
ret != RET_OK: err_message is an error description string | Below is the the instruction that describes the task:
### Input:
Unsubscribe
:param code_list: list of stock codes to unsubscribe from
:param subtype_list: subscription types to cancel, see SubType
:return: (ret, err_message)
ret == RET_OK: err_message is None
ret != RET_OK: err_message is an error description string
### Response:
def unsubscribe(self, code_list, subtype_list):
"""
Unsubscribe
:param code_list: list of stock codes to unsubscribe from
:param subtype_list: subscription types to cancel, see SubType
:return: (ret, err_message)
ret == RET_OK: err_message is None
ret != RET_OK: err_message is an error description string
"""
ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
if ret != RET_OK:
return ret, msg
query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
SubscriptionQuery.unpack_unsubscribe_rsp)
kargs = {
'code_list': code_list,
'subtype_list': subtype_list,
"conn_id": self.get_sync_conn_id()
}
for subtype in subtype_list:
if subtype not in self._ctx_subscribe:
continue
code_set = self._ctx_subscribe[subtype]
for code in code_list:
if code not in code_set:
continue
code_set.remove(code)
ret_code, msg, _ = query_processor(**kargs)
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())
if ret_code != RET_OK:
return RET_ERROR, msg
ret_code, msg = self._send_async_req(unpush_req_str)
if ret_code != RET_OK:
return RET_ERROR, msg
return RET_OK, None |
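A brief usage sketch, assuming a futu-style OpenQuoteContext client; host, port and the stock code are placeholders.
from futu import OpenQuoteContext, SubType, RET_OK   # assumption: futu-api package layout

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE])
ret, err = quote_ctx.unsubscribe(['HK.00700'], [SubType.QUOTE])
if ret != RET_OK:
    print('unsubscribe failed:', err)
quote_ctx.close()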
def from_pyfile(self, filename: str) -> None:
"""Load values from a Python file."""
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename) | Load values from a Python file. | Below is the the instruction that describes the task:
### Input:
Load values from a Python file.
### Response:
def from_pyfile(self, filename: str) -> None:
"""Load values from a Python file."""
globals_ = {} # type: Dict[str, Any]
locals_ = {} # type: Dict[str, Any]
with open(filename, "rb") as f:
exec(compile(f.read(), filename, 'exec'), globals_, locals_)
for key, value in locals_.items():
if (key.isupper() and not isinstance(value, types.ModuleType)):
self._setattr(key, value)
logger.info("Config is loaded from file: %s", filename) |
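A brief usage sketch; the Config class instance is hypothetical, only the uppercase-name convention comes from the loader above.
# Create a settings file whose UPPERCASE names will be picked up.
with open('settings.py', 'w') as f:
    f.write("DEBUG = True\nSECRET_KEY = 'dev-key'\ntimeout = 30  # ignored: not uppercase\n")

config = Config()                 # hypothetical Config class exposing from_pyfile()
config.from_pyfile('settings.py')
print(config.DEBUG, config.SECRET_KEY)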
def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0] | Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3 | Below is the the instruction that describes the task:
### Input:
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
### Response:
def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0] |
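A brief usage sketch, assuming the cm_api client where ApiResource provides the resource root; host, credentials, template and cluster names are placeholders.
from cm_api.api_client import ApiResource   # assumption: cm_api client package

api = ApiResource('cm-host.example.com', username='admin', password='admin')
template = create_host_template(api, 'worker-template', 'cluster1')
print(template.name)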
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
"""Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
"""
launch_script = './launchClient.sh'
if os.name == 'nt':
launch_script = 'launchClient.bat'
cwd = os.getcwd()
os.chdir(installdir)
os.chdir("Minecraft")
try:
cmd = [launch_script, '-port', str(port), '-env']
if replaceable:
cmd.append('-replaceable')
subprocess.check_call(cmd)
finally:
os.chdir(cwd) | Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false). | Below is the the instruction that describes the task:
### Input:
Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
### Response:
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
"""Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
"""
launch_script = './launchClient.sh'
if os.name == 'nt':
launch_script = 'launchClient.bat'
cwd = os.getcwd()
os.chdir(installdir)
os.chdir("Minecraft")
try:
cmd = [launch_script, '-port', str(port), '-env']
if replaceable:
cmd.append('-replaceable')
subprocess.check_call(cmd)
finally:
os.chdir(cwd) |
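A brief usage sketch; the port number and the choice to run the blocking launcher in a daemon thread are assumptions.
import threading

# launch_minecraft blocks until the client exits, so run it in a daemon
# thread if the calling script needs to keep going.
t = threading.Thread(target=launch_minecraft, args=(9000,),
                     kwargs={'installdir': 'MalmoPlatform', 'replaceable': True},
                     daemon=True)
t.start()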
def colRowIsOnSciencePixelList(self, col, row, padding=DEFAULT_PADDING):
"""similar to colRowIsOnSciencePixelList() but takes lists as input"""
out = np.ones(len(col), dtype=bool)
col_arr = np.array(col)
row_arr = np.array(row)
mask = np.bitwise_or(col_arr < 12. - padding, col_arr > 1111 + padding)
out[mask] = False
mask = np.bitwise_or(row_arr < 20. - padding, row_arr > 1043 + padding)
out[mask] = False
return out | similar to colRowIsOnSciencePixelList() but takes lists as input | Below is the the instruction that describes the task:
### Input:
similar to colRowIsOnSciencePixelList() but takes lists as input
### Response:
def colRowIsOnSciencePixelList(self, col, row, padding=DEFAULT_PADDING):
"""similar to colRowIsOnSciencePixelList() but takes lists as input"""
out = np.ones(len(col), dtype=bool)
col_arr = np.array(col)
row_arr = np.array(row)
mask = np.bitwise_or(col_arr < 12. - padding, col_arr > 1111 + padding)
out[mask] = False
mask = np.bitwise_or(row_arr < 20. - padding, row_arr > 1043 + padding)
out[mask] = False
return out |
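A standalone numpy sketch of the same masking logic for quick checks outside the class; the padding value of 0 is an assumption in place of DEFAULT_PADDING.
import numpy as np

col = np.array([5.0, 500.0, 1120.0])
row = np.array([10.0, 600.0, 1050.0])
padding = 0.0   # assumption; the class uses DEFAULT_PADDING

on_science = np.ones(len(col), dtype=bool)
on_science[np.bitwise_or(col < 12.0 - padding, col > 1111 + padding)] = False
on_science[np.bitwise_or(row < 20.0 - padding, row > 1043 + padding)] = False
print(on_science)   # -> [False  True False]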
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped') | Stop watching for files.
Stop the observer started by watch function and finish
thread life. | Below is the the instruction that describes the task:
### Input:
Stop watching for files.
Stop the observer started by watch function and finish
thread life.
### Response:
def stop_watching(self):
"""Stop watching for files.
Stop the observer started by watch function and finish
thread life.
"""
self._watch = False
if self._observer:
self._logger.info('Stopping watcher')
self._observer.stop()
self._logger.info('Watcher stopped') |
def _write_max_norm(self, norms:[])->None:
"Writes the maximum norm of the gradients to Tensorboard."
max_norm = max(norms)
self._add_gradient_scalar('max_norm', scalar_value=max_norm) | Writes the maximum norm of the gradients to Tensorboard. | Below is the the instruction that describes the task:
### Input:
Writes the maximum norm of the gradients to Tensorboard.
### Response:
def _write_max_norm(self, norms:[])->None:
"Writes the maximum norm of the gradients to Tensorboard."
max_norm = max(norms)
self._add_gradient_scalar('max_norm', scalar_value=max_norm) |
def framesFromTransTmpl(transaction: 'TransTmpl',
wordWidth: int,
maxFrameLen: Union[int, float]=inf,
maxPaddingWords: Union[int, float]=inf,
trimPaddingWordsOnStart: bool=False,
trimPaddingWordsOnEnd: bool=False) -> Generator[
'FrameTmpl', None, None]:
"""
Convert transaction template into FrameTmpls
:param transaction: transaction template from which the FrameTmpls
are created
:param wordWidth: width of data signal in target interface
where frames will be used
:param maxFrameLen: maximum length of frame in bits,
if exceeded another frame will be created
:param maxPaddingWords: maximum number of contiguous padding words in a frame,
if exceeded the frame is split and the padding words are cut off
:attention: if maxPaddingWords<inf trimPaddingWordsOnEnd
or trimPaddingWordsOnStart has to be True
to decide where padding should be trimmed
:param trimPaddingWordsOnStart: trim padding from start of frame
at word granularity
:param trimPaddingWordsOnEnd: trim padding from end of frame
at word granularity
"""
isFirstInFrame = True
partsPending = False
startOfThisFrame = 0
assert maxFrameLen > 0
assert maxPaddingWords >= 0
if maxPaddingWords < inf:
assert trimPaddingWordsOnStart or trimPaddingWordsOnEnd, \
"Padding has to be cut off somewhere"
it = TransTmplWordIterator(wordWidth)
lastWordI = 0
endOfThisFrame = maxFrameLen
parts = []
for wordI, word in it.groupByWordIndex(transaction, 0):
if wordI * wordWidth >= endOfThisFrame:
# now in first+ word behind the frame
# cut off padding at end of frame
paddingWords = wordI - lastWordI
if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords:
# cut off padding and align end of frame to word
_endOfThisFrame = (lastWordI + 1) * wordWidth
else:
_endOfThisFrame = wordI * wordWidth
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
_endOfThisFrame,
parts)
# prepare for start of new frame
parts = []
isFirstInFrame = True
partsPending = False
# start on new word
startOfThisFrame = _endOfThisFrame
endOfThisFrame = startOfThisFrame + maxFrameLen
lastWordI = wordI
# check if padding at potential end of frame can be cut off
if (not isFirstInFrame
and trimPaddingWordsOnEnd
and wordI - lastWordI > 1):
# there is too much continual padding,
# cut it out and start new frame
_endOfThisFrame = (lastWordI + 1) * wordWidth
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
_endOfThisFrame,
parts)
# prepare for start of new frame
parts = []
isFirstInFrame = True
partsPending = False
# start on new word
startOfThisFrame = _endOfThisFrame
endOfThisFrame = startOfThisFrame + maxFrameLen
lastWordI = wordI - 1
if isFirstInFrame:
partsPending = True
isFirstInFrame = False
# cut off padding at start of frame
paddingWords = wordI - lastWordI
if trimPaddingWordsOnStart and paddingWords > maxPaddingWords:
startOfThisFrame += paddingWords * wordWidth
endOfThisFrame = startOfThisFrame + maxFrameLen
# resolve end of this part
parts.extend(word)
lastWordI = wordI
# remainder in "parts" after last iteration
endOfThisFrame = transaction.bitAddrEnd
withPadding = not (trimPaddingWordsOnEnd or trimPaddingWordsOnStart)
if partsPending or (withPadding
and endOfThisFrame != startOfThisFrame):
# cut off padding at end of frame
endOfLastWord = (lastWordI + 1) * wordWidth
if endOfThisFrame < endOfLastWord:
endOfThisFrame = endOfLastWord
else:
paddingWords = it.fullWordCnt(endOfLastWord, endOfThisFrame)
if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords:
endOfThisFrame -= paddingWords * wordWidth
# align end of frame to word
endOfThisFrame = min(startOfThisFrame +
maxFrameLen, endOfThisFrame)
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
endOfThisFrame,
parts)
parts = []
startOfThisFrame = endOfThisFrame
# final padding on the end
while withPadding and startOfThisFrame < transaction.bitAddrEnd:
endOfThisFrame = min(startOfThisFrame +
maxFrameLen, transaction.bitAddrEnd)
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
endOfThisFrame,
[])
startOfThisFrame = endOfThisFrame | Convert transaction template into FrameTmpls
:param transaction: transaction template from which the FrameTmpls
are created
:param wordWidth: width of data signal in target interface
where frames will be used
:param maxFrameLen: maximum length of frame in bits,
if exceeded another frame will be created
:param maxPaddingWords: maximum number of contiguous padding words in a frame,
if exceeded the frame is split and the padding words are cut off
:attention: if maxPaddingWords<inf trimPaddingWordsOnEnd
or trimPaddingWordsOnStart has to be True
to decide where padding should be trimmed
:param trimPaddingWordsOnStart: trim padding from start of frame
at word granularity
:param trimPaddingWordsOnEnd: trim padding from end of frame
at word granularity | Below is the the instruction that describes the task:
### Input:
Convert transaction template into FrameTmpls
:param transaction: transaction template from which the FrameTmpls
are created
:param wordWidth: width of data signal in target interface
where frames will be used
:param maxFrameLen: maximum length of frame in bits,
if exceeded another frame will be created
:param maxPaddingWords: maximum number of contiguous padding words in a frame,
if exceeded the frame is split and the padding words are cut off
:attention: if maxPaddingWords<inf trimPaddingWordsOnEnd
or trimPaddingWordsOnStart has to be True
to decide where padding should be trimmed
:param trimPaddingWordsOnStart: trim padding from start of frame
at word granularity
:param trimPaddingWordsOnEnd: trim padding from end of frame
at word granularity
### Response:
def framesFromTransTmpl(transaction: 'TransTmpl',
wordWidth: int,
maxFrameLen: Union[int, float]=inf,
maxPaddingWords: Union[int, float]=inf,
trimPaddingWordsOnStart: bool=False,
trimPaddingWordsOnEnd: bool=False) -> Generator[
'FrameTmpl', None, None]:
"""
Convert transaction template into FrameTmpls
:param transaction: transaction template from which the FrameTmpls
are created
:param wordWidth: width of data signal in target interface
where frames will be used
:param maxFrameLen: maximum length of frame in bits,
if exceeded another frame will be created
:param maxPaddingWords: maximum number of contiguous padding words in a frame,
if exceeded the frame is split and the padding words are cut off
:attention: if maxPaddingWords<inf trimPaddingWordsOnEnd
or trimPaddingWordsOnStart has to be True
to decide where padding should be trimmed
:param trimPaddingWordsOnStart: trim padding from start of frame
at word granularity
:param trimPaddingWordsOnEnd: trim padding from end of frame
at word granularity
"""
isFirstInFrame = True
partsPending = False
startOfThisFrame = 0
assert maxFrameLen > 0
assert maxPaddingWords >= 0
if maxPaddingWords < inf:
assert trimPaddingWordsOnStart or trimPaddingWordsOnEnd, \
"Padding has to be cut off somewhere"
it = TransTmplWordIterator(wordWidth)
lastWordI = 0
endOfThisFrame = maxFrameLen
parts = []
for wordI, word in it.groupByWordIndex(transaction, 0):
if wordI * wordWidth >= endOfThisFrame:
# now in first+ word behind the frame
# cut off padding at end of frame
paddingWords = wordI - lastWordI
if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords:
# cut off padding and align end of frame to word
_endOfThisFrame = (lastWordI + 1) * wordWidth
else:
_endOfThisFrame = wordI * wordWidth
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
_endOfThisFrame,
parts)
# prepare for start of new frame
parts = []
isFirstInFrame = True
partsPending = False
# start on new word
startOfThisFrame = _endOfThisFrame
endOfThisFrame = startOfThisFrame + maxFrameLen
lastWordI = wordI
# check if padding at potential end of frame can be cut off
if (not isFirstInFrame
and trimPaddingWordsOnEnd
and wordI - lastWordI > 1):
# there is too much continual padding,
# cut it out and start new frame
_endOfThisFrame = (lastWordI + 1) * wordWidth
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
_endOfThisFrame,
parts)
# prepare for start of new frame
parts = []
isFirstInFrame = True
partsPending = False
# start on new word
startOfThisFrame = _endOfThisFrame
endOfThisFrame = startOfThisFrame + maxFrameLen
lastWordI = wordI - 1
if isFirstInFrame:
partsPending = True
isFirstInFrame = False
# cut off padding at start of frame
paddingWords = wordI - lastWordI
if trimPaddingWordsOnStart and paddingWords > maxPaddingWords:
startOfThisFrame += paddingWords * wordWidth
endOfThisFrame = startOfThisFrame + maxFrameLen
# resolve end of this part
parts.extend(word)
lastWordI = wordI
# remainder in "parts" after last iteration
endOfThisFrame = transaction.bitAddrEnd
withPadding = not (trimPaddingWordsOnEnd or trimPaddingWordsOnStart)
if partsPending or (withPadding
and endOfThisFrame != startOfThisFrame):
# cut off padding at end of frame
endOfLastWord = (lastWordI + 1) * wordWidth
if endOfThisFrame < endOfLastWord:
endOfThisFrame = endOfLastWord
else:
paddingWords = it.fullWordCnt(endOfLastWord, endOfThisFrame)
if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords:
endOfThisFrame -= paddingWords * wordWidth
# align end of frame to word
endOfThisFrame = min(startOfThisFrame +
maxFrameLen, endOfThisFrame)
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
endOfThisFrame,
parts)
parts = []
startOfThisFrame = endOfThisFrame
# final padding on the end
while withPadding and startOfThisFrame < transaction.bitAddrEnd:
endOfThisFrame = min(startOfThisFrame +
maxFrameLen, transaction.bitAddrEnd)
yield FrameTmpl(transaction,
wordWidth,
startOfThisFrame,
endOfThisFrame,
[])
startOfThisFrame = endOfThisFrame |
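A simplified, self-contained sketch of the framing arithmetic (splitting a bit range into word-aligned frames of bounded length); it is an illustration only, not the hwt API.
import math

def frame_ranges(total_bits, word_width, max_frame_len=math.inf):
    # Yield (start_bit, end_bit) frame boundaries; the frame length is capped
    # at max_frame_len rounded down to a whole number of words.
    if not math.isinf(max_frame_len):
        max_frame_len = (max_frame_len // word_width) * word_width
    start = 0
    while start < total_bits:
        end = min(start + max_frame_len, total_bits)
        yield (start, end)
        start = end

print(list(frame_ranges(200, 64, max_frame_len=128)))   # -> [(0, 128), (128, 200)]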
def process_notice(self, notice):
""" This method is called on notices that need processing. Here,
we call ``on_object`` and ``on_account`` slots.
"""
id = notice["id"]
_a, _b, _ = id.split(".")
if id in self.subscription_objects:
self.on_object(notice)
elif ".".join([_a, _b, "x"]) in self.subscription_objects:
self.on_object(notice)
elif id[:4] == "2.6.":
# Treat account updates separately
self.on_account(notice) | This method is called on notices that need processing. Here,
we call ``on_object`` and ``on_account`` slots. | Below is the the instruction that describes the task:
### Input:
This method is called on notices that need processing. Here,
we call ``on_object`` and ``on_account`` slots.
### Response:
def process_notice(self, notice):
""" This method is called on notices that need processing. Here,
we call ``on_object`` and ``on_account`` slots.
"""
id = notice["id"]
_a, _b, _ = id.split(".")
if id in self.subscription_objects:
self.on_object(notice)
elif ".".join([_a, _b, "x"]) in self.subscription_objects:
self.on_object(notice)
elif id[:4] == "2.6.":
# Treat account updates separately
self.on_account(notice) |
def shot_remove_asset(self, *args, **kwargs):
"""Remove the, in the asset table view selected, asset.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
i = self.shot_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if not isinstance(asset, djadapter.models.Asset):
return
log.debug("Removing asset %s.", asset.name)
item.set_parent(None)
self.cur_shot.assets.remove(asset) | Remove the asset that is currently selected in the asset table view.
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Remove the asset that is currently selected in the asset table view.
:returns: None
:rtype: None
:raises: None
### Response:
def shot_remove_asset(self, *args, **kwargs):
"""Remove the, in the asset table view selected, asset.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_shot:
return
i = self.shot_asset_treev.currentIndex()
item = i.internalPointer()
if item:
asset = item.internal_data()
if not isinstance(asset, djadapter.models.Asset):
return
log.debug("Removing asset %s.", asset.name)
item.set_parent(None)
self.cur_shot.assets.remove(asset) |
def match_stops_to_nodes(gtfs, walk_network):
"""
Parameters
----------
gtfs : a GTFS object
walk_network : networkx.Graph
Returns
-------
stop_I_to_node: dict
maps stop_I to closest walk_network node
stop_I_to_dist: dict
maps stop_I to the distance to the closest walk_network node
"""
network_nodes = walk_network.nodes(data="true")
stop_Is = set(gtfs.get_straight_line_transfer_distances()['from_stop_I'])
stops_df = gtfs.stops()
geo_index = GeoGridIndex(precision=6)
for net_node, data in network_nodes:
geo_index.add_point(GeoPoint(data['lat'], data['lon'], ref=net_node))
stop_I_to_node = {}
stop_I_to_dist = {}
for stop_I in stop_Is:
stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat)
stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon)
geo_point = GeoPoint(stop_lat, stop_lon)
min_dist = float('inf')
min_dist_node = None
search_distances_m = [0.100, 0.500]
for search_distance_m in search_distances_m:
for point, distance in geo_index.get_nearest_points(geo_point, search_distance_m, "km"):
if distance < min_dist:
min_dist = distance * 1000
min_dist_node = point.ref
if min_dist_node is not None:
break
if min_dist_node is None:
warn("No OSM node found for stop: " + str(stops_df[stops_df.stop_I == stop_I]))
stop_I_to_node[stop_I] = min_dist_node
stop_I_to_dist[stop_I] = min_dist
return stop_I_to_node, stop_I_to_dist | Parameters
----------
gtfs : a GTFS object
walk_network : networkx.Graph
Returns
-------
stop_I_to_node: dict
maps stop_I to closest walk_network node
stop_I_to_dist: dict
maps stop_I to the distance to the closest walk_network node | Below is the the instruction that describes the task:
### Input:
Parameters
----------
gtfs : a GTFS object
walk_network : networkx.Graph
Returns
-------
stop_I_to_node: dict
maps stop_I to closest walk_network node
stop_I_to_dist: dict
maps stop_I to the distance to the closest walk_network node
### Response:
def match_stops_to_nodes(gtfs, walk_network):
"""
Parameters
----------
gtfs : a GTFS object
walk_network : networkx.Graph
Returns
-------
stop_I_to_node: dict
maps stop_I to closest walk_network node
stop_I_to_dist: dict
maps stop_I to the distance to the closest walk_network node
"""
network_nodes = walk_network.nodes(data="true")
stop_Is = set(gtfs.get_straight_line_transfer_distances()['from_stop_I'])
stops_df = gtfs.stops()
geo_index = GeoGridIndex(precision=6)
for net_node, data in network_nodes:
geo_index.add_point(GeoPoint(data['lat'], data['lon'], ref=net_node))
stop_I_to_node = {}
stop_I_to_dist = {}
for stop_I in stop_Is:
stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat)
stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon)
geo_point = GeoPoint(stop_lat, stop_lon)
min_dist = float('inf')
min_dist_node = None
search_distances_m = [0.100, 0.500]
for search_distance_m in search_distances_m:
for point, distance in geo_index.get_nearest_points(geo_point, search_distance_m, "km"):
if distance < min_dist:
min_dist = distance * 1000
min_dist_node = point.ref
if min_dist_node is not None:
break
if min_dist_node is None:
warn("No OSM node found for stop: " + str(stops_df[stops_df.stop_I == stop_I]))
stop_I_to_node[stop_I] = min_dist_node
stop_I_to_dist[stop_I] = min_dist
return stop_I_to_node, stop_I_to_dist |
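A brief usage sketch; the GTFS database path is a placeholder and load_walk_network() is a hypothetical helper standing in for however the OSM walk graph was built.
from gtfspy.gtfs import GTFS          # assumption: gtfspy package layout

gtfs = GTFS('helsinki.sqlite')        # placeholder GTFS sqlite database
walk_network = load_walk_network()    # hypothetical helper returning a networkx.Graph

stop_to_node, stop_to_dist = match_stops_to_nodes(gtfs, walk_network)
far_away = {s: d for s, d in stop_to_dist.items() if d > 500}
print('%d stops are more than 500 m from the walk network' % len(far_away))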
def transfer(self, payment_id, data={}, **kwargs):
""""
Create Transfer for given Payment Id
Args:
payment_id : Id for which payment object has to be transferred
Returns:
Payment dict after getting transferred
"""
url = "{}/{}/transfers".format(self.base_url, payment_id)
return self.post_url(url, data, **kwargs) | Create Transfer for given Payment Id
Args:
payment_id : Id for which payment object has to be transferred
Returns:
Payment dict after getting transferred | Below is the the instruction that describes the task:
### Input:
Create Transfer for given Payment Id
Args:
payment_id : Id for which payment object has to be transferred
Returns:
Payment dict after getting transferred
### Response:
def transfer(self, payment_id, data={}, **kwargs):
""""
Create Transfer for given Payment Id
Args:
payment_id : Id for which payment object has to be transferred
Returns:
Payment dict after getting transferred
"""
url = "{}/{}/transfers".format(self.base_url, payment_id)
return self.post_url(url, data, **kwargs) |
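A brief usage sketch, assuming the Razorpay Python client where this method is reached via client.payment; the key pair, payment id, account and amount are placeholders.
import razorpay   # assumption: official razorpay python client

client = razorpay.Client(auth=('rzp_test_key', 'rzp_test_secret'))
result = client.payment.transfer('pay_placeholder_id', {
    'transfers': [
        {'account': 'acc_placeholder', 'amount': 10000, 'currency': 'INR'},
    ],
})
print(result)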
def init(redis_address=None,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
redis_max_memory=None,
log_to_driver=True,
node_ip_address=None,
object_id_seed=None,
local_mode=False,
redirect_worker_output=None,
redirect_output=None,
ignore_reinit_error=False,
num_redis_shards=None,
redis_max_clients=None,
redis_password=None,
plasma_directory=None,
huge_pages=False,
include_webui=False,
driver_id=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
plasma_store_socket_name=None,
raylet_socket_name=None,
temp_dir=None,
load_code_from_local=False,
_internal_config=None):
"""Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(redis_address="123.45.67.89:6379")
Args:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
num_cpus (int): Number of cpus the user wishes all raylets to
be configured with.
num_gpus (int): Number of gpus the user wishes all raylets to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is capped at 20GB but can be
set higher.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same driver in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different
drivers.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster.
driver_id: The ID of driver.
configure_logging: True if logging should be configured here.
Otherwise, users may want to configure it themselves.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
load_code_from_local: Whether code should be loaded from a local module
or from the GCS.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
if configure_logging:
setup_logger(logging_level, logging_format)
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if setproctitle is None:
logger.warning(
"WARNING: Not updating worker name since `setproctitle` is not "
"installed. Install this with `pip install setproctitle` "
"(or ray[debug]) to enable monitoring of worker processes.")
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been "
"called.")
return
else:
raise Exception("Perhaps you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if redis_address is not None:
redis_address = services.address_to_ip(redis_address)
global _global_node
if driver_mode == LOCAL_MODE:
# If starting Ray in LOCAL_MODE, don't start any other processes.
_global_node = ray.node.LocalNode()
elif redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
object_id_seed=object_id_seed,
local_mode=local_mode,
driver_mode=driver_mode,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
redis_password=redis_password,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
include_webui=include_webui,
object_store_memory=object_store_memory,
redis_max_memory=redis_max_memory,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local,
_internal_config=_internal_config,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise Exception("When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise Exception("When connecting to an existing cluster, "
"resources must not be provided.")
if num_redis_shards is not None:
raise Exception("When connecting to an existing cluster, "
"num_redis_shards must not be provided.")
if redis_max_clients is not None:
raise Exception("When connecting to an existing cluster, "
"redis_max_clients must not be provided.")
if object_store_memory is not None:
raise Exception("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if redis_max_memory is not None:
raise Exception("When connecting to an existing cluster, "
"redis_max_memory must not be provided.")
if plasma_directory is not None:
raise Exception("When connecting to an existing cluster, "
"plasma_directory must not be provided.")
if huge_pages:
raise Exception("When connecting to an existing cluster, "
"huge_pages must not be provided.")
if temp_dir is not None:
raise Exception("When connecting to an existing cluster, "
"temp_dir must not be provided.")
if plasma_store_socket_name is not None:
raise Exception("When connecting to an existing cluster, "
"plasma_store_socket_name must not be provided.")
if raylet_socket_name is not None:
raise Exception("When connecting to an existing cluster, "
"raylet_socket_name must not be provided.")
if _internal_config is not None:
raise Exception("When connecting to an existing cluster, "
"_internal_config must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
redis_address=redis_address,
redis_password=redis_password,
object_id_seed=object_id_seed,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local)
_global_node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=False, connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_id=driver_id)
for hook in _post_init_hooks:
hook()
return _global_node.address_info | Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(redis_address="123.45.67.89:6379")
Args:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
num_cpus (int): Number of cpus the user wishes all raylets to
be configured with.
num_gpus (int): Number of gpus the user wishes all raylets to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is capped at 20GB but can be
set higher.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same driver in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different
drivers.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster.
driver_id: The ID of driver.
configure_logging: True if logging should be configured here.
Otherwise, users may want to configure it themselves.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
load_code_from_local: Whether code should be loaded from a local module
or from the GCS.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in. | Below is the the instruction that describes the task:
### Input:
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(redis_address="123.45.67.89:6379")
Args:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
num_cpus (int): Number of cpus the user wishes all raylets to
be configured with.
num_gpus (int): Number of gpus the user wishes all raylets to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is capped at 20GB but can be
set higher.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same driver in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different
drivers.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster.
driver_id: The ID of driver.
configure_logging: True if logging should be configured here.
Otherwise, users may want to configure it themselves.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
load_code_from_local: Whether code should be loaded from a local module
or from the GCS.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
### Response:
def init(redis_address=None,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
redis_max_memory=None,
log_to_driver=True,
node_ip_address=None,
object_id_seed=None,
local_mode=False,
redirect_worker_output=None,
redirect_output=None,
ignore_reinit_error=False,
num_redis_shards=None,
redis_max_clients=None,
redis_password=None,
plasma_directory=None,
huge_pages=False,
include_webui=False,
driver_id=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
plasma_store_socket_name=None,
raylet_socket_name=None,
temp_dir=None,
load_code_from_local=False,
_internal_config=None):
"""Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases. Either a Ray cluster already exists and we
just attach this driver to it, or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(redis_address="123.45.67.89:6379")
Args:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
num_cpus (int): Number of cpus the user wishes all raylets to
be configured with.
num_gpus (int): Number of gpus the user wishes all raylets to
be configured with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is capped at 20GB but can be
set higher.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
node_ip_address (str): The IP address of the node that we are on.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same driver in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different
drivers.
local_mode (bool): True if the code should be executed serially
without Ray. This is useful for debugging.
ignore_reinit_error: True if we should suppress errors from calling
ray.init() a second time.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster.
driver_id: The ID of driver.
configure_logging: True if logging should be configured here.
Otherwise, users may want to configure it themselves.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
load_code_from_local: Whether code should be loaded from a local module
or from the GCS.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
if configure_logging:
setup_logger(logging_level, logging_format)
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if setproctitle is None:
logger.warning(
"WARNING: Not updating worker name since `setproctitle` is not "
"installed. Install this with `pip install setproctitle` "
"(or ray[debug]) to enable monitoring of worker processes.")
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been "
"called.")
return
else:
raise Exception("Perhaps you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if redis_address is not None:
redis_address = services.address_to_ip(redis_address)
global _global_node
if driver_mode == LOCAL_MODE:
# If starting Ray in LOCAL_MODE, don't start any other processes.
_global_node = ray.node.LocalNode()
elif redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
object_id_seed=object_id_seed,
local_mode=local_mode,
driver_mode=driver_mode,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
redis_password=redis_password,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
include_webui=include_webui,
object_store_memory=object_store_memory,
redis_max_memory=redis_max_memory,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local,
_internal_config=_internal_config,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise Exception("When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise Exception("When connecting to an existing cluster, "
"resources must not be provided.")
if num_redis_shards is not None:
raise Exception("When connecting to an existing cluster, "
"num_redis_shards must not be provided.")
if redis_max_clients is not None:
raise Exception("When connecting to an existing cluster, "
"redis_max_clients must not be provided.")
if object_store_memory is not None:
raise Exception("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if redis_max_memory is not None:
raise Exception("When connecting to an existing cluster, "
"redis_max_memory must not be provided.")
if plasma_directory is not None:
raise Exception("When connecting to an existing cluster, "
"plasma_directory must not be provided.")
if huge_pages:
raise Exception("When connecting to an existing cluster, "
"huge_pages must not be provided.")
if temp_dir is not None:
raise Exception("When connecting to an existing cluster, "
"temp_dir must not be provided.")
if plasma_store_socket_name is not None:
raise Exception("When connecting to an existing cluster, "
"plasma_store_socket_name must not be provided.")
if raylet_socket_name is not None:
raise Exception("When connecting to an existing cluster, "
"raylet_socket_name must not be provided.")
if _internal_config is not None:
raise Exception("When connecting to an existing cluster, "
"_internal_config must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
redis_address=redis_address,
redis_password=redis_password,
object_id_seed=object_id_seed,
temp_dir=temp_dir,
load_code_from_local=load_code_from_local)
_global_node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=False, connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_id=driver_id)
for hook in _post_init_hooks:
hook()
return _global_node.address_info |
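A minimal usage sketch based on the examples in the docstring above; the remote square task is an arbitrary illustration.
import ray

ray.init(num_cpus=2)          # start a local cluster
# or attach to an existing one:
# ray.init(redis_address="123.45.67.89:6379")

@ray.remote
def square(x):
    return x * x

print(ray.get([square.remote(i) for i in range(4)]))   # -> [0, 1, 4, 9]
ray.shutdown()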
def get(self, sid):
"""
Constructs a CredentialListMappingContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
return CredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=sid,
) | Constructs a CredentialListMappingContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext | Below is the the instruction that describes the task:
### Input:
Constructs a CredentialListMappingContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
### Response:
def get(self, sid):
"""
Constructs a CredentialListMappingContext
:param sid: A string that identifies the resource to fetch
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
return CredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=sid,
) |
def set_as_object(self, index = None, value= None):
"""
Sets a new value to array element specified by its index.
When the index is not defined, it resets the entire array value.
This method has a double purpose because method overrides are not supported in JavaScript.
:param index: (optional) an index of the element to set
:param value: a new element or array value.
"""
if index == None and value != None:
self.set_as_array(value)
else:
self[index] = value | Sets a new value to array element specified by its index.
When the index is not defined, it resets the entire array value.
This method has a double purpose because method overrides are not supported in JavaScript.
:param index: (optional) an index of the element to set
:param value: a new element or array value. | Below is the the instruction that describes the task:
### Input:
Sets a new value to array element specified by its index.
When the index is not defined, it resets the entire array value.
This method has a double purpose because method overrides are not supported in JavaScript.
:param index: (optional) an index of the element to set
:param value: a new element or array value.
### Response:
def set_as_object(self, index = None, value= None):
"""
Sets a new value to array element specified by its index.
When the index is not defined, it resets the entire array value.
This method has a double purpose because method overrides are not supported in JavaScript.
:param index: (optional) an index of the element to set
:param value: a new element or array value.
"""
if index == None and value != None:
self.set_as_array(value)
else:
self[index] = value |
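A brief usage sketch with a hypothetical AnyValueArray-style container; only the two calling conventions come from the method above.
arr = AnyValueArray([1, 2, 3])        # hypothetical pip-services style container

arr.set_as_object(1, 'two')           # replace the element at index 1
arr.set_as_object(value=[7, 8, 9])    # no index: reset the whole array value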
def main():
"""
NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
-fm MFILE: specify measurements file for editing, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo'
"""
letters=string.ascii_uppercase
for l in string.ascii_lowercase: letters=letters+l
dir_path='.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
if '-fm' in sys.argv:
ind=sys.argv.index('-f')
meas_file=dir_path+'/'+sys.argv[ind+1]
else:
meas_file=dir_path+'/magic_measurements.txt'
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
try:
open(meas_file,"r")
meas_data,file_type=pmag.magic_read(meas_file)
except IOError:
print(main.__doc__)
print("""You must have a valid measurements file prior to converting
this LSQ file""")
sys.exit()
zredo=open(zfile,"w")
MeasRecs=[]
#
# read in LSQ file
#
specs,MeasOuts=[],[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
if len(line)<2:
sys.exit()
# spec=line[0:14].strip().replace(" ","") # get out the specimen name = collapsing spaces
# rec=line[14:].split() # split up the rest of the line
rec=line.split('\t')
spec=rec[0].lower()
specs.append(spec)
comp_name=rec[2] # assign component name
calculation_type="DE-FM"
if rec[1][0]=="L":
calculation_type="DE-BFL" # best-fit line
else:
            calculation_type="DE-BFP" # best-fit plane
lists=rec[7].split('-') # get list of data used
incl=[]
for l in lists[0]:
incl.append(letters.index(l))
for l in letters[letters.index(lists[0][-1])+1:letters.index(lists[1][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[1]:
incl.append(letters.index(l))
if len(lists)>2:
for l in letters[letters.index(lists[1][-1])+1:letters.index(lists[2][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[2]:
incl.append(letters.index(l))
# now find all the data for this specimen in measurements
datablock,min,max=[],"",""
demag='N'
for s in meas_data:
if s['er_specimen_name'].lower()==spec.lower():
meths=s['magic_method_codes'].replace(" ","").split(":")
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(s)
if len(datablock)>0:
for t in datablock:print(t['magic_method_codes'])
incl_int=len(incl)
while incl[-1]>len(datablock)-1:
del incl[-1] # don't include measurements beyond what is in file
if len(incl)!=incl_int:
            print('converting calculation type to best-fit line')
meths0= datablock[incl[0]]['magic_method_codes'].replace(" ","").split(':')
meths1= datablock[incl[-1]]['magic_method_codes'].replace(" ","").split(':')
H0=datablock[incl[0]]['treatment_ac_field']
T0=datablock[incl[0]]['treatment_temp']
H1=datablock[incl[-1]]['treatment_ac_field']
T1=datablock[incl[-1]]['treatment_temp']
if 'LT-T-Z' in meths1:
max=T1
demag="T"
elif 'LT-AF-Z' in meths1:
demag="AF"
max=H1
if 'LT-NO' in meths0:
if demag=='T':
min=273
else:
min=0
elif 'LT-T-Z' in meths0:
min=T0
else:
min=H0
for ind in range(incl[0]):
MeasRecs.append(datablock[ind])
for ind in range(incl[0],incl[-1]):
if ind not in incl: # datapoint not used in calculation
datablock[ind]['measurement_flag']='b'
MeasRecs.append(datablock[ind])
for ind in range(incl[-1],len(datablock)):
MeasRecs.append(datablock[ind])
outstring='%s %s %s %s %s \n'%(spec,calculation_type,min,max,comp_name)
zredo.write(outstring)
for s in meas_data: # collect the rest of the measurement data not already included
if s['er_specimen_name'] not in specs:
MeasRecs.append(s)
pmag.magic_write(meas_file,MeasRecs,'magic_measurements') | NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
    -fm MFILE: specify measurements file for editing, default is
magic_measurements.txt
    -F FILE: specify output file, default is 'zeq_redo' | Below is the instruction that describes the task:
### Input:
NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
    -fm MFILE: specify measurements file for editing, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo'
### Response:
def main():
"""
NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
    -fm MFILE: specify measurements file for editing, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo'
"""
letters=string.ascii_uppercase
for l in string.ascii_lowercase: letters=letters+l
dir_path='.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
if '-fm' in sys.argv:
        ind=sys.argv.index('-fm')
meas_file=dir_path+'/'+sys.argv[ind+1]
else:
meas_file=dir_path+'/magic_measurements.txt'
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
try:
open(meas_file,"r")
meas_data,file_type=pmag.magic_read(meas_file)
except IOError:
print(main.__doc__)
print("""You must have a valid measurements file prior to converting
this LSQ file""")
sys.exit()
zredo=open(zfile,"w")
MeasRecs=[]
#
# read in LSQ file
#
specs,MeasOuts=[],[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
if len(line)<2:
sys.exit()
# spec=line[0:14].strip().replace(" ","") # get out the specimen name = collapsing spaces
# rec=line[14:].split() # split up the rest of the line
rec=line.split('\t')
spec=rec[0].lower()
specs.append(spec)
comp_name=rec[2] # assign component name
calculation_type="DE-FM"
if rec[1][0]=="L":
calculation_type="DE-BFL" # best-fit line
else:
        calculation_type="DE-BFP" # best-fit plane
lists=rec[7].split('-') # get list of data used
incl=[]
for l in lists[0]:
incl.append(letters.index(l))
for l in letters[letters.index(lists[0][-1])+1:letters.index(lists[1][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[1]:
incl.append(letters.index(l))
if len(lists)>2:
for l in letters[letters.index(lists[1][-1])+1:letters.index(lists[2][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[2]:
incl.append(letters.index(l))
# now find all the data for this specimen in measurements
datablock,min,max=[],"",""
demag='N'
for s in meas_data:
if s['er_specimen_name'].lower()==spec.lower():
meths=s['magic_method_codes'].replace(" ","").split(":")
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(s)
if len(datablock)>0:
for t in datablock:print(t['magic_method_codes'])
incl_int=len(incl)
while incl[-1]>len(datablock)-1:
del incl[-1] # don't include measurements beyond what is in file
if len(incl)!=incl_int:
            print('converting calculation type to best-fit line')
meths0= datablock[incl[0]]['magic_method_codes'].replace(" ","").split(':')
meths1= datablock[incl[-1]]['magic_method_codes'].replace(" ","").split(':')
H0=datablock[incl[0]]['treatment_ac_field']
T0=datablock[incl[0]]['treatment_temp']
H1=datablock[incl[-1]]['treatment_ac_field']
T1=datablock[incl[-1]]['treatment_temp']
if 'LT-T-Z' in meths1:
max=T1
demag="T"
elif 'LT-AF-Z' in meths1:
demag="AF"
max=H1
if 'LT-NO' in meths0:
if demag=='T':
min=273
else:
min=0
elif 'LT-T-Z' in meths0:
min=T0
else:
min=H0
for ind in range(incl[0]):
MeasRecs.append(datablock[ind])
for ind in range(incl[0],incl[-1]):
if ind not in incl: # datapoint not used in calculation
datablock[ind]['measurement_flag']='b'
MeasRecs.append(datablock[ind])
for ind in range(incl[-1],len(datablock)):
MeasRecs.append(datablock[ind])
outstring='%s %s %s %s %s \n'%(spec,calculation_type,min,max,comp_name)
zredo.write(outstring)
for s in meas_data: # collect the rest of the measurement data not already included
if s['er_specimen_name'] not in specs:
MeasRecs.append(s)
pmag.magic_write(meas_file,MeasRecs,'magic_measurements') |
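The least obvious step in the routine above is how the tab-delimited LSQ step list (column 8, e.g. `ABC-FG`) is expanded into measurement indices. The helper below is a hypothetical standalone restatement of that expansion, not a PmagPy function, and it generalises to any number of `-`-separated groups instead of the three handled above.

```python
import string

# Hypothetical sketch of the LSQ step-letter expansion: "ABC-FG" becomes the
# indices of A, B, C, D, E, F, G -- the named steps plus every step that lies
# between consecutive groups.
letters = string.ascii_uppercase + string.ascii_lowercase

def expand_lsq_steps(spec):
    parts = spec.split('-')
    incl = [letters.index(ch) for ch in parts[0]]
    for prev, nxt in zip(parts, parts[1:]):
        # add the letters sitting between the end of one group and the start
        # of the next, then the next group itself
        incl.extend(range(letters.index(prev[-1]) + 1, letters.index(nxt[0])))
        incl.extend(letters.index(ch) for ch in nxt)
    return incl

print(expand_lsq_steps('ABC-FG'))  # [0, 1, 2, 3, 4, 5, 6]
```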
def enable_peer_bfd(self, **kwargs):
"""BFD enable for each specified peer.
Args:
rbridge_id (str): Rbridge to configure. (1, 225, etc)
peer_ip (str): Peer IPv4 address for BFD setting.
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
XML to be passed to the switch.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... remote_as='65535', rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230',get=True)
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230', delete=True)
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... delete=True, rbridge_id='230', remote_as='65535')
"""
method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \
'neighbor_neighbor_ips_neighbor_addr_bfd_bfd_enable'
bfd_enable = getattr(self._rbridge, method_name)
kwargs['router_bgp_neighbor_address'] = kwargs.pop('peer_ip')
callback = kwargs.pop('callback', self._callback)
config = bfd_enable(**kwargs)
if kwargs.pop('delete', False):
tag = 'bfd-enable'
config.find('.//*%s' % tag).set('operation', 'delete')
if kwargs.pop('get', False):
return callback(config, handler='get_config')
else:
return callback(config) | BFD enable for each specified peer.
Args:
rbridge_id (str): Rbridge to configure. (1, 225, etc)
peer_ip (str): Peer IPv4 address for BFD setting.
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
XML to be passed to the switch.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... remote_as='65535', rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230',get=True)
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230', delete=True)
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
            ...         delete=True, rbridge_id='230', remote_as='65535') | Below is the instruction that describes the task:
### Input:
BFD enable for each specified peer.
Args:
rbridge_id (str): Rbridge to configure. (1, 225, etc)
peer_ip (str): Peer IPv4 address for BFD setting.
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
XML to be passed to the switch.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... remote_as='65535', rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230',get=True)
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230', delete=True)
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... delete=True, rbridge_id='230', remote_as='65535')
### Response:
def enable_peer_bfd(self, **kwargs):
"""BFD enable for each specified peer.
Args:
rbridge_id (str): Rbridge to configure. (1, 225, etc)
peer_ip (str): Peer IPv4 address for BFD setting.
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
XML to be passed to the switch.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... remote_as='65535', rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230')
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230',get=True)
... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20',
... rbridge_id='230', delete=True)
... output = dev.bgp.neighbor(ip_addr='10.10.10.20',
... delete=True, rbridge_id='230', remote_as='65535')
"""
method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \
'neighbor_neighbor_ips_neighbor_addr_bfd_bfd_enable'
bfd_enable = getattr(self._rbridge, method_name)
kwargs['router_bgp_neighbor_address'] = kwargs.pop('peer_ip')
callback = kwargs.pop('callback', self._callback)
config = bfd_enable(**kwargs)
if kwargs.pop('delete', False):
tag = 'bfd-enable'
config.find('.//*%s' % tag).set('operation', 'delete')
if kwargs.pop('get', False):
return callback(config, handler='get_config')
else:
return callback(config) |
def add(x, y, context=None):
"""
Return ``x`` + ``y``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_add,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
    ) | Return ``x`` + ``y``. | Below is the instruction that describes the task:
### Input:
Return ``x`` + ``y``.
### Response:
def add(x, y, context=None):
"""
Return ``x`` + ``y``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_add,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) |
def main(*argv):
""" main driver of program """
try:
# Inputs
#
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
username = argv[3]
subFolders = argv[4].lower() == "true"
# Logic
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(url=siteURL,
securityHandler=sh)
content = admin.content
if isinstance(content, arcrest.manageorg._content.Content):pass
usercontent = content.usercontent(username=username)
res = usercontent.listUserContent(username=adminUsername)
# Delete Root Items
#
eItems = ""
itemsToErase = ",".join([item['id'] for item in res['items']])
usercontent.deleteItems(items=itemsToErase)
# Walk Each Folder and erase items if subfolder == True
#
if subFolders:
for folder in res['folders']:
c = usercontent.listUserContent(username=username, folderId=folder['id'])
itemsToErase = ",".join([item['id'] for item in c['items']])
if len(itemsToErase.split(',')) > 0:
usercontent.deleteItems(items=itemsToErase)
del c
usercontent.deleteFolder(folderId=folder['id'])
del folder
arcpy.AddMessage("User %s content has been deleted." % username)
arcpy.SetParameterAsText(4, True)
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
        arcpy.AddError("with error message: %s" % synerror) | main driver of program | Below is the instruction that describes the task:
### Input:
main driver of program
### Response:
def main(*argv):
""" main driver of program """
try:
# Inputs
#
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
username = argv[3]
subFolders = argv[4].lower() == "true"
# Logic
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(url=siteURL,
securityHandler=sh)
content = admin.content
if isinstance(content, arcrest.manageorg._content.Content):pass
usercontent = content.usercontent(username=username)
res = usercontent.listUserContent(username=adminUsername)
# Delete Root Items
#
eItems = ""
itemsToErase = ",".join([item['id'] for item in res['items']])
usercontent.deleteItems(items=itemsToErase)
# Walk Each Folder and erase items if subfolder == True
#
if subFolders:
for folder in res['folders']:
c = usercontent.listUserContent(username=username, folderId=folder['id'])
itemsToErase = ",".join([item['id'] for item in c['items']])
if len(itemsToErase.split(',')) > 0:
usercontent.deleteItems(items=itemsToErase)
del c
usercontent.deleteFolder(folderId=folder['id'])
del folder
arcpy.AddMessage("User %s content has been deleted." % username)
arcpy.SetParameterAsText(4, True)
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) |
def resolve_invite_link(link):
"""
Resolves the given invite link. Returns a tuple of
``(link creator user id, global chat id, random int)``.
Note that for broadcast channels, the link creator
user ID will be zero to protect their identity.
Normal chats and megagroup channels will have such ID.
Note that the chat ID may not be accurate for chats
with a link that were upgraded to megagroup, since
the link can remain the same, but the chat ID will
be correct once a new link is generated.
"""
link_hash, is_link = parse_username(link)
if not is_link:
# Perhaps the user passed the link hash directly
link_hash = link
try:
return struct.unpack('>LLQ', _decode_telegram_base64(link_hash))
except (struct.error, TypeError):
return None, None, None | Resolves the given invite link. Returns a tuple of
``(link creator user id, global chat id, random int)``.
Note that for broadcast channels, the link creator
user ID will be zero to protect their identity.
Normal chats and megagroup channels will have such ID.
Note that the chat ID may not be accurate for chats
with a link that were upgraded to megagroup, since
the link can remain the same, but the chat ID will
    be correct once a new link is generated. | Below is the instruction that describes the task:
### Input:
Resolves the given invite link. Returns a tuple of
``(link creator user id, global chat id, random int)``.
Note that for broadcast channels, the link creator
user ID will be zero to protect their identity.
Normal chats and megagroup channels will have such ID.
Note that the chat ID may not be accurate for chats
with a link that were upgraded to megagroup, since
the link can remain the same, but the chat ID will
be correct once a new link is generated.
### Response:
def resolve_invite_link(link):
"""
Resolves the given invite link. Returns a tuple of
``(link creator user id, global chat id, random int)``.
Note that for broadcast channels, the link creator
user ID will be zero to protect their identity.
Normal chats and megagroup channels will have such ID.
Note that the chat ID may not be accurate for chats
with a link that were upgraded to megagroup, since
the link can remain the same, but the chat ID will
be correct once a new link is generated.
"""
link_hash, is_link = parse_username(link)
if not is_link:
# Perhaps the user passed the link hash directly
link_hash = link
try:
return struct.unpack('>LLQ', _decode_telegram_base64(link_hash))
except (struct.error, TypeError):
return None, None, None |
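The hash that `resolve_invite_link` unpacks is URL-safe base64 over a big-endian `(uint32, uint32, uint64)` struct, so a stdlib-only round trip shows the payload layout. The two helper names below are assumptions for the sketch, not Telethon API.

```python
import base64
import struct

# Round-trip sketch of the '>LLQ' payload unpacked above; only base64/struct
# from the standard library are real API here, the helpers are hypothetical.
def encode_link_hash(creator_id, chat_id, random_part):
    raw = struct.pack('>LLQ', creator_id, chat_id, random_part)
    return base64.urlsafe_b64encode(raw).decode('ascii').rstrip('=')

def decode_link_hash(text):
    raw = base64.urlsafe_b64decode(text + '=' * (-len(text) % 4))
    return struct.unpack('>LLQ', raw)

link_hash = encode_link_hash(12345, 67890, 9876543210)
print(decode_link_hash(link_hash))  # (12345, 67890, 9876543210)
```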
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator | Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
    'foo' | Below is the instruction that describes the task:
### Input:
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
### Response:
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator |
def load_from_stream(self, stream, container, **options):
"""
Load data from given stream 'stream'.
:param stream: Stream provides configuration data
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_stream_fn'
:return: container object holding the configuration data
"""
return load_with_fn(self._load_from_stream_fn, stream, container,
allow_primitives=self.allow_primitives(),
**options) | Load data from given stream 'stream'.
:param stream: Stream provides configuration data
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_stream_fn'
        :return: container object holding the configuration data | Below is the instruction that describes the task:
### Input:
Load data from given stream 'stream'.
:param stream: Stream provides configuration data
:param container: callable to make a container object
:param options: keyword options passed to '_load_from_stream_fn'
:return: container object holding the configuration data
### Response:
def load_from_stream(self, stream, container, **options):
"""
Load data from given stream 'stream'.
:param stream: Stream provides configuration data
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_stream_fn'
:return: container object holding the configuration data
"""
return load_with_fn(self._load_from_stream_fn, stream, container,
allow_primitives=self.allow_primitives(),
**options) |
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
Builds out the ReST API source.
"""
config = app.builder.config
target_directory = (
pathlib.Path(app.builder.env.srcdir) / config.uqbar_api_directory_name
)
initial_source_paths: List[str] = []
source_paths = config.uqbar_api_source_paths
for source_path in source_paths:
if isinstance(source_path, types.ModuleType):
if hasattr(source_path, "__path__"):
initial_source_paths.extend(getattr(source_path, "__path__"))
else:
                initial_source_paths.append(source_path.__file__)
continue
try:
module = importlib.import_module(source_path)
if hasattr(module, "__path__"):
initial_source_paths.extend(getattr(module, "__path__"))
else:
initial_source_paths.append(module.__file__)
except ImportError:
initial_source_paths.append(source_path)
root_documenter_class = config.uqbar_api_root_documenter_class
if isinstance(root_documenter_class, str):
module_name, _, class_name = root_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
root_documenter_class = getattr(module, class_name)
module_documenter_class = config.uqbar_api_module_documenter_class
if isinstance(module_documenter_class, str):
module_name, _, class_name = module_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
module_documenter_class = getattr(module, class_name)
# Don't modify the list in Sphinx's config. Sphinx won't pickle class
# references, and strips them from the saved config. That leads to Sphinx
# believing that the config has changed on every run.
member_documenter_classes = list(config.uqbar_api_member_documenter_classes or [])
for i, member_documenter_class in enumerate(member_documenter_classes):
if isinstance(member_documenter_class, str):
module_name, _, class_name = member_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
member_documenter_classes[i] = getattr(module, class_name)
api_builder = uqbar.apis.APIBuilder(
initial_source_paths=initial_source_paths,
target_directory=target_directory,
document_empty_modules=config.uqbar_api_document_empty_modules,
document_private_members=config.uqbar_api_document_private_members,
document_private_modules=config.uqbar_api_document_private_modules,
member_documenter_classes=member_documenter_classes or None,
module_documenter_class=module_documenter_class,
root_documenter_class=root_documenter_class,
title=config.uqbar_api_title,
logger_func=logger_func,
)
api_builder() | Hooks into Sphinx's ``builder-inited`` event.
    Builds out the ReST API source. | Below is the instruction that describes the task:
### Input:
Hooks into Sphinx's ``builder-inited`` event.
Builds out the ReST API source.
### Response:
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
Builds out the ReST API source.
"""
config = app.builder.config
target_directory = (
pathlib.Path(app.builder.env.srcdir) / config.uqbar_api_directory_name
)
initial_source_paths: List[str] = []
source_paths = config.uqbar_api_source_paths
for source_path in source_paths:
if isinstance(source_path, types.ModuleType):
if hasattr(source_path, "__path__"):
initial_source_paths.extend(getattr(source_path, "__path__"))
else:
                initial_source_paths.append(source_path.__file__)
continue
try:
module = importlib.import_module(source_path)
if hasattr(module, "__path__"):
initial_source_paths.extend(getattr(module, "__path__"))
else:
initial_source_paths.append(module.__file__)
except ImportError:
initial_source_paths.append(source_path)
root_documenter_class = config.uqbar_api_root_documenter_class
if isinstance(root_documenter_class, str):
module_name, _, class_name = root_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
root_documenter_class = getattr(module, class_name)
module_documenter_class = config.uqbar_api_module_documenter_class
if isinstance(module_documenter_class, str):
module_name, _, class_name = module_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
module_documenter_class = getattr(module, class_name)
# Don't modify the list in Sphinx's config. Sphinx won't pickle class
# references, and strips them from the saved config. That leads to Sphinx
# believing that the config has changed on every run.
member_documenter_classes = list(config.uqbar_api_member_documenter_classes or [])
for i, member_documenter_class in enumerate(member_documenter_classes):
if isinstance(member_documenter_class, str):
module_name, _, class_name = member_documenter_class.rpartition(".")
module = importlib.import_module(module_name)
member_documenter_classes[i] = getattr(module, class_name)
api_builder = uqbar.apis.APIBuilder(
initial_source_paths=initial_source_paths,
target_directory=target_directory,
document_empty_modules=config.uqbar_api_document_empty_modules,
document_private_members=config.uqbar_api_document_private_members,
document_private_modules=config.uqbar_api_document_private_modules,
member_documenter_classes=member_documenter_classes or None,
module_documenter_class=module_documenter_class,
root_documenter_class=root_documenter_class,
title=config.uqbar_api_title,
logger_func=logger_func,
)
api_builder() |
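Because every knob above is read from the Sphinx config, the natural usage example is a `conf.py` fragment. The option names below are exactly the ones `on_builder_inited` looks up; the extension module path, package name and documenter class paths are placeholders and should be treated as assumptions rather than verified uqbar defaults.

```python
# Hypothetical conf.py fragment; option names match the config lookups above,
# while the values and the extension path are illustrative assumptions.
extensions = [
    "uqbar.sphinx.api",  # assumed path of the extension registering on_builder_inited
]

uqbar_api_title = "My Project API"
uqbar_api_directory_name = "api"            # ReST sources written under <srcdir>/api
uqbar_api_source_paths = ["my_package"]     # importable module names or module objects
uqbar_api_document_empty_modules = False
uqbar_api_document_private_members = False
uqbar_api_document_private_modules = False
uqbar_api_root_documenter_class = "uqbar.apis.RootDocumenter"      # assumed class path
uqbar_api_module_documenter_class = "uqbar.apis.ModuleDocumenter"  # assumed class path
uqbar_api_member_documenter_classes = None  # fall back to the builder's defaults
```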
def with_metaclass(meta, *bases):
"""A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
"""
class _Metaclass(meta):
"""Inner class"""
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, attrs):
if this_bases is None:
return type.__new__(cls, name, (), attrs)
return meta(name, bases, attrs)
return _Metaclass(str('temporary_class'), None, {}) | A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
        pass | Below is the instruction that describes the task:
### Input:
A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
### Response:
def with_metaclass(meta, *bases):
"""A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
"""
class _Metaclass(meta):
"""Inner class"""
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, attrs):
if this_bases is None:
return type.__new__(cls, name, (), attrs)
return meta(name, bases, attrs)
return _Metaclass(str('temporary_class'), None, {}) |
def get_int_noerr(self, arg):
        """Eval arg and if it is an integer return the value. Otherwise
return None"""
if self.curframe:
g = self.curframe.f_globals
l = self.curframe.f_locals
else:
g = globals()
l = locals()
pass
try:
val = int(eval(arg, g, l))
except (SyntaxError, NameError, ValueError, TypeError):
return None
        return val | Eval arg and if it is an integer return the value. Otherwise
        return None | Below is the instruction that describes the task:
### Input:
Eval arg and if it is an integer return the value. Otherwise
return None
### Response:
def get_int_noerr(self, arg):
    """Eval arg and if it is an integer return the value. Otherwise
return None"""
if self.curframe:
g = self.curframe.f_globals
l = self.curframe.f_locals
else:
g = globals()
l = locals()
pass
try:
val = int(eval(arg, g, l))
except (SyntaxError, NameError, ValueError, TypeError):
return None
return val |
def _flatten_subclass_tree(cls):
"""Return the set of all child classes of `cls`.
Parameters
----------
cls : Type
Returns
-------
frozenset[Type]
"""
subclasses = frozenset(cls.__subclasses__())
children = frozenset(toolz.concat(map(_flatten_subclass_tree, subclasses)))
return frozenset({cls}) | subclasses | children | Return the set of all child classes of `cls`.
Parameters
----------
cls : Type
Returns
-------
    frozenset[Type] | Below is the instruction that describes the task:
### Input:
Return the set of all child classes of `cls`.
Parameters
----------
cls : Type
Returns
-------
frozenset[Type]
### Response:
def _flatten_subclass_tree(cls):
"""Return the set of all child classes of `cls`.
Parameters
----------
cls : Type
Returns
-------
frozenset[Type]
"""
subclasses = frozenset(cls.__subclasses__())
children = frozenset(toolz.concat(map(_flatten_subclass_tree, subclasses)))
return frozenset({cls}) | subclasses | children |
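A stdlib-only rewrite makes the recursion easy to run in isolation; `toolz.concat` is swapped for `itertools.chain.from_iterable`, which performs the same flattening here.

```python
from itertools import chain

# Self-contained sketch of the same recursion, with toolz.concat replaced by
# the equivalent itertools.chain.from_iterable.
def flatten_subclass_tree(cls):
    subclasses = frozenset(cls.__subclasses__())
    children = frozenset(
        chain.from_iterable(flatten_subclass_tree(sub) for sub in subclasses)
    )
    return frozenset({cls}) | subclasses | children


class A: pass
class B(A): pass
class C(B): pass
class D(A): pass

print(sorted(t.__name__ for t in flatten_subclass_tree(A)))  # ['A', 'B', 'C', 'D']
```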
def _uniq(self):
"""
Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2
"""
_detections = []
[_detections.append(d) for d in self.detections
if not _detections.count(d)]
self.detections = _detections
return self | Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
    2 | Below is the instruction that describes the task:
### Input:
Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2
### Response:
def _uniq(self):
"""
Get list of unique detections.
Works in place.
.. rubric:: Example
>>> family = Family(
... template=Template(name='a'), detections=[
... Detection(template_name='a', detect_time=UTCDateTime(0),
... no_chans=8, detect_val=4.2, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0),
... Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
... no_chans=8, detect_val=4.5, threshold=1.2,
... typeofdet='corr', threshold_type='MAD',
... threshold_input=8.0)])
>>> len(family)
3
>>> len(family._uniq())
2
"""
_detections = []
[_detections.append(d) for d in self.detections
if not _detections.count(d)]
self.detections = _detections
return self |
def end_block(self, request_end_block):
"""Calculate block hash using transaction ids and previous block
hash to be stored in the next block.
Args:
height (int): new height of the chain.
"""
self.abort_if_abci_chain_is_not_synced()
chain_shift = 0 if self.chain is None else self.chain['height']
height = request_end_block.height + chain_shift
self.new_height = height
# store pre-commit state to recover in case there is a crash during
# `end_block` or `commit`
logger.debug(f'Updating pre-commit state: {self.new_height}')
pre_commit_state = dict(height=self.new_height,
transactions=self.block_txn_ids)
self.bigchaindb.store_pre_commit_state(pre_commit_state)
block_txn_hash = calculate_hash(self.block_txn_ids)
block = self.bigchaindb.get_latest_block()
if self.block_txn_ids:
self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])
else:
self.block_txn_hash = block['app_hash']
validator_update = Election.process_block(self.bigchaindb,
self.new_height,
self.block_transactions)
return ResponseEndBlock(validator_updates=validator_update) | Calculate block hash using transaction ids and previous block
hash to be stored in the next block.
Args:
        height (int): new height of the chain. | Below is the instruction that describes the task:
### Input:
Calculate block hash using transaction ids and previous block
hash to be stored in the next block.
Args:
height (int): new height of the chain.
### Response:
def end_block(self, request_end_block):
"""Calculate block hash using transaction ids and previous block
hash to be stored in the next block.
Args:
height (int): new height of the chain.
"""
self.abort_if_abci_chain_is_not_synced()
chain_shift = 0 if self.chain is None else self.chain['height']
height = request_end_block.height + chain_shift
self.new_height = height
# store pre-commit state to recover in case there is a crash during
# `end_block` or `commit`
logger.debug(f'Updating pre-commit state: {self.new_height}')
pre_commit_state = dict(height=self.new_height,
transactions=self.block_txn_ids)
self.bigchaindb.store_pre_commit_state(pre_commit_state)
block_txn_hash = calculate_hash(self.block_txn_ids)
block = self.bigchaindb.get_latest_block()
if self.block_txn_ids:
self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])
else:
self.block_txn_hash = block['app_hash']
validator_update = Election.process_block(self.bigchaindb,
self.new_height,
self.block_transactions)
return ResponseEndBlock(validator_updates=validator_update) |
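The app-hash bookkeeping above boils down to one chaining rule: hash the block's transaction ids, then hash that result together with the previous block's `app_hash` (or carry the old hash forward when the block is empty). The sketch below only illustrates that rule; `calculate_hash` is BigchainDB's own helper, so the exact hashing shown here (sha3-256 over the joined strings) is an assumption.

```python
import hashlib

# Illustration of the app-hash chaining in end_block(); the hash function is
# an assumed stand-in for BigchainDB's calculate_hash helper.
def calculate_hash_sketch(parts):
    return hashlib.sha3_256(''.join(parts).encode()).hexdigest()

prev_app_hash = 'previous-block-app-hash'
block_txn_ids = ['txid-1', 'txid-2', 'txid-3']

block_txn_hash = calculate_hash_sketch(block_txn_ids)
new_app_hash = calculate_hash_sketch([prev_app_hash, block_txn_hash])
print(new_app_hash)   # stored as the next block's app_hash

# With no transactions in the block, the previous app_hash is carried over.
```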
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self | OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None. | Below is the instruction that describes the task:
### Input:
OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
### Response:
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self |
def batch_update(self, pk=None, **kwargs):
"""Update all related inventory sources of the given inventory.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
Update all related inventory sources of the given inventory.
:param pk: Primary key of the given inventory.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of update status of the given inventory.
:rtype: dict
=====API DOCS=====
"""
res = self.get(pk=pk, **kwargs)
url = self.endpoint + '%d/%s/' % (res['id'], 'update_inventory_sources')
return client.post(url, data={}).json() | Update all related inventory sources of the given inventory.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
Update all related inventory sources of the given inventory.
:param pk: Primary key of the given inventory.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of update status of the given inventory.
:rtype: dict
    =====API DOCS===== | Below is the instruction that describes the task:
### Input:
Update all related inventory sources of the given inventory.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
Update all related inventory sources of the given inventory.
:param pk: Primary key of the given inventory.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of update status of the given inventory.
:rtype: dict
=====API DOCS=====
### Response:
def batch_update(self, pk=None, **kwargs):
"""Update all related inventory sources of the given inventory.
Note global option --format is not available here, as the output would always be JSON-formatted.
=====API DOCS=====
Update all related inventory sources of the given inventory.
:param pk: Primary key of the given inventory.
:type pk: int
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object of update status of the given inventory.
:rtype: dict
=====API DOCS=====
"""
res = self.get(pk=pk, **kwargs)
url = self.endpoint + '%d/%s/' % (res['id'], 'update_inventory_sources')
return client.post(url, data={}).json() |
def force_clear_lock(self):
'''Kick out whoever currently owns the namespace global lock.
This is intended as purely a last-resort tool. If another
process has managed to get the global lock for a very long time,
or if it requested the lock with a long expiration and then
crashed, this can make the system functional again. If the
original lock holder is still alive, its session calls may fail
with exceptions.
'''
return redis.Redis(connection_pool=self.pool).delete(self._lock_name) | Kick out whoever currently owns the namespace global lock.
This is intended as purely a last-resort tool. If another
process has managed to get the global lock for a very long time,
or if it requested the lock with a long expiration and then
crashed, this can make the system functional again. If the
original lock holder is still alive, its session calls may fail
    with exceptions. | Below is the instruction that describes the task:
### Input:
Kick out whoever currently owns the namespace global lock.
This is intended as purely a last-resort tool. If another
process has managed to get the global lock for a very long time,
or if it requested the lock with a long expiration and then
crashed, this can make the system functional again. If the
original lock holder is still alive, its session calls may fail
with exceptions.
### Response:
def force_clear_lock(self):
'''Kick out whoever currently owns the namespace global lock.
This is intended as purely a last-resort tool. If another
process has managed to get the global lock for a very long time,
or if it requested the lock with a long expiration and then
crashed, this can make the system functional again. If the
original lock holder is still alive, its session calls may fail
with exceptions.
'''
return redis.Redis(connection_pool=self.pool).delete(self._lock_name) |
def untranscribed_batch_gen(self):
""" A batch generator for all the untranscribed data. """
feat_fns = self.corpus.get_untranscribed_fns()
fn_batches = self.make_batches(feat_fns)
for fn_batch in fn_batches:
batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
flatten=False)
            yield batch_inputs, batch_inputs_lens, fn_batch | A batch generator for all the untranscribed data. | Below is the instruction that describes the task:
### Input:
A batch generator for all the untranscribed data.
### Response:
def untranscribed_batch_gen(self):
""" A batch generator for all the untranscribed data. """
feat_fns = self.corpus.get_untranscribed_fns()
fn_batches = self.make_batches(feat_fns)
for fn_batch in fn_batches:
batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
flatten=False)
yield batch_inputs, batch_inputs_lens, fn_batch |
def create_hooks(config: dict, model: AbstractModel,
dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:
"""
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
"""
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}}
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
hook_path, hook_params = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {}
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
hook_module, hook_class = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. '
'Make sure it is defined under `{}` sub-modules.'
.format(hook_class, CXF_HOOKS_MODULE))
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__)
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex
return hooks | Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
    :return: list of hook objects | Below is the instruction that describes the task:
### Input:
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
### Response:
def create_hooks(config: dict, model: AbstractModel,
dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:
"""
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
"""
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}}
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
hook_path, hook_params = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {}
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
hook_module, hook_class = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. '
'Make sure it is defined under `{}` sub-modules.'
.format(hook_class, CXF_HOOKS_MODULE))
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__)
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex
return hooks |
def build_client_schema(
introspection: Dict, assume_valid: bool = False
) -> GraphQLSchema:
"""Build a GraphQLSchema for use by client tools.
Given the result of a client running the introspection query, creates and returns
a GraphQLSchema instance which can be then used with all GraphQL-core-next tools,
but cannot be used to execute a query, as introspection does not represent the
"resolver", "parse" or "serialize" functions or any other server-internal
mechanisms.
This function expects a complete introspection result. Don't forget to check the
"errors" field of a server response before calling this function.
"""
# Get the schema from the introspection result.
schema_introspection = introspection["__schema"]
# Given a type reference in introspection, return the GraphQLType instance,
# preferring cached instances before building new instances.
def get_type(type_ref: Dict) -> GraphQLType:
kind = type_ref.get("kind")
if kind == TypeKind.LIST.name:
item_ref = type_ref.get("ofType")
if not item_ref:
raise TypeError("Decorated type deeper than introspection query.")
return GraphQLList(get_type(item_ref))
elif kind == TypeKind.NON_NULL.name:
nullable_ref = type_ref.get("ofType")
if not nullable_ref:
raise TypeError("Decorated type deeper than introspection query.")
nullable_type = get_type(nullable_ref)
return GraphQLNonNull(assert_nullable_type(nullable_type))
name = type_ref.get("name")
if not name:
raise TypeError(f"Unknown type reference: {inspect(type_ref)}")
return get_named_type(name)
def get_named_type(type_name: str) -> GraphQLNamedType:
type_ = type_map.get(type_name)
if not type_:
raise TypeError(
f"Invalid or incomplete schema, unknown type: {type_name}."
" Ensure that a full introspection query is used in order"
" to build a client schema."
)
return type_
def get_input_type(type_ref: Dict) -> GraphQLInputType:
input_type = get_type(type_ref)
if not is_input_type(input_type):
raise TypeError(
"Introspection must provide input type for arguments,"
f" but received: {inspect(input_type)}."
)
return cast(GraphQLInputType, input_type)
def get_output_type(type_ref: Dict) -> GraphQLOutputType:
output_type = get_type(type_ref)
if not is_output_type(output_type):
raise TypeError(
"Introspection must provide output type for fields,"
f" but received: {inspect(output_type)}."
)
return cast(GraphQLOutputType, output_type)
def get_object_type(type_ref: Dict) -> GraphQLObjectType:
object_type = get_type(type_ref)
return assert_object_type(object_type)
def get_interface_type(type_ref: Dict) -> GraphQLInterfaceType:
interface_type = get_type(type_ref)
return assert_interface_type(interface_type)
# Given a type's introspection result, construct the correct GraphQLType instance.
def build_type(type_: Dict) -> GraphQLNamedType:
if type_ and "name" in type_ and "kind" in type_:
builder = type_builders.get(cast(str, type_["kind"]))
if builder:
return cast(GraphQLNamedType, builder(type_))
raise TypeError(
"Invalid or incomplete introspection result."
" Ensure that a full introspection query is used in order"
f" to build a client schema: {inspect(type_)}"
)
def build_scalar_def(scalar_introspection: Dict) -> GraphQLScalarType:
return GraphQLScalarType(
name=scalar_introspection["name"],
description=scalar_introspection.get("description"),
serialize=lambda value: value,
)
def build_object_def(object_introspection: Dict) -> GraphQLObjectType:
interfaces = object_introspection.get("interfaces")
if interfaces is None:
raise TypeError(
"Introspection result missing interfaces:"
f" {inspect(object_introspection)}"
)
return GraphQLObjectType(
name=object_introspection["name"],
description=object_introspection.get("description"),
interfaces=lambda: [
get_interface_type(interface)
for interface in cast(List[Dict], interfaces)
],
fields=lambda: build_field_def_map(object_introspection),
)
def build_interface_def(interface_introspection: Dict) -> GraphQLInterfaceType:
return GraphQLInterfaceType(
name=interface_introspection["name"],
description=interface_introspection.get("description"),
fields=lambda: build_field_def_map(interface_introspection),
)
def build_union_def(union_introspection: Dict) -> GraphQLUnionType:
possible_types = union_introspection.get("possibleTypes")
if possible_types is None:
raise TypeError(
"Introspection result missing possibleTypes:"
f" {inspect(union_introspection)}"
)
return GraphQLUnionType(
name=union_introspection["name"],
description=union_introspection.get("description"),
types=lambda: [
get_object_type(type_) for type_ in cast(List[Dict], possible_types)
],
)
def build_enum_def(enum_introspection: Dict) -> GraphQLEnumType:
if enum_introspection.get("enumValues") is None:
raise TypeError(
"Introspection result missing enumValues:"
f" {inspect(enum_introspection)}"
)
return GraphQLEnumType(
name=enum_introspection["name"],
description=enum_introspection.get("description"),
values={
value_introspect["name"]: GraphQLEnumValue(
description=value_introspect.get("description"),
deprecation_reason=value_introspect.get("deprecationReason"),
)
for value_introspect in enum_introspection["enumValues"]
},
)
def build_input_object_def(
input_object_introspection: Dict
) -> GraphQLInputObjectType:
if input_object_introspection.get("inputFields") is None:
raise TypeError(
"Introspection result missing inputFields:"
f" {inspect(input_object_introspection)}"
)
return GraphQLInputObjectType(
name=input_object_introspection["name"],
description=input_object_introspection.get("description"),
fields=lambda: build_input_value_def_map(
input_object_introspection["inputFields"]
),
)
type_builders: Dict[str, Callable[[Dict], GraphQLType]] = {
TypeKind.SCALAR.name: build_scalar_def,
TypeKind.OBJECT.name: build_object_def,
TypeKind.INTERFACE.name: build_interface_def,
TypeKind.UNION.name: build_union_def,
TypeKind.ENUM.name: build_enum_def,
TypeKind.INPUT_OBJECT.name: build_input_object_def,
}
def build_field(field_introspection: Dict) -> GraphQLField:
if field_introspection.get("args") is None:
raise TypeError(
"Introspection result missing field args:"
f" {inspect(field_introspection)}"
)
return GraphQLField(
get_output_type(field_introspection["type"]),
args=build_arg_value_def_map(field_introspection["args"]),
description=field_introspection.get("description"),
deprecation_reason=field_introspection.get("deprecationReason"),
)
def build_field_def_map(type_introspection: Dict) -> Dict[str, GraphQLField]:
if type_introspection.get("fields") is None:
raise TypeError(
"Introspection result missing fields:" f" {type_introspection}"
)
return {
field_introspection["name"]: build_field(field_introspection)
for field_introspection in type_introspection["fields"]
}
def build_arg_value(arg_introspection: Dict) -> GraphQLArgument:
type_ = get_input_type(arg_introspection["type"])
default_value = arg_introspection.get("defaultValue")
default_value = (
INVALID
if default_value is None
else value_from_ast(parse_value(default_value), type_)
)
return GraphQLArgument(
type_,
default_value=default_value,
description=arg_introspection.get("description"),
)
def build_arg_value_def_map(arg_introspections: Dict) -> Dict[str, GraphQLArgument]:
return {
input_value_introspection["name"]: build_arg_value(
input_value_introspection
)
for input_value_introspection in arg_introspections
}
def build_input_value(input_value_introspection: Dict) -> GraphQLInputField:
type_ = get_input_type(input_value_introspection["type"])
default_value = input_value_introspection.get("defaultValue")
default_value = (
INVALID
if default_value is None
else value_from_ast(parse_value(default_value), type_)
)
return GraphQLInputField(
type_,
default_value=default_value,
description=input_value_introspection.get("description"),
)
def build_input_value_def_map(
input_value_introspections: Dict
) -> Dict[str, GraphQLInputField]:
return {
input_value_introspection["name"]: build_input_value(
input_value_introspection
)
for input_value_introspection in input_value_introspections
}
def build_directive(directive_introspection: Dict) -> GraphQLDirective:
if directive_introspection.get("args") is None:
raise TypeError(
"Introspection result missing directive args:"
f" {inspect(directive_introspection)}"
)
if directive_introspection.get("locations") is None:
raise TypeError(
"Introspection result missing directive locations:"
f" {inspect(directive_introspection)}"
)
return GraphQLDirective(
name=directive_introspection["name"],
description=directive_introspection.get("description"),
locations=list(
cast(
Sequence[DirectiveLocation],
directive_introspection.get("locations"),
)
),
args=build_arg_value_def_map(directive_introspection["args"]),
)
# Iterate through all types, getting the type definition for each.
type_map: Dict[str, GraphQLNamedType] = {
type_introspection["name"]: build_type(type_introspection)
for type_introspection in schema_introspection["types"]
}
for std_type_name, std_type in chain(
specified_scalar_types.items(), introspection_types.items()
):
type_map[std_type_name] = std_type
# Get the root Query, Mutation, and Subscription types.
query_type_ref = schema_introspection.get("queryType")
query_type = None if query_type_ref is None else get_object_type(query_type_ref)
mutation_type_ref = schema_introspection.get("mutationType")
mutation_type = (
None if mutation_type_ref is None else get_object_type(mutation_type_ref)
)
subscription_type_ref = schema_introspection.get("subscriptionType")
subscription_type = (
None
if subscription_type_ref is None
else get_object_type(subscription_type_ref)
)
# Get the directives supported by Introspection, assuming empty-set if directives
# were not queried for.
directive_introspections = schema_introspection.get("directives")
directives = (
[
build_directive(directive_introspection)
for directive_introspection in directive_introspections
]
if directive_introspections
else []
)
return GraphQLSchema(
query=query_type,
mutation=mutation_type,
subscription=subscription_type,
types=list(type_map.values()),
directives=directives,
assume_valid=assume_valid,
) | Build a GraphQLSchema for use by client tools.
Given the result of a client running the introspection query, creates and returns
a GraphQLSchema instance which can be then used with all GraphQL-core-next tools,
but cannot be used to execute a query, as introspection does not represent the
"resolver", "parse" or "serialize" functions or any other server-internal
mechanisms.
This function expects a complete introspection result. Don't forget to check the
"errors" field of a server response before calling this function. | Below is the the instruction that describes the task:
### Input:
Build a GraphQLSchema for use by client tools.
Given the result of a client running the introspection query, creates and returns
a GraphQLSchema instance which can be then used with all GraphQL-core-next tools,
but cannot be used to execute a query, as introspection does not represent the
"resolver", "parse" or "serialize" functions or any other server-internal
mechanisms.
This function expects a complete introspection result. Don't forget to check the
"errors" field of a server response before calling this function.
### Response:
def build_client_schema(
introspection: Dict, assume_valid: bool = False
) -> GraphQLSchema:
"""Build a GraphQLSchema for use by client tools.
Given the result of a client running the introspection query, creates and returns
a GraphQLSchema instance which can be then used with all GraphQL-core-next tools,
but cannot be used to execute a query, as introspection does not represent the
"resolver", "parse" or "serialize" functions or any other server-internal
mechanisms.
This function expects a complete introspection result. Don't forget to check the
"errors" field of a server response before calling this function.
"""
# Get the schema from the introspection result.
schema_introspection = introspection["__schema"]
# Given a type reference in introspection, return the GraphQLType instance,
# preferring cached instances before building new instances.
def get_type(type_ref: Dict) -> GraphQLType:
kind = type_ref.get("kind")
if kind == TypeKind.LIST.name:
item_ref = type_ref.get("ofType")
if not item_ref:
raise TypeError("Decorated type deeper than introspection query.")
return GraphQLList(get_type(item_ref))
elif kind == TypeKind.NON_NULL.name:
nullable_ref = type_ref.get("ofType")
if not nullable_ref:
raise TypeError("Decorated type deeper than introspection query.")
nullable_type = get_type(nullable_ref)
return GraphQLNonNull(assert_nullable_type(nullable_type))
name = type_ref.get("name")
if not name:
raise TypeError(f"Unknown type reference: {inspect(type_ref)}")
return get_named_type(name)
def get_named_type(type_name: str) -> GraphQLNamedType:
type_ = type_map.get(type_name)
if not type_:
raise TypeError(
f"Invalid or incomplete schema, unknown type: {type_name}."
" Ensure that a full introspection query is used in order"
" to build a client schema."
)
return type_
def get_input_type(type_ref: Dict) -> GraphQLInputType:
input_type = get_type(type_ref)
if not is_input_type(input_type):
raise TypeError(
"Introspection must provide input type for arguments,"
f" but received: {inspect(input_type)}."
)
return cast(GraphQLInputType, input_type)
def get_output_type(type_ref: Dict) -> GraphQLOutputType:
output_type = get_type(type_ref)
if not is_output_type(output_type):
raise TypeError(
"Introspection must provide output type for fields,"
f" but received: {inspect(output_type)}."
)
return cast(GraphQLOutputType, output_type)
def get_object_type(type_ref: Dict) -> GraphQLObjectType:
object_type = get_type(type_ref)
return assert_object_type(object_type)
def get_interface_type(type_ref: Dict) -> GraphQLInterfaceType:
interface_type = get_type(type_ref)
return assert_interface_type(interface_type)
# Given a type's introspection result, construct the correct GraphQLType instance.
def build_type(type_: Dict) -> GraphQLNamedType:
if type_ and "name" in type_ and "kind" in type_:
builder = type_builders.get(cast(str, type_["kind"]))
if builder:
return cast(GraphQLNamedType, builder(type_))
raise TypeError(
"Invalid or incomplete introspection result."
" Ensure that a full introspection query is used in order"
f" to build a client schema: {inspect(type_)}"
)
def build_scalar_def(scalar_introspection: Dict) -> GraphQLScalarType:
return GraphQLScalarType(
name=scalar_introspection["name"],
description=scalar_introspection.get("description"),
serialize=lambda value: value,
)
def build_object_def(object_introspection: Dict) -> GraphQLObjectType:
interfaces = object_introspection.get("interfaces")
if interfaces is None:
raise TypeError(
"Introspection result missing interfaces:"
f" {inspect(object_introspection)}"
)
return GraphQLObjectType(
name=object_introspection["name"],
description=object_introspection.get("description"),
interfaces=lambda: [
get_interface_type(interface)
for interface in cast(List[Dict], interfaces)
],
fields=lambda: build_field_def_map(object_introspection),
)
def build_interface_def(interface_introspection: Dict) -> GraphQLInterfaceType:
return GraphQLInterfaceType(
name=interface_introspection["name"],
description=interface_introspection.get("description"),
fields=lambda: build_field_def_map(interface_introspection),
)
def build_union_def(union_introspection: Dict) -> GraphQLUnionType:
possible_types = union_introspection.get("possibleTypes")
if possible_types is None:
raise TypeError(
"Introspection result missing possibleTypes:"
f" {inspect(union_introspection)}"
)
return GraphQLUnionType(
name=union_introspection["name"],
description=union_introspection.get("description"),
types=lambda: [
get_object_type(type_) for type_ in cast(List[Dict], possible_types)
],
)
def build_enum_def(enum_introspection: Dict) -> GraphQLEnumType:
if enum_introspection.get("enumValues") is None:
raise TypeError(
"Introspection result missing enumValues:"
f" {inspect(enum_introspection)}"
)
return GraphQLEnumType(
name=enum_introspection["name"],
description=enum_introspection.get("description"),
values={
value_introspect["name"]: GraphQLEnumValue(
description=value_introspect.get("description"),
deprecation_reason=value_introspect.get("deprecationReason"),
)
for value_introspect in enum_introspection["enumValues"]
},
)
def build_input_object_def(
input_object_introspection: Dict
) -> GraphQLInputObjectType:
if input_object_introspection.get("inputFields") is None:
raise TypeError(
"Introspection result missing inputFields:"
f" {inspect(input_object_introspection)}"
)
return GraphQLInputObjectType(
name=input_object_introspection["name"],
description=input_object_introspection.get("description"),
fields=lambda: build_input_value_def_map(
input_object_introspection["inputFields"]
),
)
type_builders: Dict[str, Callable[[Dict], GraphQLType]] = {
TypeKind.SCALAR.name: build_scalar_def,
TypeKind.OBJECT.name: build_object_def,
TypeKind.INTERFACE.name: build_interface_def,
TypeKind.UNION.name: build_union_def,
TypeKind.ENUM.name: build_enum_def,
TypeKind.INPUT_OBJECT.name: build_input_object_def,
}
def build_field(field_introspection: Dict) -> GraphQLField:
if field_introspection.get("args") is None:
raise TypeError(
"Introspection result missing field args:"
f" {inspect(field_introspection)}"
)
return GraphQLField(
get_output_type(field_introspection["type"]),
args=build_arg_value_def_map(field_introspection["args"]),
description=field_introspection.get("description"),
deprecation_reason=field_introspection.get("deprecationReason"),
)
def build_field_def_map(type_introspection: Dict) -> Dict[str, GraphQLField]:
if type_introspection.get("fields") is None:
raise TypeError(
"Introspection result missing fields:" f" {type_introspection}"
)
return {
field_introspection["name"]: build_field(field_introspection)
for field_introspection in type_introspection["fields"]
}
def build_arg_value(arg_introspection: Dict) -> GraphQLArgument:
type_ = get_input_type(arg_introspection["type"])
default_value = arg_introspection.get("defaultValue")
default_value = (
INVALID
if default_value is None
else value_from_ast(parse_value(default_value), type_)
)
return GraphQLArgument(
type_,
default_value=default_value,
description=arg_introspection.get("description"),
)
def build_arg_value_def_map(arg_introspections: Dict) -> Dict[str, GraphQLArgument]:
return {
input_value_introspection["name"]: build_arg_value(
input_value_introspection
)
for input_value_introspection in arg_introspections
}
def build_input_value(input_value_introspection: Dict) -> GraphQLInputField:
type_ = get_input_type(input_value_introspection["type"])
default_value = input_value_introspection.get("defaultValue")
default_value = (
INVALID
if default_value is None
else value_from_ast(parse_value(default_value), type_)
)
return GraphQLInputField(
type_,
default_value=default_value,
description=input_value_introspection.get("description"),
)
def build_input_value_def_map(
input_value_introspections: Dict
) -> Dict[str, GraphQLInputField]:
return {
input_value_introspection["name"]: build_input_value(
input_value_introspection
)
for input_value_introspection in input_value_introspections
}
def build_directive(directive_introspection: Dict) -> GraphQLDirective:
if directive_introspection.get("args") is None:
raise TypeError(
"Introspection result missing directive args:"
f" {inspect(directive_introspection)}"
)
if directive_introspection.get("locations") is None:
raise TypeError(
"Introspection result missing directive locations:"
f" {inspect(directive_introspection)}"
)
return GraphQLDirective(
name=directive_introspection["name"],
description=directive_introspection.get("description"),
locations=list(
cast(
Sequence[DirectiveLocation],
directive_introspection.get("locations"),
)
),
args=build_arg_value_def_map(directive_introspection["args"]),
)
# Iterate through all types, getting the type definition for each.
type_map: Dict[str, GraphQLNamedType] = {
type_introspection["name"]: build_type(type_introspection)
for type_introspection in schema_introspection["types"]
}
for std_type_name, std_type in chain(
specified_scalar_types.items(), introspection_types.items()
):
type_map[std_type_name] = std_type
# Get the root Query, Mutation, and Subscription types.
query_type_ref = schema_introspection.get("queryType")
query_type = None if query_type_ref is None else get_object_type(query_type_ref)
mutation_type_ref = schema_introspection.get("mutationType")
mutation_type = (
None if mutation_type_ref is None else get_object_type(mutation_type_ref)
)
subscription_type_ref = schema_introspection.get("subscriptionType")
subscription_type = (
None
if subscription_type_ref is None
else get_object_type(subscription_type_ref)
)
# Get the directives supported by Introspection, assuming empty-set if directives
# were not queried for.
directive_introspections = schema_introspection.get("directives")
directives = (
[
build_directive(directive_introspection)
for directive_introspection in directive_introspections
]
if directive_introspections
else []
)
return GraphQLSchema(
query=query_type,
mutation=mutation_type,
subscription=subscription_type,
types=list(type_map.values()),
directives=directives,
assume_valid=assume_valid,
) |
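For orientation, a hedged end-to-end sketch of the builder above, using the top-level helpers of a recent graphql-core release (build_schema, graphql_sync, get_introspection_query); adjust the imports if your version exposes them elsewhere.
from graphql import (build_schema, build_client_schema,
                     get_introspection_query, graphql_sync)
server_schema = build_schema("type Query { hello: String }")
result = graphql_sync(server_schema, get_introspection_query())  # run the introspection query
client_schema = build_client_schema(result.data)                 # schema for client tools only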
def get_container_service(access_token, subscription_id, resource_group, service_name):
'''Get details about an Azure Container Server
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. JSON model.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
return do_get(endpoint, access_token) | Get details about an Azure Container Server
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. JSON model. | Below is the the instruction that describes the task:
### Input:
Get details about an Azure Container Server
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. JSON model.
### Response:
def get_container_service(access_token, subscription_id, resource_group, service_name):
'''Get details about an Azure Container Server
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. JSON model.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
return do_get(endpoint, access_token) |
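As an aside, the endpoint string assembled above can be previewed in isolation; the ARM host and API version below are placeholders standing in for get_rm_endpoint() and ACS_API, which are defined elsewhere in azurerm.
# Hypothetical values; get_rm_endpoint() and ACS_API come from the azurerm package.
rm_endpoint, acs_api = 'https://management.azure.com', '2017-07-01'
subscription_id, resource_group, service_name = 'sub-0000', 'my-rg', 'my-acs'
endpoint = ''.join([rm_endpoint,
                    '/subscriptions/', subscription_id,
                    '/resourcegroups/', resource_group,
                    '/providers/Microsoft.ContainerService/ContainerServices/', service_name,
                    '?api-version=', acs_api])
# endpoint -> 'https://management.azure.com/subscriptions/sub-0000/resourcegroups/my-rg/...'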
def buffer_typechecks(self, call_id, payload):
"""Adds typecheck events to the buffer"""
if self.currently_buffering_typechecks:
for note in payload['notes']:
self.buffered_notes.append(note) | Adds typecheck events to the buffer | Below is the the instruction that describes the task:
### Input:
Adds typecheck events to the buffer
### Response:
def buffer_typechecks(self, call_id, payload):
"""Adds typecheck events to the buffer"""
if self.currently_buffering_typechecks:
for note in payload['notes']:
self.buffered_notes.append(note) |
def pos_to_linecol(text, pos):
"""Return a tuple of line and column for offset pos in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why.
"""
line_start = text.rfind("\n", 0, pos) + 1
line = text.count("\n", 0, line_start) + 1
col = pos - line_start
return line, col | Return a tuple of line and column for offset pos in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why. | Below is the the instruction that describes the task:
### Input:
Return a tuple of line and column for offset pos in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why.
### Response:
def pos_to_linecol(text, pos):
"""Return a tuple of line and column for offset pos in text.
Lines are one-based, columns zero-based.
This is how Jedi wants it. Don't ask me why.
"""
line_start = text.rfind("\n", 0, pos) + 1
line = text.count("\n", 0, line_start) + 1
col = pos - line_start
return line, col |
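A quick standalone check of the offset-to-(line, column) arithmetic above; the sample text is made up.
text = "first line\nsecond line\nthird"
pos = text.index("second")                    # offset 11
line_start = text.rfind("\n", 0, pos) + 1     # 11: start of the second line
line = text.count("\n", 0, line_start) + 1    # one-based line -> 2
col = pos - line_start                        # zero-based column -> 0
assert (line, col) == (2, 0)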
def get_default_configs(self):
""" returns default configs list, from /etc, home dir and package_data"""
# initialize basic defaults
configs = [resource_filename(__name__, 'config/00-base.ini')]
try:
conf_files = sorted(os.listdir(self.baseconfigs_location))
for filename in conf_files:
if fnmatch.fnmatch(filename, '*.ini'):
configs += [
os.path.realpath(
self.baseconfigs_location + os.sep + filename)
]
except OSError:
self.log.warn(
self.baseconfigs_location + ' is not accessible to get configs list')
configs += [os.path.expanduser('~/.yandex-tank')]
return configs | returns default configs list, from /etc, home dir and package_data | Below is the the instruction that describes the task:
### Input:
returns default configs list, from /etc, home dir and package_data
### Response:
def get_default_configs(self):
""" returns default configs list, from /etc, home dir and package_data"""
# initialize basic defaults
configs = [resource_filename(__name__, 'config/00-base.ini')]
try:
conf_files = sorted(os.listdir(self.baseconfigs_location))
for filename in conf_files:
if fnmatch.fnmatch(filename, '*.ini'):
configs += [
os.path.realpath(
self.baseconfigs_location + os.sep + filename)
]
except OSError:
self.log.warn(
self.baseconfigs_location + ' is not accessible to get configs list')
configs += [os.path.expanduser('~/.yandex-tank')]
return configs |
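The *.ini filtering step above can be reproduced on its own; the directory path below is hypothetical.
import fnmatch
import os
conf_dir = '/etc/yandex-tank'                 # hypothetical baseconfigs_location
if os.path.isdir(conf_dir):
    ini_paths = [os.path.realpath(os.path.join(conf_dir, name))
                 for name in sorted(os.listdir(conf_dir))
                 if fnmatch.fnmatch(name, '*.ini')]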
def on_remove_vrf_conf(self, evt):
"""Removes VRF table associated with given `vrf_conf`.
Cleans up other links to this table as well.
"""
vrf_conf = evt.value
# Detach VrfConf change listener.
vrf_conf.remove_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
self._table_manager.remove_vrf_by_vrf_conf(vrf_conf)
# Update local RT NLRIs
self._rt_manager.update_local_rt_nlris()
self._signal_bus.vrf_removed(vrf_conf.route_dist)
# Remove AttributeMaps under the removed vrf
rd = vrf_conf.route_dist
rf = vrf_conf.route_family
peers = self._peer_manager.iterpeers
for peer in peers:
key = ':'.join([rd, rf])
peer.attribute_maps.pop(key, None) | Removes VRF table associated with given `vrf_conf`.
Cleans up other links to this table as well. | Below is the the instruction that describes the task:
### Input:
Removes VRF table associated with given `vrf_conf`.
Cleans up other links to this table as well.
### Response:
def on_remove_vrf_conf(self, evt):
"""Removes VRF table associated with given `vrf_conf`.
Cleans up other links to this table as well.
"""
vrf_conf = evt.value
# Detach VrfConf change listener.
vrf_conf.remove_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
self._table_manager.remove_vrf_by_vrf_conf(vrf_conf)
# Update local RT NLRIs
self._rt_manager.update_local_rt_nlris()
self._signal_bus.vrf_removed(vrf_conf.route_dist)
# Remove AttributeMaps under the removed vrf
rd = vrf_conf.route_dist
rf = vrf_conf.route_family
peers = self._peer_manager.iterpeers
for peer in peers:
key = ':'.join([rd, rf])
peer.attribute_maps.pop(key, None) |
def add_samples(self, samples, reverse=False):
"""
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
"""
self.log(u"Adding samples...")
samples_length = len(samples)
current_length = self.__samples_length
future_length = current_length + samples_length
if (self.__samples is None) or (self.__samples_capacity < future_length):
self.preallocate_memory(2 * future_length)
if reverse:
self.__samples[current_length:future_length] = samples[::-1]
else:
self.__samples[current_length:future_length] = samples[:]
self.__samples_length = future_length
self._update_length()
self.log(u"Adding samples... done") | Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1 | Below is the the instruction that describes the task:
### Input:
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
### Response:
def add_samples(self, samples, reverse=False):
"""
Concatenate the given new samples to the current audio data.
This function initializes the memory if no audio data
is present already.
If ``reverse`` is ``True``, the new samples
will be reversed and then concatenated.
:param samples: the new samples to be concatenated
:type samples: :class:`numpy.ndarray` (1D)
:param bool reverse: if ``True``, concatenate new samples after reversing them
.. versionadded:: 1.2.1
"""
self.log(u"Adding samples...")
samples_length = len(samples)
current_length = self.__samples_length
future_length = current_length + samples_length
if (self.__samples is None) or (self.__samples_capacity < future_length):
self.preallocate_memory(2 * future_length)
if reverse:
self.__samples[current_length:future_length] = samples[::-1]
else:
self.__samples[current_length:future_length] = samples[:]
self.__samples_length = future_length
self._update_length()
self.log(u"Adding samples... done") |
def _prune_edges(G, X, traj_lengths, pruning_thresh=0.1, verbose=False):
'''Prune edges in graph G via cosine distance with trajectory edges.'''
W = G.matrix('dense', copy=True)
degree = G.degree(kind='out', weighted=False)
i = 0
num_bad = 0
for n in traj_lengths:
s, t = np.nonzero(W[i:i+n-1])
graph_edges = X[t] - X[s+i]
traj_edges = np.diff(X[i:i+n], axis=0)
traj_edges = np.repeat(traj_edges, degree[i:i+n-1], axis=0)
theta = paired_distances(graph_edges, traj_edges, 'cosine')
bad_edges = theta > pruning_thresh
s, t = s[bad_edges], t[bad_edges]
if verbose: # pragma: no cover
num_bad += np.count_nonzero(W[s,t])
W[s,t] = 0
i += n
if verbose: # pragma: no cover
print('removed %d bad edges' % num_bad)
return Graph.from_adj_matrix(W) | Prune edges in graph G via cosine distance with trajectory edges. | Below is the the instruction that describes the task:
### Input:
Prune edges in graph G via cosine distance with trajectory edges.
### Response:
def _prune_edges(G, X, traj_lengths, pruning_thresh=0.1, verbose=False):
'''Prune edges in graph G via cosine distance with trajectory edges.'''
W = G.matrix('dense', copy=True)
degree = G.degree(kind='out', weighted=False)
i = 0
num_bad = 0
for n in traj_lengths:
s, t = np.nonzero(W[i:i+n-1])
graph_edges = X[t] - X[s+i]
traj_edges = np.diff(X[i:i+n], axis=0)
traj_edges = np.repeat(traj_edges, degree[i:i+n-1], axis=0)
theta = paired_distances(graph_edges, traj_edges, 'cosine')
bad_edges = theta > pruning_thresh
s, t = s[bad_edges], t[bad_edges]
if verbose: # pragma: no cover
num_bad += np.count_nonzero(W[s,t])
W[s,t] = 0
i += n
if verbose: # pragma: no cover
print('removed %d bad edges' % num_bad)
return Graph.from_adj_matrix(W) |
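The pruning test above boils down to a paired cosine distance between graph edges and trajectory edges; here is a tiny standalone check with made-up vectors, assuming paired_distances comes from scikit-learn.
import numpy as np
from sklearn.metrics.pairwise import paired_distances   # assumed source of paired_distances
graph_edges = np.array([[1.0, 0.0], [0.0, 1.0]])
traj_edges = np.array([[1.0, 0.1], [1.0, 0.0]])
theta = paired_distances(graph_edges, traj_edges, metric='cosine')
bad_edges = theta > 0.1                                  # same default pruning_thresh as above
# bad_edges -> [False, True]: only the second edge points away from its trajectory edge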
def clean_text(self, name, **kwargs):
"""Basic clean-up."""
name = strip_quotes(name)
name = collapse_spaces(name)
return name | Basic clean-up. | Below is the the instruction that describes the task:
### Input:
Basic clean-up.
### Response:
def clean_text(self, name, **kwargs):
"""Basic clean-up."""
name = strip_quotes(name)
name = collapse_spaces(name)
return name |
def get_sub_node(dsp, path, node_attr='auto', solution=NONE, _level=0,
_dsp_name=NONE):
"""
Returns a sub node of a dispatcher.
:param dsp:
A dispatcher object or a sub dispatch function.
:type dsp: schedula.Dispatcher | SubDispatch
:param path:
A sequence of node ids or a single node id. Each id identifies a
sub-level node.
:type path: tuple, str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its attributes
are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
:type node_attr: str | None
:param solution:
Parent Solution.
:type solution: schedula.utils.Solution
:param _level:
Path level.
:type _level: int
:param _dsp_name:
dsp name to show when the function raise a value error.
:type _dsp_name: str
:return:
A sub node of a dispatcher and its path.
:rtype: dict | object, tuple[str]
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> s_dsp = Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node 'c' output or type::
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'))
(4, ('Sub-dispatcher', 'c'))
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type')
('data', ('Sub-dispatcher', 'c'))
Get the sub-dispatcher output:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output')
>>> sol, p
(Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',))
"""
path = list(path)
if isinstance(dsp, SubDispatch): # Take the dispatcher obj.
dsp = dsp.dsp
if _dsp_name is NONE: # Set origin dispatcher name for warning purpose.
_dsp_name = dsp.name
if solution is NONE: # Set origin dispatcher name for warning purpose.
solution = dsp.solution
node_id = path[_level] # Node id at given level.
try:
node_id, node = _get_node(dsp.nodes, node_id) # Get dispatcher node.
path[_level] = node_id
except KeyError:
if _level == len(path) - 1 and node_attr in ('auto', 'output') \
and solution is not EMPTY:
try:
# Get dispatcher node.
node_id, node = _get_node(solution, node_id, False)
path[_level] = node_id
return node, tuple(path)
except KeyError:
pass
msg = 'Path %s does not exist in %s dispatcher.' % (path, _dsp_name)
raise ValueError(msg)
_level += 1 # Next level.
if _level < len(path): # Is not path leaf?.
try:
if node['type'] in ('function', 'dispatcher'):
try:
solution = solution.workflow.node[node_id]['solution']
except (KeyError, AttributeError):
solution = EMPTY
dsp = parent_func(node['function']) # Get parent function.
else:
raise KeyError
except KeyError:
msg = 'Node of path %s at level %i is not a function or ' \
'sub-dispatcher node of %s ' \
'dispatcher.' % (path, _level, _dsp_name)
raise ValueError(msg)
# Continue the node search.
return get_sub_node(dsp, path, node_attr, solution, _level, _dsp_name)
else:
data, sol = EMPTY, solution
# Return the sub node.
if node_attr == 'auto' and node['type'] != 'data': # Auto: function.
node_attr = 'function'
elif node_attr == 'auto' and sol is not EMPTY and node_id in sol:
data = sol[node_id] # Auto: data output.
elif node_attr == 'output' and node['type'] != 'data':
data = sol.workflow.nodes[node_id]['solution']
elif node_attr == 'output' and node['type'] == 'data':
data = sol[node_id]
elif node_attr == 'description': # Search and return node description.
data = dsp.search_node_description(node_id)[0]
elif node_attr == 'value_type' and node['type'] == 'data':
# Search and return data node value's type.
data = dsp.search_node_description(node_id, node_attr)[0]
elif node_attr == 'default_value':
data = dsp.default_values[node_id]
elif node_attr == 'dsp':
data = dsp
elif node_attr == 'sol':
data = sol
if data is EMPTY:
data = node.get(node_attr, node)
return data, tuple(path) | Returns a sub node of a dispatcher.
:param dsp:
A dispatcher object or a sub dispatch function.
:type dsp: schedula.Dispatcher | SubDispatch
:param path:
A sequence of node ids or a single node id. Each id identifies a
sub-level node.
:type path: tuple, str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its attributes
are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
:type node_attr: str | None
:param solution:
Parent Solution.
:type solution: schedula.utils.Solution
:param _level:
Path level.
:type _level: int
:param _dsp_name:
dsp name to show when the function raise a value error.
:type _dsp_name: str
:return:
A sub node of a dispatcher and its path.
:rtype: dict | object, tuple[str]
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> s_dsp = Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node 'c' output or type::
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'))
(4, ('Sub-dispatcher', 'c'))
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type')
('data', ('Sub-dispatcher', 'c'))
Get the sub-dispatcher output:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output')
>>> sol, p
(Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',)) | Below is the the instruction that describes the task:
### Input:
Returns a sub node of a dispatcher.
:param dsp:
A dispatcher object or a sub dispatch function.
:type dsp: schedula.Dispatcher | SubDispatch
:param path:
A sequence of node ids or a single node id. Each id identifies a
sub-level node.
:type path: tuple, str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its attributes
are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
:type node_attr: str | None
:param solution:
Parent Solution.
:type solution: schedula.utils.Solution
:param _level:
Path level.
:type _level: int
:param _dsp_name:
dsp name to show when the function raise a value error.
:type _dsp_name: str
:return:
A sub node of a dispatcher and its path.
:rtype: dict | object, tuple[str]
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> s_dsp = Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node 'c' output or type::
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'))
(4, ('Sub-dispatcher', 'c'))
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type')
('data', ('Sub-dispatcher', 'c'))
Get the sub-dispatcher output:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output')
>>> sol, p
(Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',))
### Response:
def get_sub_node(dsp, path, node_attr='auto', solution=NONE, _level=0,
_dsp_name=NONE):
"""
Returns a sub node of a dispatcher.
:param dsp:
A dispatcher object or a sub dispatch function.
:type dsp: schedula.Dispatcher | SubDispatch
:param path:
A sequence of node ids or a single node id. Each id identifies a
sub-level node.
:type path: tuple, str
:param node_attr:
Output node attr.
If the searched node does not have this attribute, all its attributes
are returned.
When 'auto', returns the "default" attributes of the searched node,
which are:
- for data node: its output, and if not exists, all its attributes.
- for function and sub-dispatcher nodes: the 'function' attribute.
:type node_attr: str | None
:param solution:
Parent Solution.
:type solution: schedula.utils.Solution
:param _level:
Path level.
:type _level: int
:param _dsp_name:
dsp name to show when the function raise a value error.
:type _dsp_name: str
:return:
A sub node of a dispatcher and its path.
:rtype: dict | object, tuple[str]
**Example**:
.. dispatcher:: o
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> from schedula import Dispatcher
>>> s_dsp = Dispatcher(name='Sub-dispatcher')
>>> def fun(a, b):
... return a + b
...
>>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c'])
'a + b'
>>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict')
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b'])
'Sub-dispatcher'
>>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}})
...
Get the sub node 'c' output or type::
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'))
(4, ('Sub-dispatcher', 'c'))
>>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type')
('data', ('Sub-dispatcher', 'c'))
Get the sub-dispatcher output:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}, depth=-1
:code:
>>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output')
>>> sol, p
(Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',))
"""
path = list(path)
if isinstance(dsp, SubDispatch): # Take the dispatcher obj.
dsp = dsp.dsp
if _dsp_name is NONE: # Set origin dispatcher name for warning purpose.
_dsp_name = dsp.name
if solution is NONE: # Set origin dispatcher name for warning purpose.
solution = dsp.solution
node_id = path[_level] # Node id at given level.
try:
node_id, node = _get_node(dsp.nodes, node_id) # Get dispatcher node.
path[_level] = node_id
except KeyError:
if _level == len(path) - 1 and node_attr in ('auto', 'output') \
and solution is not EMPTY:
try:
# Get dispatcher node.
node_id, node = _get_node(solution, node_id, False)
path[_level] = node_id
return node, tuple(path)
except KeyError:
pass
msg = 'Path %s does not exist in %s dispatcher.' % (path, _dsp_name)
raise ValueError(msg)
_level += 1 # Next level.
if _level < len(path): # Is not path leaf?.
try:
if node['type'] in ('function', 'dispatcher'):
try:
solution = solution.workflow.node[node_id]['solution']
except (KeyError, AttributeError):
solution = EMPTY
dsp = parent_func(node['function']) # Get parent function.
else:
raise KeyError
except KeyError:
msg = 'Node of path %s at level %i is not a function or ' \
'sub-dispatcher node of %s ' \
'dispatcher.' % (path, _level, _dsp_name)
raise ValueError(msg)
# Continue the node search.
return get_sub_node(dsp, path, node_attr, solution, _level, _dsp_name)
else:
data, sol = EMPTY, solution
# Return the sub node.
if node_attr == 'auto' and node['type'] != 'data': # Auto: function.
node_attr = 'function'
elif node_attr == 'auto' and sol is not EMPTY and node_id in sol:
data = sol[node_id] # Auto: data output.
elif node_attr == 'output' and node['type'] != 'data':
data = sol.workflow.nodes[node_id]['solution']
elif node_attr == 'output' and node['type'] == 'data':
data = sol[node_id]
elif node_attr == 'description': # Search and return node description.
data = dsp.search_node_description(node_id)[0]
elif node_attr == 'value_type' and node['type'] == 'data':
# Search and return data node value's type.
data = dsp.search_node_description(node_id, node_attr)[0]
elif node_attr == 'default_value':
data = dsp.default_values[node_id]
elif node_attr == 'dsp':
data = dsp
elif node_attr == 'sol':
data = sol
if data is EMPTY:
data = node.get(node_attr, node)
return data, tuple(path) |
def gbayes(x0, g_est, sigma):
"""
Estimate Bayes posterior with Gaussian noise [Efron2014]_.
Parameters
----------
x0: ndarray
an observation
g_est: float
a prior density, as returned by gfit
sigma: int
noise estimate
Returns
-------
An array of the posterior estimate E[mu | x0]
"""
Kx = norm().pdf((g_est[0] - x0) / sigma)
post = Kx * g_est[1]
post /= sum(post)
return sum(post * g_est[0]) | Estimate Bayes posterior with Gaussian noise [Efron2014]_.
Parameters
----------
x0: ndarray
an observation
g_est: float
a prior density, as returned by gfit
sigma: int
noise estimate
Returns
-------
An array of the posterior estimate E[mu | x0] | Below is the the instruction that describes the task:
### Input:
Estimate Bayes posterior with Gaussian noise [Efron2014]_.
Parameters
----------
x0: ndarray
an observation
g_est: float
a prior density, as returned by gfit
sigma: int
noise estimate
Returns
-------
An array of the posterior estimate E[mu | x0]
### Response:
def gbayes(x0, g_est, sigma):
"""
Estimate Bayes posterior with Gaussian noise [Efron2014]_.
Parameters
----------
x0: ndarray
an observation
g_est: float
a prior density, as returned by gfit
sigma: int
noise estimate
Returns
-------
An array of the posterior estimate E[mu | x0]
"""
Kx = norm().pdf((g_est[0] - x0) / sigma)
post = Kx * g_est[1]
post /= sum(post)
return sum(post * g_est[0]) |
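For a concrete feel of the posterior formula above, a self-contained run with a hand-built (grid, weights) pair standing in for the gfit output; with a standard-normal prior and sigma = 1, the estimate shrinks the observation about halfway toward zero.
import numpy as np
from scipy.stats import norm
grid = np.linspace(-3, 3, 61)                  # support of the prior
weights = norm(0, 1).pdf(grid)
weights /= weights.sum()                       # normalized prior mass on the grid
x0, sigma = 1.0, 1.0
Kx = norm().pdf((grid - x0) / sigma)           # same kernel as in gbayes
post = Kx * weights
post /= post.sum()
posterior_mean = (post * grid).sum()           # approximately 0.5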
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name) | The number of `params` needed to create a single distribution. | Below is the the instruction that describes the task:
### Input:
The number of `params` needed to create a single distribution.
### Response:
def params_size(num_components, event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
return MixtureSameFamily.params_size(
num_components,
IndependentLogistic.params_size(event_shape, name=name),
name=name) |
def parse(self, data):
# type: (bytes) -> None
'''
Parse the passed in data into a UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
data - The data to parse.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized')
(self.char_set, self.log_vol_ident, self.lv_info1, self.lv_info2,
self.lv_info3, impl_ident,
self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self._initialized = True | Parse the passed in data into a UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
data - The data to parse.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
Parse the passed in data into a UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
data - The data to parse.
Returns:
Nothing.
### Response:
def parse(self, data):
# type: (bytes) -> None
'''
Parse the passed in data into a UDF Implementation Use Volume
Descriptor Implementation Use field.
Parameters:
data - The data to parse.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized')
(self.char_set, self.log_vol_ident, self.lv_info1, self.lv_info2,
self.lv_info3, impl_ident,
self.impl_use) = struct.unpack_from(self.FMT, data, 0)
self.impl_ident = UDFEntityID()
self.impl_ident.parse(impl_ident)
self._initialized = True |
def extract_value(mapping, bind, data):
""" Given a mapping and JSON schema spec, extract a value from ``data``
and apply certain transformations to normalize the value. """
columns = mapping.get('columns', [mapping.get('column')])
values = [data.get(c) for c in columns]
for transform in mapping.get('transforms', []):
# any added transforms must also be added to the schema.
values = list(TRANSFORMS[transform](mapping, bind, values))
format_str = mapping.get('format')
value = values[0] if len(values) else None
if not is_empty(format_str):
value = format_str % tuple('' if v is None else v for v in values)
empty = is_empty(value)
if empty:
value = mapping.get('default') or bind.schema.get('default')
return empty, convert_value(bind, value) | Given a mapping and JSON schema spec, extract a value from ``data``
and apply certain transformations to normalize the value. | Below is the the instruction that describes the task:
### Input:
Given a mapping and JSON schema spec, extract a value from ``data``
and apply certain transformations to normalize the value.
### Response:
def extract_value(mapping, bind, data):
""" Given a mapping and JSON schema spec, extract a value from ``data``
and apply certain transformations to normalize the value. """
columns = mapping.get('columns', [mapping.get('column')])
values = [data.get(c) for c in columns]
for transform in mapping.get('transforms', []):
# any added transforms must also be added to the schema.
values = list(TRANSFORMS[transform](mapping, bind, values))
format_str = mapping.get('format')
value = values[0] if len(values) else None
if not is_empty(format_str):
value = format_str % tuple('' if v is None else v for v in values)
empty = is_empty(value)
if empty:
value = mapping.get('default') or bind.schema.get('default')
return empty, convert_value(bind, value) |
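The column/format composition at the heart of extract_value can be traced with a toy mapping; the field names below are invented.
mapping = {'columns': ['first', 'last'], 'format': '%s %s'}   # hypothetical mapping spec
data = {'first': 'Ada', 'last': 'Lovelace'}
values = [data.get(c) for c in mapping['columns']]
value = mapping['format'] % tuple('' if v is None else v for v in values)
assert value == 'Ada Lovelace'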
def set_bool(_bytearray, byte_index, bool_index, value):
"""
Set boolean value on location in bytearray
"""
assert value in [0, 1, True, False]
current_value = get_bool(_bytearray, byte_index, bool_index)
index_value = 1 << bool_index
# check if bool already has correct value
if current_value == value:
return
if value:
# make sure index_v is IN current byte
_bytearray[byte_index] += index_value
else:
# make sure index_v is NOT in current byte
_bytearray[byte_index] -= index_value | Set boolean value on location in bytearray | Below is the the instruction that describes the task:
### Input:
Set boolean value on location in bytearray
### Response:
def set_bool(_bytearray, byte_index, bool_index, value):
"""
Set boolean value on location in bytearray
"""
assert value in [0, 1, True, False]
current_value = get_bool(_bytearray, byte_index, bool_index)
index_value = 1 << bool_index
# check if bool already has correct value
if current_value == value:
return
if value:
# make sure index_v is IN current byte
_bytearray[byte_index] += index_value
else:
# make sure index_v is NOT in current byte
_bytearray[byte_index] -= index_value |
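The add/subtract manipulation above is equivalent to setting and clearing a single bit; a short check on a fresh buffer:
buf = bytearray(2)
bool_index = 3
buf[0] += 1 << bool_index          # what set_bool does when the bit is currently 0
assert buf[0] == 0b00001000
buf[0] -= 1 << bool_index          # and when clearing a bit that is currently 1
assert buf[0] == 0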
def __valid_url(cls, url):
"""Expects input to already be a valid string"""
bits = urlparse(url)
return ((bits.scheme == "http" or bits.scheme == "https") and
_PATTERN_URL_PART.match(bits.netloc) and
_PATTERN_URL_PART.match(bits.path)) | Expects input to already be a valid string | Below is the the instruction that describes the task:
### Input:
Expects input to already be a valid string
### Response:
def __valid_url(cls, url):
"""Expects input to already be a valid string"""
bits = urlparse(url)
return ((bits.scheme == "http" or bits.scheme == "https") and
_PATTERN_URL_PART.match(bits.netloc) and
_PATTERN_URL_PART.match(bits.path)) |
def QA_util_date_gap(date, gap, methods):
'''
    :param date: start date as a string, type str, e.g. 2018-11-11
    :param gap: integer, number of trading days to offset by
    :param methods: gt greater than, gte greater than or equal, lt less than, lte less than or equal, eq equal
    :return: date string, e.g. 2000-01-01
'''
try:
if methods in ['>', 'gt']:
return trade_date_sse[trade_date_sse.index(date) + gap]
elif methods in ['>=', 'gte']:
return trade_date_sse[trade_date_sse.index(date) + gap - 1]
elif methods in ['<', 'lt']:
return trade_date_sse[trade_date_sse.index(date) - gap]
elif methods in ['<=', 'lte']:
return trade_date_sse[trade_date_sse.index(date) - gap + 1]
elif methods in ['==', '=', 'eq']:
return date
except:
        return 'wrong date' | :param date: start date as a string, type str, e.g. 2018-11-11
    :param gap: integer, number of trading days to offset by
    :param methods: gt greater than, gte greater than or equal, lt less than, lte less than or equal, eq equal
    :return: date string, e.g. 2000-01-01 | Below is the the instruction that describes the task:
### Input:
:param date: start date as a string, type str, e.g. 2018-11-11
    :param gap: integer, number of trading days to offset by
    :param methods: gt greater than, gte greater than or equal, lt less than, lte less than or equal, eq equal
    :return: date string, e.g. 2000-01-01
### Response:
def QA_util_date_gap(date, gap, methods):
'''
    :param date: start date as a string, type str, e.g. 2018-11-11
    :param gap: integer, number of trading days to offset by
    :param methods: gt greater than, gte greater than or equal, lt less than, lte less than or equal, eq equal
    :return: date string, e.g. 2000-01-01
'''
try:
if methods in ['>', 'gt']:
return trade_date_sse[trade_date_sse.index(date) + gap]
elif methods in ['>=', 'gte']:
return trade_date_sse[trade_date_sse.index(date) + gap - 1]
elif methods in ['<', 'lt']:
return trade_date_sse[trade_date_sse.index(date) - gap]
elif methods in ['<=', 'lte']:
return trade_date_sse[trade_date_sse.index(date) - gap + 1]
elif methods in ['==', '=', 'eq']:
return date
except:
return 'wrong date' |
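The index arithmetic above can be seen on a tiny stand-in calendar; in QUANTAXIS, trade_date_sse is the full exchange trading-day list.
trade_date_sse = ['2018-11-08', '2018-11-09', '2018-11-12', '2018-11-13']  # toy calendar
date = '2018-11-09'
gt = trade_date_sse[trade_date_sse.index(date) + 2]       # '>'  -> '2018-11-13'
gte = trade_date_sse[trade_date_sse.index(date) + 2 - 1]  # '>=' -> '2018-11-12'
lt = trade_date_sse[trade_date_sse.index(date) - 1]       # '<'  -> '2018-11-08'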
def command_info(self, command, *commands):
"""Get array of specific Redis command details."""
return self.execute(b'COMMAND', b'INFO', command, *commands,
encoding='utf-8') | Get array of specific Redis command details. | Below is the the instruction that describes the task:
### Input:
Get array of specific Redis command details.
### Response:
def command_info(self, command, *commands):
"""Get array of specific Redis command details."""
return self.execute(b'COMMAND', b'INFO', command, *commands,
encoding='utf-8') |
def tabModificationStateChanged(self, tab):
'''
Perform all UI state changes that need to be done when the
modification state of the current tab has changed.
'''
if tab == self.currentTab:
changed = tab.editBox.document().isModified()
if self.autoSaveActive(tab):
changed = False
self.actionSave.setEnabled(changed)
self.setWindowModified(changed) | Perform all UI state changes that need to be done when the
modification state of the current tab has changed. | Below is the the instruction that describes the task:
### Input:
Perform all UI state changes that need to be done when the
modification state of the current tab has changed.
### Response:
def tabModificationStateChanged(self, tab):
'''
Perform all UI state changes that need to be done when the
modification state of the current tab has changed.
'''
if tab == self.currentTab:
changed = tab.editBox.document().isModified()
if self.autoSaveActive(tab):
changed = False
self.actionSave.setEnabled(changed)
self.setWindowModified(changed) |
def get_arp_table(self):
"""
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 12.0
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 14.0
}
]
"""
arp_table = []
command = 'show ip arp vrf default | exc INCOMPLETE'
output = self.device.send_command(command)
separator = r"^Address\s+Age.*Interface.*$"
arp_list = re.split(separator, output, flags=re.M)
if len(arp_list) != 2:
raise ValueError("Error processing arp table output:\n\n{}".format(output))
arp_entries = arp_list[1].strip()
for line in arp_entries.splitlines():
if len(line.split()) == 4:
address, age, mac, interface = line.split()
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
if age == '-':
age = -1.0
elif ':' not in age:
# Cisco sometimes returns a sub second arp time 0.411797
try:
age = float(age)
except ValueError:
age = -1.0
else:
age = convert_hhmmss(age)
age = float(age)
age = round(age, 1)
# Validate we matched correctly
if not re.search(RE_IPADDR, address):
raise ValueError("Invalid IP Address detected: {}".format(address))
if not re.search(RE_MAC, mac):
raise ValueError("Invalid MAC Address detected: {}".format(mac))
entry = {
'interface': interface,
'mac': napalm_base.helpers.mac(mac),
'ip': address,
'age': age
}
arp_table.append(entry)
return arp_table | Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 12.0
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 14.0
}
] | Below is the the instruction that describes the task:
### Input:
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 12.0
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 14.0
}
]
### Response:
def get_arp_table(self):
"""
Get arp table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '172.17.17.1',
'age' : 12.0
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '172.17.17.2',
'age' : 14.0
}
]
"""
arp_table = []
command = 'show ip arp vrf default | exc INCOMPLETE'
output = self.device.send_command(command)
separator = r"^Address\s+Age.*Interface.*$"
arp_list = re.split(separator, output, flags=re.M)
if len(arp_list) != 2:
raise ValueError("Error processing arp table output:\n\n{}".format(output))
arp_entries = arp_list[1].strip()
for line in arp_entries.splitlines():
if len(line.split()) == 4:
address, age, mac, interface = line.split()
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
if age == '-':
age = -1.0
elif ':' not in age:
# Cisco sometimes returns a sub second arp time 0.411797
try:
age = float(age)
except ValueError:
age = -1.0
else:
age = convert_hhmmss(age)
age = float(age)
age = round(age, 1)
# Validate we matched correctly
if not re.search(RE_IPADDR, address):
raise ValueError("Invalid IP Address detected: {}".format(address))
if not re.search(RE_MAC, mac):
raise ValueError("Invalid MAC Address detected: {}".format(mac))
entry = {
'interface': interface,
'mac': napalm_base.helpers.mac(mac),
'ip': address,
'age': age
}
arp_table.append(entry)
return arp_table |
def kappa_se_calc(PA, PE, POP):
"""
Calculate kappa standard error.
:param PA: observed agreement among raters (overall accuracy)
:type PA : float
:param PE: hypothetical probability of chance agreement (random accuracy)
:type PE : float
:param POP: population
:type POP:int
:return: kappa standard error as float
"""
try:
result = math.sqrt((PA * (1 - PA)) / (POP * ((1 - PE)**2)))
return result
except Exception:
return "None" | Calculate kappa standard error.
:param PA: observed agreement among raters (overall accuracy)
:type PA : float
:param PE: hypothetical probability of chance agreement (random accuracy)
:type PE : float
:param POP: population
:type POP:int
:return: kappa standard error as float | Below is the the instruction that describes the task:
### Input:
Calculate kappa standard error.
:param PA: observed agreement among raters (overall accuracy)
:type PA : float
:param PE: hypothetical probability of chance agreement (random accuracy)
:type PE : float
:param POP: population
:type POP:int
:return: kappa standard error as float
### Response:
def kappa_se_calc(PA, PE, POP):
"""
Calculate kappa standard error.
:param PA: observed agreement among raters (overall accuracy)
:type PA : float
:param PE: hypothetical probability of chance agreement (random accuracy)
:type PE : float
:param POP: population
:type POP:int
:return: kappa standard error as float
"""
try:
result = math.sqrt((PA * (1 - PA)) / (POP * ((1 - PE)**2)))
return result
except Exception:
return "None" |
def set_value(self, value: ScalarType) -> None:
"""Sets the value of the node to a scalar value.
After this, is_scalar(type(value)) will return true.
Args:
value: The value to set this node to, a str, int, float, \
bool, or None.
"""
if isinstance(value, bool):
value_str = 'true' if value else 'false'
else:
value_str = str(value)
start_mark = self.yaml_node.start_mark
end_mark = self.yaml_node.end_mark
# If we're of a class type, then we want to keep that tag so that the
# correct Constructor is called. If we're a built-in type, set the tag
# to the appropriate YAML tag.
tag = self.yaml_node.tag
if tag.startswith('tag:yaml.org,2002:'):
tag = scalar_type_to_tag[type(value)]
new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)
self.yaml_node = new_node | Sets the value of the node to a scalar value.
After this, is_scalar(type(value)) will return true.
Args:
value: The value to set this node to, a str, int, float, \
bool, or None. | Below is the the instruction that describes the task:
### Input:
Sets the value of the node to a scalar value.
After this, is_scalar(type(value)) will return true.
Args:
value: The value to set this node to, a str, int, float, \
bool, or None.
### Response:
def set_value(self, value: ScalarType) -> None:
"""Sets the value of the node to a scalar value.
After this, is_scalar(type(value)) will return true.
Args:
value: The value to set this node to, a str, int, float, \
bool, or None.
"""
if isinstance(value, bool):
value_str = 'true' if value else 'false'
else:
value_str = str(value)
start_mark = self.yaml_node.start_mark
end_mark = self.yaml_node.end_mark
# If we're of a class type, then we want to keep that tag so that the
# correct Constructor is called. If we're a built-in type, set the tag
# to the appropriate YAML tag.
tag = self.yaml_node.tag
if tag.startswith('tag:yaml.org,2002:'):
tag = scalar_type_to_tag[type(value)]
new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)
self.yaml_node = new_node |
def translate(self, package, into=None):
"""From a binary package, translate to a local binary distribution."""
if not package.local:
raise ValueError('BinaryTranslator cannot translate remote packages.')
if not isinstance(package, self._package_type):
return None
if not package.compatible(self._supported_tags):
TRACER.log('Target package %s is not compatible with %s' % (
package, self._supported_tags))
return None
into = into or safe_mkdtemp()
target_path = os.path.join(into, package.filename)
safe_copy(package.local_path, target_path)
return DistributionHelper.distribution_from_path(target_path) | From a binary package, translate to a local binary distribution. | Below is the the instruction that describes the task:
### Input:
From a binary package, translate to a local binary distribution.
### Response:
def translate(self, package, into=None):
"""From a binary package, translate to a local binary distribution."""
if not package.local:
raise ValueError('BinaryTranslator cannot translate remote packages.')
if not isinstance(package, self._package_type):
return None
if not package.compatible(self._supported_tags):
TRACER.log('Target package %s is not compatible with %s' % (
package, self._supported_tags))
return None
into = into or safe_mkdtemp()
target_path = os.path.join(into, package.filename)
safe_copy(package.local_path, target_path)
return DistributionHelper.distribution_from_path(target_path) |
def serialize_me(self, account_id, region, next_token=None):
"""Dumps the proper JSON for the schema.
:param account_id:
:param region:
:param next_token:
:return:
"""
payload = {
'account_id': account_id,
'region': region
}
if next_token:
payload['next_token'] = next_token
return self.dumps(payload).data | Dumps the proper JSON for the schema.
:param account_id:
:param region:
:param next_token:
:return: | Below is the instruction that describes the task:
### Input:
Dumps the proper JSON for the schema.
:param account_id:
:param region:
:param next_token:
:return:
### Response:
def serialize_me(self, account_id, region, next_token=None):
"""Dumps the proper JSON for the schema.
:param account_id:
:param region:
:param next_token:
:return:
"""
payload = {
'account_id': account_id,
'region': region
}
if next_token:
payload['next_token'] = next_token
return self.dumps(payload).data |
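
The payload-building logic on its own, with made-up values; in the entry above the final self.dumps(payload).data call comes from the surrounding marshmallow-style schema class, which is not shown here.
account_id = "123456789012"        # hypothetical account id
region = "us-east-1"
next_token = "token-abc"           # hypothetical pagination token
payload = {"account_id": account_id, "region": region}
if next_token:
    payload["next_token"] = next_token
print(payload)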
def delete(self):
"""Returns a response after attempting to delete the list.
"""
if not self.email_enabled:
raise EmailNotEnabledError("See settings.EMAIL_ENABLED")
return requests.delete(
f"{self.api_url}/{self.address}", auth=("api", self.api_key)
) | Returns a response after attempting to delete the list. | Below is the instruction that describes the task:
### Input:
Returns a response after attempting to delete the list.
### Response:
def delete(self):
"""Returns a response after attempting to delete the list.
"""
if not self.email_enabled:
raise EmailNotEnabledError("See settings.EMAIL_ENABLED")
return requests.delete(
f"{self.api_url}/{self.address}", auth=("api", self.api_key)
) |
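
A hedged sketch of the underlying HTTP call with requests; the endpoint shape, API key, and list address below are placeholders rather than values taken from this codebase.
import requests
api_url = "https://api.mailgun.net/v3/lists"    # assumed endpoint shape
api_key = "key-xxxxxxxx"                        # placeholder API key
address = "mylist@example.com"                  # placeholder list address
response = requests.delete(f"{api_url}/{address}", auth=("api", api_key))
print(response.status_code)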
def find_by_typename(self, typename):
"""
List of all objects whose type has the given name.
"""
return self.find_by(lambda obj: type(obj).__name__ == typename) | List of all objects whose type has the given name. | Below is the instruction that describes the task:
### Input:
List of all objects whose type has the given name.
### Response:
def find_by_typename(self, typename):
"""
List of all objects whose type has the given name.
"""
return self.find_by(lambda obj: type(obj).__name__ == typename) |
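
The same predicate outside the container class, written as a plain list comprehension over a couple of toy classes.
class Sprite:
    pass
class Sound:
    pass
objects = [Sprite(), Sound(), Sprite()]
typename = 'Sprite'
matches = [obj for obj in objects if type(obj).__name__ == typename]
print(len(matches))   # 2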
def search_and_extract_nucleotides_matching_nucleotide_database(self,
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta):
'''As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
if search_method == "hmmsearch":
# First search the reads using the HMM
search_result, table_list = self.nhmmer(
hmmsearch_output_table,
unpack,
threads,
evalue
)
elif search_method == 'diamond':
raise Exception("Diamond searches not supported for nucelotide databases yet")
if maximum_range:
hits = self._get_read_names(
search_result, # define the span of hits
maximum_range
)
else:
hits = self._get_sequence_directions(search_result)
hit_readnames = hits.keys()
if euk_check:
euk_reads = self._check_euk_contamination(table_list)
hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
hits = {key:item for key, item in hits.iteritems() if key in hit_readnames}
hit_read_count = [len(euk_reads), len(hit_readnames)]
else:
hit_read_count = [0, len(hit_readnames)]
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
result = DBSearchResult(None,
search_result,
hit_read_count,
None)
else:
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
hit_read_count,
slash_endings)
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits)
logging.info("%s read(s) detected" % n_hits)
return result, direction_information | As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information | Below is the instruction that describes the task:
### Input:
As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
### Response:
def search_and_extract_nucleotides_matching_nucleotide_database(self,
unpack,
euk_check,
search_method,
maximum_range,
threads,
evalue,
hmmsearch_output_table,
hit_reads_fasta):
'''As per nt_db_search() except slightly lower level. Search an
input read set (unpack) and then extract the sequences that hit.
Parameters
----------
hmmsearch_output_table: str
path to hmmsearch output table
hit_reads_fasta: str
path to hit nucleotide sequences
Returns
-------
direction_information: dict
{read_1: False
...
read n: True}
where True = Forward direction
and False = Reverse direction
result: DBSearchResult object containing file locations and hit
information
'''
if search_method == "hmmsearch":
# First search the reads using the HMM
search_result, table_list = self.nhmmer(
hmmsearch_output_table,
unpack,
threads,
evalue
)
elif search_method == 'diamond':
raise Exception("Diamond searches not supported for nucelotide databases yet")
if maximum_range:
hits = self._get_read_names(
search_result, # define the span of hits
maximum_range
)
else:
hits = self._get_sequence_directions(search_result)
hit_readnames = hits.keys()
if euk_check:
euk_reads = self._check_euk_contamination(table_list)
hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
hits = {key:item for key, item in hits.iteritems() if key in hit_readnames}
hit_read_count = [len(euk_reads), len(hit_readnames)]
else:
hit_read_count = [0, len(hit_readnames)]
hit_reads_fasta, direction_information = self._extract_from_raw_reads(
hit_reads_fasta,
hit_readnames,
unpack.read_file,
unpack.format(),
hits
)
if not hit_readnames:
result = DBSearchResult(None,
search_result,
hit_read_count,
None)
else:
slash_endings=self._check_for_slash_endings(hit_readnames)
result = DBSearchResult(hit_reads_fasta,
search_result,
hit_read_count,
slash_endings)
if maximum_range:
n_hits = sum([len(x["strand"]) for x in hits.values()])
else:
n_hits = len(hits)
logging.info("%s read(s) detected" % n_hits)
return result, direction_information |
def _add_hypotheses_assuming_new_stroke(self,
new_stroke,
stroke_nr,
new_beam):
"""
Get new guesses by assuming new_stroke is a new symbol.
Parameters
----------
new_stroke : list of dicts
A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which
represent a point.
stroke_nr : int
Number of the stroke for segmentation
new_beam : beam object
"""
guesses = single_clf.predict({'data': [new_stroke],
'id': None})[:self.m]
for hyp in self.hypotheses:
new_geometry = deepcopy(hyp['geometry'])
most_right = new_geometry
if len(hyp['symbols']) == 0:
while 'right' in most_right:
most_right = most_right['right']
most_right['right'] = {'symbol_index': len(hyp['symbols']),
'right': None}
else:
most_right = {'symbol_index': len(hyp['symbols']),
'right': None}
for guess in guesses:
sym = {'symbol': guess['semantics'],
'probability': guess['probability']}
new_seg = deepcopy(hyp['segmentation'])
new_seg.append([stroke_nr])
new_sym = deepcopy(hyp['symbols'])
new_sym.append(sym)
b = {'segmentation': new_seg,
'symbols': new_sym,
'geometry': new_geometry,
'probability': None
}
# spacial_rels = [] # TODO
# for s1_indices, s2_indices in zip(b['segmentation'],
# b['segmentation'][1:]):
# tmp = [new_beam.history['data'][el] for el in s1_indices]
# s1 = HandwrittenData(json.dumps(tmp))
# tmp = [new_beam.history['data'][el] for el in s2_indices]
# s2 = HandwrittenData(json.dumps(tmp))
# rel = spacial_relationship.estimate(s1, s2)
# spacial_rels.append(rel)
# b['geometry'] = spacial_rels
new_beam.hypotheses.append(b) | Get new guesses by assuming new_stroke is a new symbol.
Parameters
----------
new_stroke : list of dicts
A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which
represent a point.
stroke_nr : int
Number of the stroke for segmentation
new_beam : beam object | Below is the instruction that describes the task:
### Input:
Get new guesses by assuming new_stroke is a new symbol.
Parameters
----------
new_stroke : list of dicts
A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which
represent a point.
stroke_nr : int
Number of the stroke for segmentation
new_beam : beam object
### Response:
def _add_hypotheses_assuming_new_stroke(self,
new_stroke,
stroke_nr,
new_beam):
"""
Get new guesses by assuming new_stroke is a new symbol.
Parameters
----------
new_stroke : list of dicts
A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which
represent a point.
stroke_nr : int
Number of the stroke for segmentation
new_beam : beam object
"""
guesses = single_clf.predict({'data': [new_stroke],
'id': None})[:self.m]
for hyp in self.hypotheses:
new_geometry = deepcopy(hyp['geometry'])
most_right = new_geometry
if len(hyp['symbols']) == 0:
while 'right' in most_right:
most_right = most_right['right']
most_right['right'] = {'symbol_index': len(hyp['symbols']),
'right': None}
else:
most_right = {'symbol_index': len(hyp['symbols']),
'right': None}
for guess in guesses:
sym = {'symbol': guess['semantics'],
'probability': guess['probability']}
new_seg = deepcopy(hyp['segmentation'])
new_seg.append([stroke_nr])
new_sym = deepcopy(hyp['symbols'])
new_sym.append(sym)
b = {'segmentation': new_seg,
'symbols': new_sym,
'geometry': new_geometry,
'probability': None
}
# spacial_rels = [] # TODO
# for s1_indices, s2_indices in zip(b['segmentation'],
# b['segmentation'][1:]):
# tmp = [new_beam.history['data'][el] for el in s1_indices]
# s1 = HandwrittenData(json.dumps(tmp))
# tmp = [new_beam.history['data'][el] for el in s2_indices]
# s2 = HandwrittenData(json.dumps(tmp))
# rel = spacial_relationship.estimate(s1, s2)
# spacial_rels.append(rel)
# b['geometry'] = spacial_rels
new_beam.hypotheses.append(b) |
def getICMPrimitives(uuid: str):
""" returns all ICM primitives (TODO - needs filter support)"""
primitives = [
p.deserialize()
for p in CausalPrimitive.query.filter_by(model_id=uuid).all()
]
for p in primitives:
del p["model_id"]
return jsonify(primitives) | returns all ICM primitives (TODO - needs filter support) | Below is the instruction that describes the task:
### Input:
returns all ICM primitives (TODO - needs filter support)
### Response:
def getICMPrimitives(uuid: str):
""" returns all ICM primitives (TODO - needs filter support)"""
primitives = [
p.deserialize()
for p in CausalPrimitive.query.filter_by(model_id=uuid).all()
]
for p in primitives:
del p["model_id"]
return jsonify(primitives) |
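
The same serialize-then-drop-the-internal-key pattern on plain dicts, without Flask or SQLAlchemy; the rows are made up for illustration.
primitives = [
    {"id": "n1", "model_id": "m1", "label": "rainfall"},
    {"id": "n2", "model_id": "m1", "label": "crop yield"},
]
for p in primitives:
    del p["model_id"]
print(primitives)   # the internal model_id key is gone from every row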
def record_tx(self, origin, destination, amount,
outcome, destination_id=None):
"""Records a transaction in the database.
Args:
origin (str): user_id of the sender
destination (str): coin address or user_id of the recipient
amount (str, Decimal, number): amount to send
outcome (str, bool): the transaction hash if this is a "sendfrom"
transaction; for "move", True if successful,
False otherwise
destination_id (str): the destination account label ("move" only)
Returns:
str or bool: the outcome (input) argument
"""
# "move" commands
if destination_id:
tx = db.Transaction(
txtype="move",
from_user_id=origin,
to_user_id=destination_id,
txdate=datetime.now(),
amount=amount,
currency=COINS[self.coin]["ticker"],
to_coin_address=destination,
)
# "sendfrom" commands
else:
self.logger.debug(self.gettransaction(outcome))
confirmations = self.gettransaction(outcome)["confirmations"]
last_confirmation = datetime.now() if confirmations else None
tx = db.Transaction(
txtype="sendfrom",
from_user_id=origin,
txhash=outcome,
txdate=datetime.now(),
amount=amount,
currency=COINS[self.coin]["ticker"],
to_coin_address=destination,
confirmations=confirmations,
last_confirmation=last_confirmation
)
db.session.add(tx)
db.session.commit()
return outcome | Records a transaction in the database.
Args:
origin (str): user_id of the sender
destination (str): coin address or user_id of the recipient
amount (str, Decimal, number): amount to send
outcome (str, bool): the transaction hash if this is a "sendfrom"
transaction; for "move", True if successful,
False otherwise
destination_id (str): the destination account label ("move" only)
Returns:
str or bool: the outcome (input) argument | Below is the instruction that describes the task:
### Input:
Records a transaction in the database.
Args:
origin (str): user_id of the sender
destination (str): coin address or user_id of the recipient
amount (str, Decimal, number): amount to send
outcome (str, bool): the transaction hash if this is a "sendfrom"
transaction; for "move", True if successful,
False otherwise
destination_id (str): the destination account label ("move" only)
Returns:
str or bool: the outcome (input) argument
### Response:
def record_tx(self, origin, destination, amount,
outcome, destination_id=None):
"""Records a transaction in the database.
Args:
origin (str): user_id of the sender
destination (str): coin address or user_id of the recipient
amount (str, Decimal, number): amount to send
outcome (str, bool): the transaction hash if this is a "sendfrom"
transaction; for "move", True if successful,
False otherwise
destination_id (str): the destination account label ("move" only)
Returns:
str or bool: the outcome (input) argument
"""
# "move" commands
if destination_id:
tx = db.Transaction(
txtype="move",
from_user_id=origin,
to_user_id=destination_id,
txdate=datetime.now(),
amount=amount,
currency=COINS[self.coin]["ticker"],
to_coin_address=destination,
)
# "sendfrom" commands
else:
self.logger.debug(self.gettransaction(outcome))
confirmations = self.gettransaction(outcome)["confirmations"]
last_confirmation = datetime.now() if confirmations else None
tx = db.Transaction(
txtype="sendfrom",
from_user_id=origin,
txhash=outcome,
txdate=datetime.now(),
amount=amount,
currency=COINS[self.coin]["ticker"],
to_coin_address=destination,
confirmations=confirmations,
last_confirmation=last_confirmation
)
db.session.add(tx)
db.session.commit()
return outcome |
def updateidf(idf, dct):
"""update idf using dct"""
for key in list(dct.keys()):
if key.startswith('idf.'):
idftag, objkey, objname, field = key2elements(key)
if objname == '':
try:
idfobj = idf.idfobjects[objkey.upper()][0]
except IndexError as e:
idfobj = idf.newidfobject(objkey.upper())
else:
idfobj = idf.getobject(objkey.upper(), objname)
if idfobj == None:
idfobj = idf.newidfobject(objkey.upper(), Name=objname)
idfobj[field] = dct[key] | update idf using dct | Below is the instruction that describes the task:
### Input:
update idf using dct
### Response:
def updateidf(idf, dct):
"""update idf using dct"""
for key in list(dct.keys()):
if key.startswith('idf.'):
idftag, objkey, objname, field = key2elements(key)
if objname == '':
try:
idfobj = idf.idfobjects[objkey.upper()][0]
except IndexError as e:
idfobj = idf.newidfobject(objkey.upper())
else:
idfobj = idf.getobject(objkey.upper(), objname)
if idfobj == None:
idfobj = idf.newidfobject(objkey.upper(), Name=objname)
idfobj[field] = dct[key] |
def find_model_dat():
"""
Find the file containing the definition of all the models in Xspec
(model.dat) and return its path
"""
# model.dat is in $HEADAS/../spectral
headas_env = os.environ.get("HEADAS")
assert headas_env is not None, ("You need to setup the HEADAS variable before importing this module."
" See Heasoft documentation.")
# Expand all variables and other things like ~
headas_env = os.path.expandvars(os.path.expanduser(headas_env))
# Lazy check that it exists
assert os.path.exists(headas_env), "The HEADAS env. variable points to a non-existent directory: %s" % (headas_env)
# Get one directory above HEADAS (i.e., $HEADAS/..)
inferred_path = os.path.dirname(headas_env)
# Now model.dat should be in $HEADAS/../spectral/manager
final_path = os.path.join(inferred_path, 'spectral', 'manager', 'model.dat')
# Check that model.dat exists
assert os.path.exists(final_path), "Cannot find Xspec model definition file %s" % (final_path)
return os.path.abspath(final_path) | Find the file containing the definition of all the models in Xspec
(model.dat) and return its path | Below is the instruction that describes the task:
### Input:
Find the file containing the definition of all the models in Xspec
(model.dat) and return its path
### Response:
def find_model_dat():
"""
Find the file containing the definition of all the models in Xspec
(model.dat) and return its path
"""
# model.dat is in $HEADAS/../spectral
headas_env = os.environ.get("HEADAS")
assert headas_env is not None, ("You need to setup the HEADAS variable before importing this module."
" See Heasoft documentation.")
# Expand all variables and other things like ~
headas_env = os.path.expandvars(os.path.expanduser(headas_env))
# Lazy check that it exists
assert os.path.exists(headas_env), "The HEADAS env. variable points to a non-existent directory: %s" % (headas_env)
# Get one directory above HEADAS (i.e., $HEADAS/..)
inferred_path = os.path.dirname(headas_env)
# Now model.dat should be in $HEADAS/../spectral/manager
final_path = os.path.join(inferred_path, 'spectral', 'manager', 'model.dat')
# Check that model.dat exists
assert os.path.exists(final_path), "Cannot find Xspec model definition file %s" % (final_path)
return os.path.abspath(final_path) |
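
The path arithmetic on its own, with a hypothetical HEADAS value; running the real function also requires the HEASoft directories to exist.
import os
headas = "/opt/heasoft/x86_64-pc-linux-gnu-libc2.31"   # hypothetical $HEADAS
inferred_path = os.path.dirname(headas)                # /opt/heasoft
model_dat = os.path.join(inferred_path, 'spectral', 'manager', 'model.dat')
print(model_dat)   # /opt/heasoft/spectral/manager/model.dat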
def create(self, name, overwrite=True):
"""Creates an SQLite database file.
Creates an SQLite database with the given name.
The .box file extension is added automatically.
Overwrites any existing database by default.
"""
self._name = name.rstrip(".db")
from os import unlink
if overwrite:
try: unlink(self._name + ".db")
except: pass
self._con = sqlite.connect(self._name + ".db")
self._cur = self._con.cursor() | Creates an SQLite database file.
Creates an SQLite database with the given name.
The .box file extension is added automatically.
Overwrites any existing database by default. | Below is the instruction that describes the task:
### Input:
Creates an SQLite database file.
Creates an SQLite database with the given name.
The .box file extension is added automatically.
Overwrites any existing database by default.
### Response:
def create(self, name, overwrite=True):
"""Creates an SQLite database file.
Creates an SQLite database with the given name.
The .box file extension is added automatically.
Overwrites any existing database by default.
"""
self._name = name.rstrip(".db")
from os import unlink
if overwrite:
try: unlink(self._name + ".db")
except: pass
self._con = sqlite.connect(self._name + ".db")
self._cur = self._con.cursor() |
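
The same create-or-overwrite behaviour written directly against the standard sqlite3 module, outside the wrapper class.
import os
import sqlite3
name = "scores"                      # database name without extension
if os.path.exists(name + ".db"):     # overwrite any existing file
    os.unlink(name + ".db")
con = sqlite3.connect(name + ".db")
cur = con.cursor()
cur.execute("CREATE TABLE items (id INTEGER PRIMARY KEY, value TEXT)")
con.commit()
con.close()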
def sample_greedy(self):
"""
Sample a point in the leaf with the max progress.
"""
if self.leafnode:
return self.sample_bounds()
else:
lp = self.lower.max_leaf_progress
gp = self.greater.max_leaf_progress
maxp = max(lp, gp)
if self.sampling_mode['multiscale']:
tp = self.progress
if tp > maxp:
return self.sample_bounds()
if gp == maxp:
sampling_mode = self.sampling_mode
sampling_mode['mode'] = 'greedy'
return self.greater.sample(sampling_mode=sampling_mode)
else:
sampling_mode = self.sampling_mode
sampling_mode['mode'] = 'greedy'
return self.lower.sample(sampling_mode=sampling_mode) | Sample a point in the leaf with the max progress. | Below is the instruction that describes the task:
### Input:
Sample a point in the leaf with the max progress.
### Response:
def sample_greedy(self):
"""
Sample a point in the leaf with the max progress.
"""
if self.leafnode:
return self.sample_bounds()
else:
lp = self.lower.max_leaf_progress
gp = self.greater.max_leaf_progress
maxp = max(lp, gp)
if self.sampling_mode['multiscale']:
tp = self.progress
if tp > maxp:
return self.sample_bounds()
if gp == maxp:
sampling_mode = self.sampling_mode
sampling_mode['mode'] = 'greedy'
return self.greater.sample(sampling_mode=sampling_mode)
else:
sampling_mode = self.sampling_mode
sampling_mode['mode'] = 'greedy'
return self.lower.sample(sampling_mode=sampling_mode) |
def weighted_random(samples, chains):
"""Determine the sample values of chains by weighed random choice.
Args:
samples (array_like):
Samples as a nS x nV array_like object where nS is the number of samples and nV is the
number of variables. The values should all be 0/1 or -1/+1.
chains (list[array_like]):
List of chains of length nC where nC is the number of chains.
Each chain should be an array_like collection of column indices in samples.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'.
Where there is a chain break, the value is chosen randomly, weighted by frequency of the
chain's value.
:obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept
and no samples are added.
Examples:
This example unembeds samples from a target graph that chains nodes 0 and 1 to
represent one source node and nodes 2, 3, and 4 to represent another.
The sample has broken chains for both source nodes.
>>> import dimod
>>> import numpy as np
...
>>> chains = [(0, 1), (2, 3, 4)]
>>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
>>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP
>>> unembedded # doctest: +SKIP
array([[1, 1]], dtype=int8)
>>> idx # doctest: +SKIP
array([0, 1])
"""
samples = np.asarray(samples)
if samples.ndim != 2:
raise ValueError("expected samples to be a numpy 2D array")
# it suffices to choose a random index from each chain and use that to construct the matrix
idx = [np.random.choice(chain) for chain in chains]
num_samples, num_variables = samples.shape
return samples[:, idx], np.arange(num_samples) | Determine the sample values of chains by weighted random choice.
Args:
samples (array_like):
Samples as a nS x nV array_like object where nS is the number of samples and nV is the
number of variables. The values should all be 0/1 or -1/+1.
chains (list[array_like]):
List of chains of length nC where nC is the number of chains.
Each chain should be an array_like collection of column indices in samples.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'.
Where there is a chain break, the value is chosen randomly, weighted by frequency of the
chain's value.
:obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept
and no samples are added.
Examples:
This example unembeds samples from a target graph that chains nodes 0 and 1 to
represent one source node and nodes 2, 3, and 4 to represent another.
The sample has broken chains for both source nodes.
>>> import dimod
>>> import numpy as np
...
>>> chains = [(0, 1), (2, 3, 4)]
>>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
>>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP
>>> unembedded # doctest: +SKIP
array([[1, 1]], dtype=int8)
>>> idx # doctest: +SKIP
array([0, 1]) | Below is the instruction that describes the task:
### Input:
Determine the sample values of chains by weighted random choice.
Args:
samples (array_like):
Samples as a nS x nV array_like object where nS is the number of samples and nV is the
number of variables. The values should all be 0/1 or -1/+1.
chains (list[array_like]):
List of chains of length nC where nC is the number of chains.
Each chain should be an array_like collection of column indices in samples.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'.
Where there is a chain break, the value is chosen randomly, weighted by frequency of the
chain's value.
:obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept
and no samples are added.
Examples:
This example unembeds samples from a target graph that chains nodes 0 and 1 to
represent one source node and nodes 2, 3, and 4 to represent another.
The sample has broken chains for both source nodes.
>>> import dimod
>>> import numpy as np
...
>>> chains = [(0, 1), (2, 3, 4)]
>>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
>>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP
>>> unembedded # doctest: +SKIP
array([[1, 1]], dtype=int8)
>>> idx # doctest: +SKIP
array([0, 1])
### Response:
def weighted_random(samples, chains):
"""Determine the sample values of chains by weighed random choice.
Args:
samples (array_like):
Samples as a nS x nV array_like object where nS is the number of samples and nV is the
number of variables. The values should all be 0/1 or -1/+1.
chains (list[array_like]):
List of chains of length nC where nC is the number of chains.
Each chain should be an array_like collection of column indices in samples.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'.
Where there is a chain break, the value is chosen randomly, weighted by frequency of the
chain's value.
:obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept
and no samples are added.
Examples:
This example unembeds samples from a target graph that chains nodes 0 and 1 to
represent one source node and nodes 2, 3, and 4 to represent another.
The sample has broken chains for both source nodes.
>>> import dimod
>>> import numpy as np
...
>>> chains = [(0, 1), (2, 3, 4)]
>>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
>>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP
>>> unembedded # doctest: +SKIP
array([[1, 1]], dtype=int8)
>>> idx # doctest: +SKIP
array([0, 1])
"""
samples = np.asarray(samples)
if samples.ndim != 2:
raise ValueError("expected samples to be a numpy 2D array")
# it suffices to choose a random index from each chain and use that to construct the matrix
idx = [np.random.choice(chain) for chain in chains]
num_samples, num_variables = samples.shape
return samples[:, idx], np.arange(num_samples) |
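
The column-picking trick on its own: drawing one random index per chain and slicing those columns means a value that occurs more often within a chain is chosen more often, which is where the weighting comes from.
import numpy as np
samples = np.array([[1, 0, 1, 0, 1],
                    [0, 0, 1, 1, 1]], dtype=np.int8)
chains = [(0, 1), (2, 3, 4)]
idx = [np.random.choice(chain) for chain in chains]   # one column index per chain
unembedded = samples[:, idx]
print(unembedded.shape)   # (2, 2): one value per sample per chain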
def warp(source_file, destination_file, dst_crs=None, resolution=None, dimensions=None,
src_bounds=None, dst_bounds=None, src_nodata=None, dst_nodata=None,
target_aligned_pixels=False, check_invert_proj=True,
creation_options=None, resampling=Resampling.cubic, **kwargs):
"""Warp a raster dataset.
Parameters
------------
source_file: str, file object or pathlib.Path object
Source file.
destination_file: str, file object or pathlib.Path object
Destination file.
dst_crs: rasterio.crs.CRS, optional
Target coordinate reference system.
resolution: tuple (x resolution, y resolution) or float, optional
Target resolution, in units of target coordinate reference
system.
dimensions: tuple (width, height), optional
Output file size in pixels and lines.
src_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from source bounds
(in source georeferenced units).
dst_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from destination bounds
(in destination georeferenced units).
src_nodata: int, float, or nan, optional
Manually overridden source nodata.
dst_nodata: int, float, or nan, optional
Manually overridden destination nodata.
target_aligned_pixels: bool, optional
Align the output bounds based on the resolution.
Default is `False`.
check_invert_proj: bool, optional
Constrain output to valid coordinate region in dst_crs.
Default is `True`.
creation_options: dict, optional
Custom creation options.
resampling: rasterio.enums.Resampling
Reprojection resampling method. Default is `cubic`.
kwargs: optional
Additional arguments passed to transformation function.
Returns
---------
out: None
Output is written to destination.
"""
with rasterio.Env(CHECK_WITH_INVERT_PROJ=check_invert_proj):
with rasterio.open(source_file) as src:
out_kwargs = src.profile.copy()
dst_crs, dst_transform, dst_width, dst_height = calc_transform(
src, dst_crs, resolution, dimensions,
src_bounds, dst_bounds, target_aligned_pixels)
# If src_nodata is not None, update the dst metadata NODATA
# value to src_nodata (will be overridden by dst_nodata if it is not None).
if src_nodata is not None:
# Update the destination NODATA value
out_kwargs.update({
'nodata': src_nodata
})
# Validate a manually set destination NODATA value.
if dst_nodata is not None:
if src_nodata is None and src.meta['nodata'] is None:
raise ValueError('src_nodata must be provided because dst_nodata is not None')
else:
out_kwargs.update({'nodata': dst_nodata})
out_kwargs.update({
'crs': dst_crs,
'transform': dst_transform,
'width': dst_width,
'height': dst_height
})
# Adjust block size if necessary.
if ('blockxsize' in out_kwargs and
dst_width < out_kwargs['blockxsize']):
del out_kwargs['blockxsize']
if ('blockysize' in out_kwargs and
dst_height < out_kwargs['blockysize']):
del out_kwargs['blockysize']
if creation_options is not None:
out_kwargs.update(**creation_options)
with rasterio.open(destination_file, 'w', **out_kwargs) as dst:
reproject(
source=rasterio.band(src, src.indexes),
destination=rasterio.band(dst, dst.indexes),
src_transform=src.transform,
src_crs=src.crs,
src_nodata=src_nodata,
dst_transform=out_kwargs['transform'],
dst_crs=out_kwargs['crs'],
dst_nodata=dst_nodata,
resampling=resampling,
**kwargs) | Warp a raster dataset.
Parameters
------------
source_file: str, file object or pathlib.Path object
Source file.
destination_file: str, file object or pathlib.Path object
Destination file.
dst_crs: rasterio.crs.CRS, optional
Target coordinate reference system.
resolution: tuple (x resolution, y resolution) or float, optional
Target resolution, in units of target coordinate reference
system.
dimensions: tuple (width, height), optional
Output file size in pixels and lines.
src_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from source bounds
(in source georeferenced units).
dst_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from destination bounds
(in destination georeferenced units).
src_nodata: int, float, or nan, optional
Manually overridden source nodata.
dst_nodata: int, float, or nan, optional
Manually overridden destination nodata.
target_aligned_pixels: bool, optional
Align the output bounds based on the resolution.
Default is `False`.
check_invert_proj: bool, optional
Constrain output to valid coordinate region in dst_crs.
Default is `True`.
creation_options: dict, optional
Custom creation options.
resampling: rasterio.enums.Resampling
Reprojection resampling method. Default is `cubic`.
kwargs: optional
Additional arguments passed to transformation function.
Returns
---------
out: None
Output is written to destination. | Below is the instruction that describes the task:
### Input:
Warp a raster dataset.
Parameters
------------
source_file: str, file object or pathlib.Path object
Source file.
destination_file: str, file object or pathlib.Path object
Destination file.
dst_crs: rasterio.crs.CRS, optional
Target coordinate reference system.
resolution: tuple (x resolution, y resolution) or float, optional
Target resolution, in units of target coordinate reference
system.
dimensions: tuple (width, height), optional
Output file size in pixels and lines.
src_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from source bounds
(in source georeferenced units).
dst_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from destination bounds
(in destination georeferenced units).
src_nodata: int, float, or nan, optional
Manually overridden source nodata.
dst_nodata: int, float, or nan, optional
Manually overridden destination nodata.
target_aligned_pixels: bool, optional
Align the output bounds based on the resolution.
Default is `False`.
check_invert_proj: bool, optional
Constrain output to valid coordinate region in dst_crs.
Default is `True`.
creation_options: dict, optional
Custom creation options.
resampling: rasterio.enums.Resampling
Reprojection resampling method. Default is `cubic`.
kwargs: optional
Additional arguments passed to transformation function.
Returns
---------
out: None
Output is written to destination.
### Response:
def warp(source_file, destination_file, dst_crs=None, resolution=None, dimensions=None,
src_bounds=None, dst_bounds=None, src_nodata=None, dst_nodata=None,
target_aligned_pixels=False, check_invert_proj=True,
creation_options=None, resampling=Resampling.cubic, **kwargs):
"""Warp a raster dataset.
Parameters
------------
source_file: str, file object or pathlib.Path object
Source file.
destination_file: str, file object or pathlib.Path object
Destination file.
dst_crs: rasterio.crs.CRS, optional
Target coordinate reference system.
resolution: tuple (x resolution, y resolution) or float, optional
Target resolution, in units of target coordinate reference
system.
dimensions: tuple (width, height), optional
Output file size in pixels and lines.
src_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from source bounds
(in source georeferenced units).
dst_bounds: tuple (xmin, ymin, xmax, ymax), optional
Georeferenced extent of output file from destination bounds
(in destination georeferenced units).
src_nodata: int, float, or nan, optional
Manually overridden source nodata.
dst_nodata: int, float, or nan, optional
Manually overridden destination nodata.
target_aligned_pixels: bool, optional
Align the output bounds based on the resolution.
Default is `False`.
check_invert_proj: bool, optional
Constrain output to valid coordinate region in dst_crs.
Default is `True`.
creation_options: dict, optional
Custom creation options.
resampling: rasterio.enums.Resampling
Reprojection resampling method. Default is `cubic`.
kwargs: optional
Additional arguments passed to transformation function.
Returns
---------
out: None
Output is written to destination.
"""
with rasterio.Env(CHECK_WITH_INVERT_PROJ=check_invert_proj):
with rasterio.open(source_file) as src:
out_kwargs = src.profile.copy()
dst_crs, dst_transform, dst_width, dst_height = calc_transform(
src, dst_crs, resolution, dimensions,
src_bounds, dst_bounds, target_aligned_pixels)
# If src_nodata is not None, update the dst metadata NODATA
# value to src_nodata (will be overridden by dst_nodata if it is not None).
if src_nodata is not None:
# Update the destination NODATA value
out_kwargs.update({
'nodata': src_nodata
})
# Validate a manually set destination NODATA value.
if dst_nodata is not None:
if src_nodata is None and src.meta['nodata'] is None:
raise ValueError('src_nodata must be provided because dst_nodata is not None')
else:
out_kwargs.update({'nodata': dst_nodata})
out_kwargs.update({
'crs': dst_crs,
'transform': dst_transform,
'width': dst_width,
'height': dst_height
})
# Adjust block size if necessary.
if ('blockxsize' in out_kwargs and
dst_width < out_kwargs['blockxsize']):
del out_kwargs['blockxsize']
if ('blockysize' in out_kwargs and
dst_height < out_kwargs['blockysize']):
del out_kwargs['blockysize']
if creation_options is not None:
out_kwargs.update(**creation_options)
with rasterio.open(destination_file, 'w', **out_kwargs) as dst:
reproject(
source=rasterio.band(src, src.indexes),
destination=rasterio.band(dst, dst.indexes),
src_transform=src.transform,
src_crs=src.crs,
src_nodata=src_nodata,
dst_transform=out_kwargs['transform'],
dst_crs=out_kwargs['crs'],
dst_nodata=dst_nodata,
resampling=resampling,
**kwargs) |
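
A hedged usage sketch of the function above; the file names are placeholders, and running it needs rasterio plus a readable source raster on disk.
from rasterio.crs import CRS
from rasterio.enums import Resampling
warp(
    "input.tif",                  # hypothetical source raster
    "output_wgs84.tif",           # hypothetical destination
    dst_crs=CRS.from_epsg(4326),
    resolution=0.001,             # degrees, since the target CRS is geographic
    resampling=Resampling.nearest,
)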
def get_object(
self, object_t, object_id=None, relation=None, parent=None, **kwargs
):
"""
Actually query the Deezer API to retrieve the object
:returns: json dictionary
"""
url = self.object_url(object_t, object_id, relation, **kwargs)
response = self.session.get(url)
return self._process_json(response.json(), parent) | Actually query the Deezer API to retrieve the object
:returns: json dictionary | Below is the instruction that describes the task:
### Input:
Actually query the Deezer API to retrieve the object
:returns: json dictionary
### Response:
def get_object(
self, object_t, object_id=None, relation=None, parent=None, **kwargs
):
"""
Actually query the Deezer API to retrieve the object
:returns: json dictionary
"""
url = self.object_url(object_t, object_id, relation, **kwargs)
response = self.session.get(url)
return self._process_json(response.json(), parent) |
def _setup_console_logger(cls, session: AppSession, args, stderr):
'''Set up the console logger.
A handler with a formatter is added to the root logger.
'''
stream = new_encoded_stream(args, stderr)
logger = logging.getLogger()
session.console_log_handler = handler = logging.StreamHandler(stream)
formatter = logging.Formatter('%(levelname)s %(message)s')
log_filter = logging.Filter('wpull')
handler.setFormatter(formatter)
handler.setLevel(args.verbosity or logging.INFO)
handler.addFilter(log_filter)
logger.addHandler(handler) | Set up the console logger.
A handler with a formatter is added to the root logger. | Below is the instruction that describes the task:
### Input:
Set up the console logger.
A handler with a formatter is added to the root logger.
### Response:
def _setup_console_logger(cls, session: AppSession, args, stderr):
'''Set up the console logger.
A handler with a formatter is added to the root logger.
'''
stream = new_encoded_stream(args, stderr)
logger = logging.getLogger()
session.console_log_handler = handler = logging.StreamHandler(stream)
formatter = logging.Formatter('%(levelname)s %(message)s')
log_filter = logging.Filter('wpull')
handler.setFormatter(formatter)
handler.setLevel(args.verbosity or logging.INFO)
handler.addFilter(log_filter)
logger.addHandler(handler) |
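
The same handler, formatter, and filter wiring using only the standard library, outside the wpull AppSession machinery.
import logging
import sys
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
handler.setLevel(logging.INFO)
handler.addFilter(logging.Filter('wpull'))   # only pass records from the 'wpull' logger tree
logger.addHandler(handler)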
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points(``x == len(cutting_points) + 1``)
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``.
And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in split label-lists.
So the start is relative to the cutting point and
not to the beginning of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is subtracted
from a start-cutting-point, and added to an end-cutting-point.
Returns:
list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits | Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points(``x == len(cutting_points) + 1``)
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``.
And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in split label-lists.
So the start is relative to the cutting point and
not to the beginning of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is subtracted
from a start-cutting-point, and added to an end-cutting-point.
Returns:
list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
] | Below is the instruction that describes the task:
### Input:
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points(``x == len(cutting_points) + 1``)
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``.
And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in split label-lists.
So the start is relative to the cutting point and
not to the beginning of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is subtracted
from a start-cutting-point, and added to an end-cutting-point.
Returns:
list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
### Response:
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points(``x == len(cutting_points) + 1``)
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``.
And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in split label-lists.
So the start is relative to the cutting point and
not to the beginning of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is subtracted
from a start-cutting-point, and added to an end-cutting-point.
Returns:
list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
>>> res = ll.split([4.6])
>>> len(res)
2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits |
def compile_insert(self, query, values):
"""
Compile insert statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The insert values
:type values: dict or list
:return: The compiled insert
:rtype: str
"""
table = self.wrap_table(query.from__)
if not isinstance(values, list):
values = [values]
# If there is only one row to insert, we just use the normal grammar
if len(values) == 1:
return super(SQLiteQueryGrammar, self).compile_insert(query, values)
names = self.columnize(values[0].keys())
columns = []
# SQLite requires us to build the multi-row insert as a listing of select with
# unions joining them together. So we'll build out this list of columns and
# then join them all together with select unions to complete the queries.
for column in values[0].keys():
columns.append("%s AS %s" % (self.get_marker(), self.wrap(column)))
columns = [", ".join(columns)] * len(values)
return "INSERT INTO %s (%s) SELECT %s" % (
table,
names,
" UNION ALL SELECT ".join(columns),
) | Compile insert statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The insert values
:type values: dict or list
:return: The compiled insert
:rtype: str | Below is the instruction that describes the task:
### Input:
Compile insert statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The insert values
:type values: dict or list
:return: The compiled insert
:rtype: str
### Response:
def compile_insert(self, query, values):
"""
Compile insert statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The insert values
:type values: dict or list
:return: The compiled insert
:rtype: str
"""
table = self.wrap_table(query.from__)
if not isinstance(values, list):
values = [values]
# If there is only one row to insert, we just use the normal grammar
if len(values) == 1:
return super(SQLiteQueryGrammar, self).compile_insert(query, values)
names = self.columnize(values[0].keys())
columns = []
# SQLite requires us to build the multi-row insert as a listing of select with
# unions joining them together. So we'll build out this list of columns and
# then join them all together with select unions to complete the queries.
for column in values[0].keys():
columns.append("%s AS %s" % (self.get_marker(), self.wrap(column)))
columns = [", ".join(columns)] * len(values)
return "INSERT INTO %s (%s) SELECT %s" % (
table,
names,
" UNION ALL SELECT ".join(columns),
) |
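
Building the same multi-row INSERT shape by hand to show the UNION ALL SELECT trick; the double-quote identifier wrapping and the '?' parameter marker are assumptions consistent with SQLite, not calls into the grammar class.
rows = [{"name": "a", "score": 1}, {"name": "b", "score": 2}]
table = '"users"'
names = ", ".join('"%s"' % c for c in rows[0].keys())
columns = ", ".join('? AS "%s"' % c for c in rows[0].keys())
sql = "INSERT INTO %s (%s) SELECT %s" % (
    table, names, " UNION ALL SELECT ".join([columns] * len(rows))
)
print(sql)
# INSERT INTO "users" ("name", "score") SELECT ? AS "name", ? AS "score" UNION ALL SELECT ? AS "name", ? AS "score"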
def dbRestore(self, db_value, context=None):
"""
Converts a stored database value to Python.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant>
"""
if db_value is not None:
try:
return rest.unjsonify(db_value)
except StandardError:
log.exception('Failed to restore json')
raise orb.errors.DataStoreError('Failed to restore json.')
else:
return db_value | Converts a stored database value to Python.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant> | Below is the instruction that describes the task:
### Input:
Converts a stored database value to Python.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant>
### Response:
def dbRestore(self, db_value, context=None):
"""
Converts a stored database value to Python.
:param db_value: <variant>
:param context: <orb.Context>
:return: <variant>
"""
if db_value is not None:
try:
return rest.unjsonify(db_value)
except StandardError:
log.exception('Failed to restore json')
raise orb.errors.DataStoreError('Failed to restore json.')
else:
return db_value |