code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def delete_agile_board(self, board_id):
"""
Delete agile board by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.delete(url) | Delete agile board by id
:param board_id:
:return: | Below is the instruction that describes the task:
### Input:
Delete agile board by id
:param board_id:
:return:
### Response:
def delete_agile_board(self, board_id):
"""
Delete agile board by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.delete(url) |
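For reference, the only string handling the method above performs before issuing the DELETE is the URL formatting; a minimal check (the board id is a placeholder, not taken from the row):

```python
# Illustrative: the URL delete_agile_board builds before calling self.delete(url).
board_id = 42
print('rest/agile/1.0/board/{}'.format(str(board_id)))  # rest/agile/1.0/board/42
```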
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
"""
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
"""
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
if self.mva is None:
self._allow_stdin = True
self._start_sas()
if self.lst_len < 0:
self._get_lst_len()
if code.startswith('Obfuscated SAS Code'):
logger.debug("decoding string")
tmp1 = code.split()
decode = base64.b64decode(tmp1[-1])
code = decode.decode('utf-8')
if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
logger.debug("code type: " + str(type(code)))
logger.debug("code length: " + str(len(code)))
logger.debug("code string: " + code)
if code.startswith("/*SASKernelTest*/"):
res = self.mva.submit(code, "text")
else:
res = self.mva.submit(code, prompt=self.promptDict)
self.promptDict = {}
if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
print(res['LOG'], '\n' "Restarting SAS session on your behalf")
self.do_shutdown(True)
return res['LOG']
output = res['LST']
log = res['LOG']
return self._which_display(log, output)
elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
full_log = highlight(self.mva.saslog(), SASLogLexer(),
HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>",
title="Full SAS Log"))
return full_log.replace('\n', ' ')
else:
return self.cachedlog.replace('\n', ' ') | This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list | Below is the instruction that describes the task:
### Input:
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
### Response:
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
"""
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
"""
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
if self.mva is None:
self._allow_stdin = True
self._start_sas()
if self.lst_len < 0:
self._get_lst_len()
if code.startswith('Obfuscated SAS Code'):
logger.debug("decoding string")
tmp1 = code.split()
decode = base64.b64decode(tmp1[-1])
code = decode.decode('utf-8')
if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
logger.debug("code type: " + str(type(code)))
logger.debug("code length: " + str(len(code)))
logger.debug("code string: " + code)
if code.startswith("/*SASKernelTest*/"):
res = self.mva.submit(code, "text")
else:
res = self.mva.submit(code, prompt=self.promptDict)
self.promptDict = {}
if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
print(res['LOG'], '\n' "Restarting SAS session on your behalf")
self.do_shutdown(True)
return res['LOG']
output = res['LST']
log = res['LOG']
return self._which_display(log, output)
elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
full_log = highlight(self.mva.saslog(), SASLogLexer(),
HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>",
title="Full SAS Log"))
return full_log.replace('\n', ' ')
else:
return self.cachedlog.replace('\n', ' ') |
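The "Obfuscated SAS Code" branch above simply base64-decodes the last whitespace-separated token of the cell. A minimal sketch of that round trip (the SAS statement is an arbitrary example, not taken from the dataset row):

```python
import base64

# Round trip of the "Obfuscated SAS Code" convention handled above (example payload only).
sas_code = "proc print data=sashelp.class; run;"
cell_text = "Obfuscated SAS Code " + base64.b64encode(sas_code.encode("utf-8")).decode("ascii")
decoded = base64.b64decode(cell_text.split()[-1]).decode("utf-8")  # mirrors the tmp1[-1] handling
assert decoded == sas_code
```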
def unregister(self, id):
'''
Remove the service with id `id` from the service registry.
'''
result = self.rr.table(self.table).get(id).delete().run()
if result != {
'deleted':1, 'errors':0,'inserted':0,
'replaced':0,'skipped':0,'unchanged':0}:
self.logger.warn(
'unexpected result attempting to delete id=%s from '
'rethinkdb services table: %s', id, result) | Remove the service with id `id` from the service registry. | Below is the instruction that describes the task:
### Input:
Remove the service with id `id` from the service registry.
### Response:
def unregister(self, id):
'''
Remove the service with id `id` from the service registry.
'''
result = self.rr.table(self.table).get(id).delete().run()
if result != {
'deleted':1, 'errors':0,'inserted':0,
'replaced':0,'skipped':0,'unchanged':0}:
self.logger.warn(
'unexpected result attempting to delete id=%s from '
'rethinkdb services table: %s', id, result) |
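The equality check above compares RethinkDB's delete summary against the shape expected for a clean single-row delete. A small sketch of that shape, with illustrative values and no database required:

```python
# Expected summary for a clean single-row delete; deleting a missing id reports skipped=1 instead.
ok_result = {'deleted': 1, 'errors': 0, 'inserted': 0,
             'replaced': 0, 'skipped': 0, 'unchanged': 0}
missing_result = dict(ok_result, deleted=0, skipped=1)
print(missing_result != ok_result)  # True -> the warning branch above would fire
```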
def _left_click(self, event):
"""Function bound to left click event for marker canvas"""
self.update_active()
iid = self.current_iid
if iid is None:
return
args = (iid, event.x_root, event.y_root)
self.call_callbacks(iid, "left_callback", args) | Function bound to left click event for marker canvas | Below is the instruction that describes the task:
### Input:
Function bound to left click event for marker canvas
### Response:
def _left_click(self, event):
"""Function bound to left click event for marker canvas"""
self.update_active()
iid = self.current_iid
if iid is None:
return
args = (iid, event.x_root, event.y_root)
self.call_callbacks(iid, "left_callback", args) |
def should_submit(stack):
"""Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
"""
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False | Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True. | Below is the instruction that describes the task:
### Input:
Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
### Response:
def should_submit(stack):
"""Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
"""
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False |
def index_data(self, data, index_name, doc_type):
"""Take an arbitrary dictionary of data and index it with ELS.
Args:
data: data to be Indexed. Should be a dictionary.
index_name: Name of the index.
doc_type: The type of the document.
Raises:
RuntimeError: When the Indexing fails.
"""
# Index the data (which needs to be a dict/object) if it's not
# we're going to toss an exception
if not isinstance(data, dict):
raise RuntimeError('Index failed, data needs to be a dict!')
try:
self.els_search.index(index=index_name, doc_type=doc_type, body=data)
except Exception, error:
print 'Index failed: %s' % str(error)
raise RuntimeError('Index failed: %s' % str(error)) | Take an arbitrary dictionary of data and index it with ELS.
Args:
data: data to be Indexed. Should be a dictionary.
index_name: Name of the index.
doc_type: The type of the document.
Raises:
RuntimeError: When the Indexing fails. | Below is the instruction that describes the task:
### Input:
Take an arbitrary dictionary of data and index it with ELS.
Args:
data: data to be Indexed. Should be a dictionary.
index_name: Name of the index.
doc_type: The type of the document.
Raises:
RuntimeError: When the Indexing fails.
### Response:
def index_data(self, data, index_name, doc_type):
"""Take an arbitrary dictionary of data and index it with ELS.
Args:
data: data to be Indexed. Should be a dictionary.
index_name: Name of the index.
doc_type: The type of the document.
Raises:
RuntimeError: When the Indexing fails.
"""
# Index the data (which needs to be a dict/object) if it's not
# we're going to toss an exception
if not isinstance(data, dict):
raise RuntimeError('Index failed, data needs to be a dict!')
try:
self.els_search.index(index=index_name, doc_type=doc_type, body=data)
except Exception, error:
print 'Index failed: %s' % str(error)
raise RuntimeError('Index failed: %s' % str(error)) |
def t_direction(self, s):
r'^[+-]$'
# Used in the "list" command
self.add_token('DIRECTION', s)
self.pos += len(s) | r'^[+-]$ | Below is the instruction that describes the task:
### Input:
r'^[+-]$
### Response:
def t_direction(self, s):
r'^[+-]$'
# Used in the "list" command
self.add_token('DIRECTION', s)
self.pos += len(s) |
def skipline(self):
"""
Skip the next line and returns position and size of line.
Raises IOError if pre- and suffix of line do not match.
"""
position = self.tell()
prefix = self._fix()
self.seek(prefix, 1) # skip content
suffix = self._fix()
if prefix != suffix:
raise IOError(_FIX_ERROR)
return position, prefix | Skip the next line and returns position and size of line.
Raises IOError if pre- and suffix of line do not match. | Below is the instruction that describes the task:
### Input:
Skip the next line and returns position and size of line.
Raises IOError if pre- and suffix of line do not match.
### Response:
def skipline(self):
"""
Skip the next line and returns position and size of line.
Raises IOError if pre- and suffix of line do not match.
"""
position = self.tell()
prefix = self._fix()
self.seek(prefix, 1) # skip content
suffix = self._fix()
if prefix != suffix:
raise IOError(_FIX_ERROR)
return position, prefix |
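skipline appears to assume the usual Fortran unformatted record framing, where each record is wrapped in matching length markers read by self._fix(). A standalone sketch of that framing, assuming 4-byte little-endian markers (the marker size and endianness are assumptions, not stated in the row):

```python
import struct

# Sketch of <length><payload><length> record framing (marker size/endianness assumed).
payload = b"hello"
record = struct.pack("<i", len(payload)) + payload + struct.pack("<i", len(payload))
prefix = struct.unpack("<i", record[:4])[0]
suffix = struct.unpack("<i", record[-4:])[0]
print(prefix == suffix == len(payload))  # True: matching markers mean a well-formed record
```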
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
"""
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
portion of absolutely coincidental samples
"""
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])
return correct / examples_len if examples_len else 0 | Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
portion of absolutely coincidental samples | Below is the instruction that describes the task:
### Input:
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
portion of absolutely coincidental samples
### Response:
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
"""
Calculate accuracy in terms of absolute coincidence
Args:
y_true: array of true values
y_predicted: array of predicted values
Returns:
portion of absolutely coincidental samples
"""
examples_len = len(y_true)
correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)])
return correct / examples_len if examples_len else 0 |
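A quick usage check of the accuracy helper above (assumes the function is in scope; the labels are arbitrary):

```python
# Element-wise exact-match accuracy on toy labels.
y_true = [1, 0, 1, 1]
y_predicted = [1, 0, 0, 1]
print(accuracy(y_true, y_predicted))  # 3 of 4 positions match -> 0.75
print(accuracy([], []))               # empty input falls back to 0
```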
def data(self, data):
"""Overwrite the file with new data. You probably shouldn't do
this yourself, it's easy to screw up your whole file with this."""
if self.is_caching:
self.cache = data
else:
fcontents = self.file_contents
with open(self.path, "w") as f:
try:
# Write the file. Keep user settings about indentation, etc
indent = self.indent if self.pretty else None
json.dump(data, f, sort_keys=self.sort_keys, indent=indent)
except Exception as e:
# Rollback to prevent data loss
f.seek(0)
f.truncate()
f.write(fcontents)
# And re-raise the exception
raise e
self._updateType() | Overwrite the file with new data. You probably shouldn't do
this yourself, it's easy to screw up your whole file with this. | Below is the instruction that describes the task:
### Input:
Overwrite the file with new data. You probably shouldn't do
this yourself, it's easy to screw up your whole file with this.
### Response:
def data(self, data):
"""Overwrite the file with new data. You probably shouldn't do
this yourself, it's easy to screw up your whole file with this."""
if self.is_caching:
self.cache = data
else:
fcontents = self.file_contents
with open(self.path, "w") as f:
try:
# Write the file. Keep user settings about indentation, etc
indent = self.indent if self.pretty else None
json.dump(data, f, sort_keys=self.sort_keys, indent=indent)
except Exception as e:
# Rollback to prevent data loss
f.seek(0)
f.truncate()
f.write(fcontents)
# And re-raise the exception
raise e
self._updateType() |
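The try/except in the setter above snapshots the old file contents and writes them back if json.dump fails. A standalone sketch of the same write-with-rollback pattern outside the class (the function and parameter names are made up for illustration):

```python
import json

def write_json_with_rollback(path, data, indent=None):
    """Write `data` as JSON to `path`, restoring the previous contents on failure."""
    with open(path) as f:
        previous = f.read()               # snapshot the current contents first
    with open(path, "w") as f:
        try:
            json.dump(data, f, indent=indent)
        except Exception:
            f.seek(0)
            f.truncate()
            f.write(previous)             # roll back to the snapshot
            raise
```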
def get_decision(self, child, is_missing = False):
"""
Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node.
"""
# Child does exist and there is a path to the child.
value = self.value
feature = self.split_feature_column
index = self.split_feature_index
if not is_missing:
if self.left_id == child.node_id:
if self.node_type in ["float", "integer"]:
sign = "<"
else:
sign = "="
else:
if self.node_type in ["float", "integer"]:
sign = ">="
else:
sign = "!="
else:
sign = "missing"
value = None
return {
"node_id" : self.node_id,
"node_type" : self.node_type,
"feature" : feature,
"index" : index,
"sign" : sign,
"value" : value,
"child_id" : child.node_id,
"is_missing" : is_missing
} | Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node. | Below is the instruction that describes the task:
### Input:
Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node.
### Response:
def get_decision(self, child, is_missing = False):
"""
Get the decision from this node to a child node.
Parameters
----------
child: Node
A child node of this node.
Returns
-------
dict: A dictionary that describes how to get from this node to the
child node.
"""
# Child does exist and there is a path to the child.
value = self.value
feature = self.split_feature_column
index = self.split_feature_index
if not is_missing:
if self.left_id == child.node_id:
if self.node_type in ["float", "integer"]:
sign = "<"
else:
sign = "="
else:
if self.node_type in ["float", "integer"]:
sign = ">="
else:
sign = "!="
else:
sign = "missing"
value = None
return {
"node_id" : self.node_id,
"node_type" : self.node_type,
"feature" : feature,
"index" : index,
"sign" : sign,
"value" : value,
"child_id" : child.node_id,
"is_missing" : is_missing
} |
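For a numeric split where the child is the left branch, the dict returned above would look roughly like the following (every value below is made up for illustration, not taken from a real model):

```python
# Illustrative shape of get_decision's return value for a left branch on a float-typed split.
example_decision = {
    "node_id": 0, "node_type": "float", "feature": "age", "index": None,
    "sign": "<",          # '<' because the child is the left branch of a numeric split
    "value": 30.5, "child_id": 1, "is_missing": False,
}
print(example_decision["sign"])
```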
def get_port(self):
""" Return a port to use to talk to this cluster. """
if len(self.client_nodes) > 0:
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_port() | Return a port to use to talk to this cluster. | Below is the instruction that describes the task:
### Input:
Return a port to use to talk to this cluster.
### Response:
def get_port(self):
""" Return a port to use to talk to this cluster. """
if len(self.client_nodes) > 0:
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_port() |
def CountClientPlatformReleasesByLabel(self, day_buckets):
"""Computes client-activity stats for OS-release strings in the DB."""
return self._CountClientStatisticByLabel(
day_buckets, lambda client_info: client_info.last_snapshot.Uname()) | Computes client-activity stats for OS-release strings in the DB. | Below is the instruction that describes the task:
### Input:
Computes client-activity stats for OS-release strings in the DB.
### Response:
def CountClientPlatformReleasesByLabel(self, day_buckets):
"""Computes client-activity stats for OS-release strings in the DB."""
return self._CountClientStatisticByLabel(
day_buckets, lambda client_info: client_info.last_snapshot.Uname()) |
def get_public_url(self, doc_id, branch='master'):
"""Returns a GitHub URL for the doc in question (study, collection, ...)
"""
name, path_frag = self.get_repo_and_path_fragment(doc_id)
return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag | Returns a GitHub URL for the doc in question (study, collection, ...) | Below is the instruction that describes the task:
### Input:
Returns a GitHub URL for the doc in question (study, collection, ...)
### Response:
def get_public_url(self, doc_id, branch='master'):
"""Returns a GitHub URL for the doc in question (study, collection, ...)
"""
name, path_frag = self.get_repo_and_path_fragment(doc_id)
return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag |
def check_sum(buf, csum):
"""
Check the checksum of the data
:param buf:
:type buf:
:param csum:
:type csum:
:return:
:rtype:
"""
csum = csum.encode('utf-8')
_csum = ord(buf[0])
for x in buf[1:]:
_csum ^= ord(x)
_csum = binascii.b2a_hex(chr(_csum).encode('utf-8')).upper()
if _csum != csum:
sys.stderr.write('csum not matched: ({} {})\n'.format(_csum, csum))
return _csum == csum | Check the checksum of the data
:param buf:
:type buf:
:param csum:
:type csum:
:return:
:rtype: | Below is the instruction that describes the task:
### Input:
Check the checksum of the data
:param buf:
:type buf:
:param csum:
:type csum:
:return:
:rtype:
### Response:
def check_sum(buf, csum):
"""
Check the checksum of the data
:param buf:
:type buf:
:param csum:
:type csum:
:return:
:rtype:
"""
csum = csum.encode('utf-8')
_csum = ord(buf[0])
for x in buf[1:]:
_csum ^= ord(x)
_csum = binascii.b2a_hex(chr(_csum).encode('utf-8')).upper()
if _csum != csum:
sys.stderr.write('csum not matched: ({} {})\n'.format(_csum, csum))
return _csum == csum |
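A worked example of the XOR checksum above (assumes check_sum is in scope): XOR-ing 'A' (0x41), 'B' (0x42) and 'C' (0x43) gives 0x40, so the expected checksum string is "40".

```python
# 0x41 ^ 0x42 ^ 0x43 == 0x40, hex-encoded and upper-cased as "40".
print(check_sum("ABC", "40"))  # True
print(check_sum("ABC", "41"))  # False, and a mismatch note is written to stderr
```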
def raw_input(prompt=""):
"""raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading."""
sys.stderr.flush()
tty = STDIN.is_a_TTY() and STDOUT.is_a_TTY()
if RETURN_UNICODE:
if tty:
line_bytes = readline(prompt)
line = stdin_decode(line_bytes)
else:
line = stdio_readline(prompt)
else:
if tty:
line = readline(prompt)
else:
line_unicode = stdio_readline(prompt)
line = stdin_encode(line_unicode)
if line:
return line[:-1] # strip trailing "\n"
else:
raise EOFError | raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading. | Below is the instruction that describes the task:
### Input:
raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading.
### Response:
def raw_input(prompt=""):
"""raw_input([prompt]) -> string
Read a string from standard input. The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled. The prompt string, if given,
is printed without a trailing newline before reading."""
sys.stderr.flush()
tty = STDIN.is_a_TTY() and STDOUT.is_a_TTY()
if RETURN_UNICODE:
if tty:
line_bytes = readline(prompt)
line = stdin_decode(line_bytes)
else:
line = stdio_readline(prompt)
else:
if tty:
line = readline(prompt)
else:
line_unicode = stdio_readline(prompt)
line = stdin_encode(line_unicode)
if line:
return line[:-1] # strip trailing "\n"
else:
raise EOFError |
def execCommand(Argv, collect_missing):
r"""Executes the given task with parameters.
"""
try:
return _execCommand(Argv, collect_missing)
except Exception as e:
if Settings['errorHandler']:
Settings['errorHandler'](e)
if Settings['debug']:
# #ToDo: Have an option to debug through stderr. The issue is, the way to make pdb.post_mortem, to use stderr, like pdb.set_trace is unknown.
import pdb
pdb.post_mortem(sys.exc_info()[2])
if not Settings['silent']: # Debug, then log the trace.
import traceback
etype, value, tb = sys.exc_info()
tb = tb.tb_next.tb_next # remove the ec - calls from the traceback, to make it more understandable
message = ''.join(traceback.format_exception(etype, value, tb))[:-1]
else:
if isinstance(e, HandledException): # let the modes handle the HandledException
raise e
message = str(e) # provide a succinct error message
raise HandledException(message) | r"""Executes the given task with parameters. | Below is the instruction that describes the task:
### Input:
r"""Executes the given task with parameters.
### Response:
def execCommand(Argv, collect_missing):
r"""Executes the given task with parameters.
"""
try:
return _execCommand(Argv, collect_missing)
except Exception as e:
if Settings['errorHandler']:
Settings['errorHandler'](e)
if Settings['debug']:
# #ToDo: Have an option to debug through stderr. The issue is, the way to make pdb.post_mortem, to use stderr, like pdb.set_trace is unknown.
import pdb
pdb.post_mortem(sys.exc_info()[2])
if not Settings['silent']: # Debug, then log the trace.
import traceback
etype, value, tb = sys.exc_info()
tb = tb.tb_next.tb_next # remove the ec - calls from the traceback, to make it more understandable
message = ''.join(traceback.format_exception(etype, value, tb))[:-1]
else:
if isinstance(e, HandledException): # let the modes handle the HandledException
raise e
message = str(e) # provide a succinct error message
raise HandledException(message) |
def addDataset(self, dataset):
"""
Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem>
"""
item = XChartDatasetItem()
self.addItem(item)
item.setDataset(dataset)
return item | Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem> | Below is the instruction that describes the task:
### Input:
Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem>
### Response:
def addDataset(self, dataset):
"""
Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem>
"""
item = XChartDatasetItem()
self.addItem(item)
item.setDataset(dataset)
return item |
def _compute_hparam_info_from_values(self, name, values):
"""Builds an HParamInfo message from the hparam name and list of values.
Args:
name: string. The hparam name.
values: list of google.protobuf.Value messages. The list of values for the
hparam.
Returns:
An api_pb2.HParamInfo message.
"""
# Figure out the type from the values.
# Ignore values whose type is not listed in api_pb2.DataType
# If all values have the same type, then that is the type used.
# Otherwise, the returned type is DATA_TYPE_STRING.
result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)
distinct_values = set(
_protobuf_value_to_string(v) for v in values if _protobuf_value_type(v))
for v in values:
v_type = _protobuf_value_type(v)
if not v_type:
continue
if result.type == api_pb2.DATA_TYPE_UNSET:
result.type = v_type
elif result.type != v_type:
result.type = api_pb2.DATA_TYPE_STRING
if result.type == api_pb2.DATA_TYPE_STRING:
# A string result.type does not change, so we can exit the loop.
break
# If we couldn't figure out a type, then we can't compute the hparam_info.
if result.type == api_pb2.DATA_TYPE_UNSET:
return None
# If the result is a string, set the domain to be the distinct values if
# there aren't too many of them.
if (result.type == api_pb2.DATA_TYPE_STRING
and len(distinct_values) <= self._max_domain_discrete_len):
result.domain_discrete.extend(distinct_values)
return result | Builds an HParamInfo message from the hparam name and list of values.
Args:
name: string. The hparam name.
values: list of google.protobuf.Value messages. The list of values for the
hparam.
Returns:
An api_pb2.HParamInfo message. | Below is the instruction that describes the task:
### Input:
Builds an HParamInfo message from the hparam name and list of values.
Args:
name: string. The hparam name.
values: list of google.protobuf.Value messages. The list of values for the
hparam.
Returns:
An api_pb2.HParamInfo message.
### Response:
def _compute_hparam_info_from_values(self, name, values):
"""Builds an HParamInfo message from the hparam name and list of values.
Args:
name: string. The hparam name.
values: list of google.protobuf.Value messages. The list of values for the
hparam.
Returns:
An api_pb2.HParamInfo message.
"""
# Figure out the type from the values.
# Ignore values whose type is not listed in api_pb2.DataType
# If all values have the same type, then that is the type used.
# Otherwise, the returned type is DATA_TYPE_STRING.
result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)
distinct_values = set(
_protobuf_value_to_string(v) for v in values if _protobuf_value_type(v))
for v in values:
v_type = _protobuf_value_type(v)
if not v_type:
continue
if result.type == api_pb2.DATA_TYPE_UNSET:
result.type = v_type
elif result.type != v_type:
result.type = api_pb2.DATA_TYPE_STRING
if result.type == api_pb2.DATA_TYPE_STRING:
# A string result.type does not change, so we can exit the loop.
break
# If we couldn't figure out a type, then we can't compute the hparam_info.
if result.type == api_pb2.DATA_TYPE_UNSET:
return None
# If the result is a string, set the domain to be the distinct values if
# there aren't too many of them.
if (result.type == api_pb2.DATA_TYPE_STRING
and len(distinct_values) <= self._max_domain_discrete_len):
result.domain_discrete.extend(distinct_values)
return result |
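The core rule above is: keep the first concrete type seen, but collapse to string as soon as two values disagree. A standalone sketch of that rule using plain Python types instead of protobuf Value messages, so it runs without TensorBoard (the function name is made up):

```python
# Simplified type-resolution rule: the first type wins unless values disagree, then fall back to str.
def resolve_type(values):
    resolved = None
    for v in values:
        if resolved is None:
            resolved = type(v)
        elif resolved is not type(v):
            return str
    return resolved

print(resolve_type([1.0, 2.5]))      # <class 'float'>
print(resolve_type([1.0, "relu"]))   # <class 'str'> - mixed types collapse to string
```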
def nvmlDeviceSetAccountingMode(handle, mode):
r"""
/**
* Enables or disables per process accounting.
*
* For Kepler &tm; or newer fully supported devices.
* Requires root/admin permissions.
*
* @note This setting is not persistent and will default to disabled after driver unloads.
* Enable persistence mode to be sure the setting doesn't switch off to disabled.
*
* @note Enabling accounting mode has no negative impact on the GPU performance.
*
* @note Disabling accounting clears all accounting pids information.
*
* See \ref nvmlDeviceGetAccountingMode
* See \ref nvmlDeviceGetAccountingStats
* See \ref nvmlDeviceClearAccountingPids
*
* @param device The identifier of the target device
* @param mode The target accounting mode
*
* @return
* - \ref NVML_SUCCESS if the new mode has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None | r"""
/**
* Enables or disables per process accounting.
*
* For Kepler &tm; or newer fully supported devices.
* Requires root/admin permissions.
*
* @note This setting is not persistent and will default to disabled after driver unloads.
* Enable persistence mode to be sure the setting doesn't switch off to disabled.
*
* @note Enabling accounting mode has no negative impact on the GPU performance.
*
* @note Disabling accounting clears all accounting pids information.
*
* See \ref nvmlDeviceGetAccountingMode
* See \ref nvmlDeviceGetAccountingStats
* See \ref nvmlDeviceClearAccountingPids
*
* @param device The identifier of the target device
* @param mode The target accounting mode
*
* @return
* - \ref NVML_SUCCESS if the new mode has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode | Below is the instruction that describes the task:
### Input:
r"""
/**
* Enables or disables per process accounting.
*
* For Kepler &tm; or newer fully supported devices.
* Requires root/admin permissions.
*
* @note This setting is not persistent and will default to disabled after driver unloads.
* Enable persistence mode to be sure the setting doesn't switch off to disabled.
*
* @note Enabling accounting mode has no negative impact on the GPU performance.
*
* @note Disabling accounting clears all accounting pids information.
*
* See \ref nvmlDeviceGetAccountingMode
* See \ref nvmlDeviceGetAccountingStats
* See \ref nvmlDeviceClearAccountingPids
*
* @param device The identifier of the target device
* @param mode The target accounting mode
*
* @return
* - \ref NVML_SUCCESS if the new mode has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode
### Response:
def nvmlDeviceSetAccountingMode(handle, mode):
r"""
/**
* Enables or disables per process accounting.
*
* For Kepler &tm; or newer fully supported devices.
* Requires root/admin permissions.
*
* @note This setting is not persistent and will default to disabled after driver unloads.
* Enable persistence mode to be sure the setting doesn't switch off to disabled.
*
* @note Enabling accounting mode has no negative impact on the GPU performance.
*
* @note Disabling accounting clears all accounting pids information.
*
* See \ref nvmlDeviceGetAccountingMode
* See \ref nvmlDeviceGetAccountingStats
* See \ref nvmlDeviceClearAccountingPids
*
* @param device The identifier of the target device
* @param mode The target accounting mode
*
* @return
* - \ref NVML_SUCCESS if the new mode has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*/
nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None |
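A possible call sequence using the surrounding pynvml-style bindings (this assumes nvmlInit, nvmlDeviceGetHandleByIndex, NVML_FEATURE_ENABLED and nvmlShutdown come from the same module, and it needs root/admin plus an NVML-capable GPU, so treat it as a sketch rather than a definitive recipe):

```python
# Enable per-process accounting on GPU 0 (requires root/admin and a supported driver).
nvmlInit()
try:
    handle = nvmlDeviceGetHandleByIndex(0)
    nvmlDeviceSetAccountingMode(handle, NVML_FEATURE_ENABLED)
finally:
    nvmlShutdown()
```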
def save_var(self, key, value, **kwargs):
'Save one variable to the database.'
# Check whether Highwall's variables table exists
self.__check_or_create_vars_table()
column_type = get_column_type(value)
tmp = quote(self.__vars_table_tmp)
self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False)
# This is vulnerable to injection
self.execute(u'CREATE TABLE %s (`value` %s)' % (tmp, column_type), commit = False)
# This is ugly
self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [value], commit = False)
table = (quote(self.__vars_table), tmp)
params = [key, column_type]
self.execute(u'''
INSERT OR REPLACE INTO %s (`key`, `type`, `value`)
SELECT
? AS key,
? AS type,
value
FROM %s
''' % table, params)
self.execute(u'DROP TABLE %s' % tmp, commit = False)
self.__commit_if_necessary(kwargs) | Save one variable to the database. | Below is the instruction that describes the task:
### Input:
Save one variable to the database.
### Response:
def save_var(self, key, value, **kwargs):
'Save one variable to the database.'
# Check whether Highwall's variables table exists
self.__check_or_create_vars_table()
column_type = get_column_type(value)
tmp = quote(self.__vars_table_tmp)
self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False)
# This is vulnerable to injection
self.execute(u'CREATE TABLE %s (`value` %s)' % (tmp, column_type), commit = False)
# This is ugly
self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [value], commit = False)
table = (quote(self.__vars_table), tmp)
params = [key, column_type]
self.execute(u'''
INSERT OR REPLACE INTO %s (`key`, `type`, `value`)
SELECT
? AS key,
? AS type,
value
FROM %s
''' % table, params)
self.execute(u'DROP TABLE %s' % tmp, commit = False)
self.__commit_if_necessary(kwargs) |
def truncate(value: Decimal, n_digits: int) -> Decimal:
"""Truncates a value to a number of decimals places"""
return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits) | Truncates a value to a number of decimals places | Below is the instruction that describes the task:
### Input:
Truncates a value to a number of decimals places
### Response:
def truncate(value: Decimal, n_digits: int) -> Decimal:
"""Truncates a value to a number of decimals places"""
return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits) |
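Two quick checks of the truncation helper above (assumes truncate is in scope); note that it cuts digits rather than rounding, and math.trunc truncates toward zero:

```python
from decimal import Decimal

print(truncate(Decimal("3.14159"), 2))  # Decimal('3.14') - cut, not rounded
print(truncate(Decimal("-2.789"), 1))   # Decimal('-2.7') - truncation is toward zero
```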
def create_organization(self, name):
"""
To create an organization Jira administrator global permission or agent permission is required
depending on the settings
:param name: str
:return: Organization data
"""
log.warning('Creating organization...')
url = 'rest/servicedeskapi/organization'
data = {'name': name}
return self.post(url, headers=self.experimental_headers, data=data) | To create an organization Jira administrator global permission or agent permission is required
depending on the settings
:param name: str
:return: Organization data | Below is the the instruction that describes the task:
### Input:
To create an organization Jira administrator global permission or agent permission is required
depending on the settings
:param name: str
:return: Organization data
### Response:
def create_organization(self, name):
"""
To create an organization Jira administrator global permission or agent permission is required
depending on the settings
:param name: str
:return: Organization data
"""
log.warning('Creating organization...')
url = 'rest/servicedeskapi/organization'
data = {'name': name}
return self.post(url, headers=self.experimental_headers, data=data) |
def n_bifurcation_points(neurites, neurite_type=NeuriteType.all):
'''number of bifurcation points in a collection of neurites'''
return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.ibifurcation_point) | number of bifurcation points in a collection of neurites | Below is the instruction that describes the task:
### Input:
number of bifurcation points in a collection of neurites
### Response:
def n_bifurcation_points(neurites, neurite_type=NeuriteType.all):
'''number of bifurcation points in a collection of neurites'''
return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.ibifurcation_point) |
def is_consecutive(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool:
"""Whether all bins are in a growing order.
Parameters
----------
rtol, atol : numpy tolerance parameters
"""
if self.inconsecutive_allowed:
if self._consecutive is None:
if self._numpy_bins is not None:
self._consecutive = True
self._consecutive = is_consecutive(self.bins, rtol, atol)
return self._consecutive
else:
return True | Whether all bins are in a growing order.
Parameters
----------
rtol, atol : numpy tolerance parameters | Below is the instruction that describes the task:
### Input:
Whether all bins are in a growing order.
Parameters
----------
rtol, atol : numpy tolerance parameters
### Response:
def is_consecutive(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool:
"""Whether all bins are in a growing order.
Parameters
----------
rtol, atol : numpy tolerance parameters
"""
if self.inconsecutive_allowed:
if self._consecutive is None:
if self._numpy_bins is not None:
self._consecutive = True
self._consecutive = is_consecutive(self.bins, rtol, atol)
return self._consecutive
else:
return True |
def update(self):
"""
This method should be called when you want to ensure all cached attributes
are in sync with the actual object attributes at runtime.
This happens because attributes could store mutable objects and be
modified outside the scope of this class.
The most common idiom that isn't automagically caught is mutating a list
or dictionary. Lets say 'user' object have an attribute named 'friends'
containing a list, calling 'user.friends.append(new_friend)' only get the
attribute, SIWrapper isn't aware that the object returned was modified
and the cached data is not updated.
"""
self.holder = siget(self.holder.FullName) # fix dispatch issues
for key, value in self.__dict__.iteritems():
key = self.namespace + key
if self._validate_key(key):
if not self.holder.Parameters(key):
self.holder.AddParameter3(key, C.siString)
self.holder.Parameters(key).Value = encode(value) | This method should be called when you want to ensure all cached attributes
are in sync with the actual object attributes at runtime.
This happens because attributes could store mutable objects and be
modified outside the scope of this class.
The most common idiom that isn't automagically caught is mutating a list
or dictionary. Lets say 'user' object have an attribute named 'friends'
containing a list, calling 'user.friends.append(new_friend)' only get the
attribute, SIWrapper isn't aware that the object returned was modified
and the cached data is not updated. | Below is the instruction that describes the task:
### Input:
This method should be called when you want to ensure all cached attributes
are in sync with the actual object attributes at runtime.
This happens because attributes could store mutable objects and be
modified outside the scope of this class.
The most common idiom that isn't automagically caught is mutating a list
or dictionary. Lets say 'user' object have an attribute named 'friends'
containing a list, calling 'user.friends.append(new_friend)' only get the
attribute, SIWrapper isn't aware that the object returned was modified
and the cached data is not updated.
### Response:
def update(self):
"""
This method should be called when you want to ensure all cached attributes
are in sync with the actual object attributes at runtime.
This happens because attributes could store mutable objects and be
modified outside the scope of this class.
The most common idiom that isn't automagically caught is mutating a list
or dictionary. Lets say 'user' object have an attribute named 'friends'
containing a list, calling 'user.friends.append(new_friend)' only get the
attribute, SIWrapper isn't aware that the object returned was modified
and the cached data is not updated.
"""
self.holder = siget(self.holder.FullName) # fix dispatch issues
for key, value in self.__dict__.iteritems():
key = self.namespace + key
if self._validate_key(key):
if not self.holder.Parameters(key):
self.holder.AddParameter3(key, C.siString)
self.holder.Parameters(key).Value = encode(value) |
def property_observer(self, name):
"""Function decorator to register a property observer. See ``MPV.observe_property`` for details."""
def wrapper(fun):
self.observe_property(name, fun)
fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun)
return fun
return wrapper | Function decorator to register a property observer. See ``MPV.observe_property`` for details. | Below is the instruction that describes the task:
### Input:
Function decorator to register a property observer. See ``MPV.observe_property`` for details.
### Response:
def property_observer(self, name):
"""Function decorator to register a property observer. See ``MPV.observe_property`` for details."""
def wrapper(fun):
self.observe_property(name, fun)
fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun)
return fun
return wrapper |
def main():
"""
NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
-f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
"""
infile,outfile,data,indata="","",[],[]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-i' in sys.argv:
print("Welcome to paleolatitude calculator\n")
while 1:
data=[]
print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit")
try:
plate=input("Plate\n").upper()
except:
print("Goodbye \n")
sys.exit()
lat=float(input( "Site latitude\n"))
lon=float(input(" Site longitude\n"))
age=float(input(" Age\n"))
data=[plate,lat,lon,age]
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(spitout(data))
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
f=open(infile,'r')
inp=f.readlines()
elif '-P' in sys.argv:
ind=sys.argv.index('-P')
plate=sys.argv[ind+1].upper()
if '-lat' in sys.argv:
ind=sys.argv.index('-lat')
lat=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-lon' in sys.argv:
ind=sys.argv.index('-lon')
lon=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-age' in sys.argv:
ind=sys.argv.index('-age')
age=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
data=[plate,lat,lon,age]
outstring=spitout(data)
if outfile=="":
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(outstring)
else:
out.write(outstring)
sys.exit()
else:
inp=sys.stdin.readlines() # read from standard input
if len(inp)>0:
for line in inp:
data=[]
rec=line.split()
data.append(rec[0])
for k in range(1,4): data.append(float(rec[k]))
indata.append(data)
if len(indata)>0:
for line in indata:
outstring=spitout(line)
if outfile=="":
print(outstring)
else:
out.write(outstring)
else:
print('no input data')
sys.exit() | NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
-f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long. | Below is the instruction that describes the task:
### Input:
NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
-f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
### Response:
def main():
"""
NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
-f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
"""
infile,outfile,data,indata="","",[],[]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-i' in sys.argv:
print("Welcome to paleolatitude calculator\n")
while 1:
data=[]
print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit")
try:
plate=input("Plate\n").upper()
except:
print("Goodbye \n")
sys.exit()
lat=float(input( "Site latitude\n"))
lon=float(input(" Site longitude\n"))
age=float(input(" Age\n"))
data=[plate,lat,lon,age]
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(spitout(data))
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
f=open(infile,'r')
inp=f.readlines()
elif '-P' in sys.argv:
ind=sys.argv.index('-P')
plate=sys.argv[ind+1].upper()
if '-lat' in sys.argv:
ind=sys.argv.index('-lat')
lat=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-lon' in sys.argv:
ind=sys.argv.index('-lon')
lon=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-age' in sys.argv:
ind=sys.argv.index('-age')
age=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
data=[plate,lat,lon,age]
outstring=spitout(data)
if outfile=="":
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(outstring)
else:
out.write(outstring)
sys.exit()
else:
inp=sys.stdin.readlines() # read from standard input
if len(inp)>0:
for line in inp:
data=[]
rec=line.split()
data.append(rec[0])
for k in range(1,4): data.append(float(rec[k]))
indata.append(data)
if len(indata)>0:
for line in indata:
outstring=spitout(line)
if outfile=="":
print(outstring)
else:
out.write(outstring)
else:
print('no input data')
sys.exit() |
def setUpImports(self):
'''set import statements
'''
i = self.imports
print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD'
print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'
module = self.getTypesModuleName()
package = self.getTypesModulePath()
if package:
module = '%s.%s' %(package, module)
print >>i, 'from %s import *' %(module)
print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name) | set import statements | Below is the instruction that describes the task:
### Input:
set import statements
### Response:
def setUpImports(self):
'''set import statements
'''
i = self.imports
print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD'
print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'
module = self.getTypesModuleName()
package = self.getTypesModulePath()
if package:
module = '%s.%s' %(package, module)
print >>i, 'from %s import *' %(module)
print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name) |
def setup(app):
''' Required Sphinx extension setup function. '''
# These two are deprecated and no longer have any effect, to be removed 2.0
app.add_config_value('bokeh_plot_pyfile_include_dirs', [], 'html')
app.add_config_value('bokeh_plot_use_relative_paths', False, 'html')
app.add_directive('bokeh-plot', BokehPlotDirective)
app.add_config_value('bokeh_missing_google_api_key_ok', True, 'html')
app.connect('builder-inited', builder_inited)
app.connect('build-finished', build_finished) | Required Sphinx extension setup function. | Below is the instruction that describes the task:
### Input:
Required Sphinx extension setup function.
### Response:
def setup(app):
''' Required Sphinx extension setup function. '''
# These two are deprecated and no longer have any effect, to be removed 2.0
app.add_config_value('bokeh_plot_pyfile_include_dirs', [], 'html')
app.add_config_value('bokeh_plot_use_relative_paths', False, 'html')
app.add_directive('bokeh-plot', BokehPlotDirective)
app.add_config_value('bokeh_missing_google_api_key_ok', True, 'html')
app.connect('builder-inited', builder_inited)
app.connect('build-finished', build_finished) |
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
"""Callback to checkpoint Module to prefix every epoch.
Parameters
----------
mod : subclass of BaseModule
The module to checkpoint.
prefix : str
The file prefix for this checkpoint.
period : int
How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
Indicates whether or not to save optimizer states for continued training.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
"""
period = int(max(1, period))
# pylint: disable=unused-argument
def _callback(iter_no, sym=None, arg=None, aux=None):
"""The checkpoint function."""
if (iter_no + 1) % period == 0:
mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
return _callback | Callback to checkpoint Module to prefix every epoch.
Parameters
----------
mod : subclass of BaseModule
The module to checkpoint.
prefix : str
The file prefix for this checkpoint.
period : int
How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
Indicates whether or not to save optimizer states for continued training.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit. | Below is the instruction that describes the task:
### Input:
Callback to checkpoint Module to prefix every epoch.
Parameters
----------
mod : subclass of BaseModule
The module to checkpoint.
prefix : str
The file prefix for this checkpoint.
period : int
How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
Indicates whether or not to save optimizer states for continued training.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
### Response:
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
"""Callback to checkpoint Module to prefix every epoch.
Parameters
----------
mod : subclass of BaseModule
The module to checkpoint.
prefix : str
The file prefix for this checkpoint.
period : int
How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
Indicates whether or not to save optimizer states for continued training.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
"""
period = int(max(1, period))
# pylint: disable=unused-argument
def _callback(iter_no, sym=None, arg=None, aux=None):
"""The checkpoint function."""
if (iter_no + 1) % period == 0:
mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
return _callback |
def _format_dict(self, info_dict):
"""Replaces empty content with 'NA's"""
for key, value in info_dict.items():
if not value:
info_dict[key] = "NA"
return info_dict | Replaces empty content with 'NA's | Below is the instruction that describes the task:
### Input:
Replaces empty content with 'NA's
### Response:
def _format_dict(self, info_dict):
"""Replaces empty content with 'NA's"""
for key, value in info_dict.items():
if not value:
info_dict[key] = "NA"
return info_dict |
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = _dict_getitem(self, key)
for val in vals[1:]:
yield vals[0], val | Iterate over all header lines, including duplicate ones. | Below is the instruction that describes the task:
### Input:
Iterate over all header lines, including duplicate ones.
### Response:
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = _dict_getitem(self, key)
for val in vals[1:]:
yield vals[0], val |
def on_view_not_found(
self,
environ: Dict[str, Any],
start_response: Callable) -> Iterable[bytes]: # pragma: nocover
""" called when view is not found"""
raise NotImplementedError() | called when view is not found | Below is the instruction that describes the task:
### Input:
called when view is not found
### Response:
def on_view_not_found(
self,
environ: Dict[str, Any],
start_response: Callable) -> Iterable[bytes]: # pragma: nocover
""" called when view is not found"""
raise NotImplementedError() |
def _getProcessedImage(self):
"""Returns the image data after it has been processed by any normalization options in use.
This method also sets the attributes self.levelMin and self.levelMax
to indicate the range of data in the image."""
if self.imageDisp is None:
self.imageDisp = self.image
self.levelMin, self.levelMax = self._quickLevels(
self.imageDisp)
#list( map(float, self._quickLevels(self.imageDisp)))
return self.imageDisp | Returns the image data after it has been processed by any normalization options in use.
This method also sets the attributes self.levelMin and self.levelMax
to indicate the range of data in the image. | Below is the instruction that describes the task:
### Input:
Returns the image data after it has been processed by any normalization options in use.
This method also sets the attributes self.levelMin and self.levelMax
to indicate the range of data in the image.
### Response:
def _getProcessedImage(self):
"""Returns the image data after it has been processed by any normalization options in use.
This method also sets the attributes self.levelMin and self.levelMax
to indicate the range of data in the image."""
if self.imageDisp is None:
self.imageDisp = self.image
self.levelMin, self.levelMax = self._quickLevels(
self.imageDisp)
#list( map(float, self._quickLevels(self.imageDisp)))
return self.imageDisp |
def tvdb_login(api_key):
""" Logs into TVDb using the provided api key
Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
Online docs: api.thetvdb.com/swagger#!/Authentication/post_login=
"""
url = "https://api.thetvdb.com/login"
body = {"apikey": api_key}
status, content = _request_json(url, body=body, cache=False)
if status == 401:
raise MapiProviderException("invalid api key")
elif status != 200 or not content.get("token"):
raise MapiNetworkException("TVDb down or unavailable?")
return content["token"] | Logs into TVDb using the provided api key
Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
Online docs: api.thetvdb.com/swagger#!/Authentication/post_login= | Below is the instruction that describes the task:
### Input:
Logs into TVDb using the provided api key
Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
Online docs: api.thetvdb.com/swagger#!/Authentication/post_login=
### Response:
def tvdb_login(api_key):
""" Logs into TVDb using the provided api key
Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
Online docs: api.thetvdb.com/swagger#!/Authentication/post_login=
"""
url = "https://api.thetvdb.com/login"
body = {"apikey": api_key}
status, content = _request_json(url, body=body, cache=False)
if status == 401:
raise MapiProviderException("invalid api key")
elif status != 200 or not content.get("token"):
raise MapiNetworkException("TVDb down or unavailable?")
return content["token"] |
def apply_step(self, variables, deltas):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
"""
if len(variables) != len(deltas):
raise TensorForceError("Invalid variables and deltas lists.")
return tf.group(
*(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))
) | Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops. | Below is the instruction that describes the task:
### Input:
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
### Response:
def apply_step(self, variables, deltas):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
"""
if len(variables) != len(deltas):
raise TensorForceError("Invalid variables and deltas lists.")
return tf.group(
*(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))
) |
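A sketch of the grouped in-place update that apply_step builds, written against TensorFlow 1.x graph semantics through tf.compat.v1 (an assumption; the original TensorForce code targets TF 1.x directly). Variable and delta shapes must match pairwise, as the method requires.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

variables = [
    tf.compat.v1.get_variable("w", initializer=tf.constant([1.0, 2.0])),
    tf.compat.v1.get_variable("b", initializer=tf.constant([0.5])),
]
deltas = [tf.constant([0.1, 0.1]), tf.constant([-0.5])]

# Single op that applies every delta in place, mirroring apply_step's return value.
apply_op = tf.group(
    *(tf.compat.v1.assign_add(ref=v, value=d) for v, d in zip(variables, deltas))
)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    sess.run(apply_op)
    print(sess.run(variables))  # w -> [1.1, 2.1], b -> [0.0]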
def data_changed(self, change):
""" Notify the model that data has changed in this cell! """
index = self.index
if index:
self.view.model.dataChanged.emit(index, index) | Notify the model that data has changed in this cell! | Below is the the instruction that describes the task:
### Input:
Notify the model that data has changed in this cell!
### Response:
def data_changed(self, change):
""" Notify the model that data has changed in this cell! """
index = self.index
if index:
self.view.model.dataChanged.emit(index, index) |
def get_dev_alarms(auth, url, devid=None, devip=None):
"""
function takes the devId of a specific device and issues a RESTful call to get the current
alarms for the target device.
:param devid: int or str value of the target device
:param devip: str of ipv4 address of the target device
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries containing the alarms for this device
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221')
>>> assert 'ackStatus' in dev_alarms[0]
"""
# checks to see if the imc credentials are already available
if devip is not None:
devid = get_dev_details(devip, auth, url)['id']
f_url = url + "/imcrs/fault/alarm?operatorName=admin&deviceId=" + \
str(devid) + "&desc=false"
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_alarm = (json.loads(response.text))
if 'alarm' in dev_alarm:
return dev_alarm['alarm']
else:
return "Device has no alarms"
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + ' get_dev_alarms: An Error has occured' | function takes the devId of a specific device and issues a RESTFUL call to get the current
alarms for the target device.
:param devid: int or str value of the target device
:param devip: str of ipv4 address of the target device
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries containing the alarms for this device
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221')
>>> assert 'ackStatus' in dev_alarms[0] | Below is the the instruction that describes the task:
### Input:
function takes the devId of a specific device and issues a RESTful call to get the current
alarms for the target device.
:param devid: int or str value of the target device
:param devip: str of ipv4 address of the target device
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries containing the alarms for this device
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221')
>>> assert 'ackStatus' in dev_alarms[0]
### Response:
def get_dev_alarms(auth, url, devid=None, devip=None):
"""
function takes the devId of a specific device and issues a RESTful call to get the current
alarms for the target device.
:param devid: int or str value of the target device
:param devip: str of ipv4 address of the target device
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries containing the alarms for this device
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221')
>>> assert 'ackStatus' in dev_alarms[0]
"""
# checks to see if the imc credentials are already available
if devip is not None:
devid = get_dev_details(devip, auth, url)['id']
f_url = url + "/imcrs/fault/alarm?operatorName=admin&deviceId=" + \
str(devid) + "&desc=false"
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
dev_alarm = (json.loads(response.text))
if 'alarm' in dev_alarm:
return dev_alarm['alarm']
else:
return "Device has no alarms"
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + ' get_dev_alarms: An Error has occured' |
def reset_password(self, token):
"""
View function to verify a user's reset password token from the email we sent to them.
It also handles the form for them to set a new password.
Supports html and json requests.
"""
expired, invalid, user = \
self.security_utils_service.reset_password_token_status(token)
if invalid:
self.flash(
_('flask_unchained.bundles.security:flash.invalid_reset_password_token'),
category='error')
return self.redirect('SECURITY_INVALID_RESET_TOKEN_REDIRECT')
elif expired:
self.security_service.send_reset_password_instructions(user)
self.flash(_('flask_unchained.bundles.security:flash.password_reset_expired',
email=user.email,
within=app.config.SECURITY_RESET_PASSWORD_WITHIN),
category='error')
return self.redirect('SECURITY_EXPIRED_RESET_TOKEN_REDIRECT')
spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT
if request.method == 'GET' and spa_redirect:
return self.redirect(spa_redirect, token=token, _external=True)
form = self._get_form('SECURITY_RESET_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.reset_password(user, form.password.data)
self.security_service.login_user(user)
self.after_this_request(self._commit)
self.flash(_('flask_unchained.bundles.security:flash.password_reset'),
category='success')
if request.is_json:
return self.jsonify({'token': user.get_auth_token(),
'user': user})
return self.redirect('SECURITY_POST_RESET_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('reset_password',
reset_password_form=form,
reset_password_token=token,
**self.security.run_ctx_processor('reset_password')) | View function to verify a user's reset password token from the email we sent to them.
It also handles the form for them to set a new password.
Supports html and json requests. | Below is the the instruction that describes the task:
### Input:
View function to verify a user's reset password token from the email we sent to them.
It also handles the form for them to set a new password.
Supports html and json requests.
### Response:
def reset_password(self, token):
"""
View function to verify a user's reset password token from the email we sent to them.
It also handles the form for them to set a new password.
Supports html and json requests.
"""
expired, invalid, user = \
self.security_utils_service.reset_password_token_status(token)
if invalid:
self.flash(
_('flask_unchained.bundles.security:flash.invalid_reset_password_token'),
category='error')
return self.redirect('SECURITY_INVALID_RESET_TOKEN_REDIRECT')
elif expired:
self.security_service.send_reset_password_instructions(user)
self.flash(_('flask_unchained.bundles.security:flash.password_reset_expired',
email=user.email,
within=app.config.SECURITY_RESET_PASSWORD_WITHIN),
category='error')
return self.redirect('SECURITY_EXPIRED_RESET_TOKEN_REDIRECT')
spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT
if request.method == 'GET' and spa_redirect:
return self.redirect(spa_redirect, token=token, _external=True)
form = self._get_form('SECURITY_RESET_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.reset_password(user, form.password.data)
self.security_service.login_user(user)
self.after_this_request(self._commit)
self.flash(_('flask_unchained.bundles.security:flash.password_reset'),
category='success')
if request.is_json:
return self.jsonify({'token': user.get_auth_token(),
'user': user})
return self.redirect('SECURITY_POST_RESET_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('reset_password',
reset_password_form=form,
reset_password_token=token,
**self.security.run_ctx_processor('reset_password')) |
def live_unread_notification_list(request):
''' Return a json with an unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0,
'unread_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
# If they don't specify, fall back to the configured default.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
unread_list = []
for notification in request.user.notifications.unread()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
unread_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'unread_count': request.user.notifications.unread().count(),
'unread_list': unread_list
}
return JsonResponse(data) | Return a json with an unread notification list | Below is the the instruction that describes the task:
### Input:
Return a json with an unread notification list
### Response:
def live_unread_notification_list(request):
''' Return a json with an unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0,
'unread_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
# If they don't specify, make it 5.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
unread_list = []
for notification in request.user.notifications.unread()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
unread_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'unread_count': request.user.notifications.unread().count(),
'unread_list': unread_list
}
return JsonResponse(data) |
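The 'max' handling above compresses nicely into a standalone helper; the function name below is an assumption, but the clamping rule (integers from 1 to 100, otherwise the configured default) is taken from the view.

def _num_to_fetch(raw_value, default_num_to_fetch):
    """Clamp the requested count to 1..100, else fall back to the default."""
    try:
        num = int(raw_value)
    except (TypeError, ValueError):  # missing parameter or not an integer
        return default_num_to_fetch
    return num if 1 <= num <= 100 else default_num_to_fetch

print(_num_to_fetch("25", 5))   # 25
print(_num_to_fetch("500", 5))  # 5, out of the allowed range
print(_num_to_fetch(None, 5))   # 5, nothing was supplied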
def solveConsAggMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,MrkvArray,
PermGroFac,PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid,
AFunc,Rfunc,wFunc,DeprFac):
'''
Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). Moreover, the macroeconomic
state follows a Markov process that determines the income distribution and
aggregate permanent growth factor. This is a basic solver that can't handle
cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [[np.array]]
A list of lists, each containing five arrays of floats, representing a
discrete approximation to the income process between the period being
solved and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
MrkvArray : np.array
Markov transition matrix between discrete macroeconomic states.
MrkvArray[i,j] is probability of being in state j next period conditional
on being in state i this period.
PermGroFac : float
Expected permanent income growth factor at the end of this period,
for the *individual*'s productivity.
PermGroFacAgg : [float]
Expected aggregate productivity growth in each Markov macro state.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : [function]
Aggregate savings as a function of aggregate market resources, for each
Markov macro state.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpola-
tions) and marginal value function vPfunc.
'''
# Get sizes of grids
aCount = aXtraGrid.size
Mcount = Mgrid.size
StateCount = MrkvArray.shape[0]
# Loop through next period's states, assuming we reach each one at a time.
# Construct EndOfPrdvP_cond functions for each state.
EndOfPrdvPfunc_cond = []
BoroCnstNat_cond = []
for j in range(StateCount):
# Unpack next period's solution
vPfuncNext = solution_next.vPfunc[j]
mNrmMinNext = solution_next.mNrmMin[j]
# Unpack the income shocks
ShkPrbsNext = IncomeDstn[j][0]
PermShkValsNext = IncomeDstn[j][1]
TranShkValsNext = IncomeDstn[j][2]
PermShkAggValsNext = IncomeDstn[j][3]
TranShkAggValsNext = IncomeDstn[j][4]
ShkCount = ShkPrbsNext.size
aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount,1)),(Mcount,1,ShkCount))
# Make tiled versions of the income shocks
# Dimension order: Mnow, aNow, Shk
ShkPrbsNext_tiled = np.tile(np.reshape(ShkPrbsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkValsNext_tiled = np.tile(np.reshape(PermShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkValsNext_tiled = np.tile(np.reshape(TranShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkAggValsNext_tiled = np.tile(np.reshape(PermShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkAggValsNext_tiled = np.tile(np.reshape(TranShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
# Make a tiled grid of end-of-period aggregate assets. These lines use
# next prd state j's aggregate saving rule to get a relevant set of Aagg,
# which will be used to make an interpolated EndOfPrdvP_cond function.
# After constructing these functions, we will use the aggregate saving
# rule for *current* state i to get values of Aagg at which to evaluate
# these conditional marginal value functions. In the strange, maybe even
# impossible case where the aggregate saving rules differ wildly across
# macro states *and* there is "anti-persistence", so that the macro state
# is very likely to change each period, then this procedure will lead to
# an inaccurate solution because the grid of Aagg values on which the
# conditional marginal value functions are constructed is not relevant
# to the values at which it will actually be evaluated.
AaggGrid = AFunc[j](Mgrid)
AaggNow_tiled = np.tile(np.reshape(AaggGrid,(Mcount,1,1)),(1,aCount,ShkCount))
# Calculate returns to capital and labor in the next period
kNext_array = AaggNow_tiled/(PermGroFacAgg[j]*PermShkAggValsNext_tiled) # Next period's aggregate capital to labor ratio
kNextEff_array = kNext_array/TranShkAggValsNext_tiled # Same thing, but account for *transitory* shock
R_array = Rfunc(kNextEff_array) # Interest factor on aggregate assets
Reff_array = R_array/LivPrb # Effective interest factor on individual assets *for survivors*
wEff_array = wFunc(kNextEff_array)*TranShkAggValsNext_tiled # Effective wage rate (accounts for labor supply)
PermShkTotal_array = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled*PermShkAggValsNext_tiled # total / combined permanent shock
Mnext_array = kNext_array*R_array + wEff_array # next period's aggregate market resources
# Find the natural borrowing constraint for each value of M in the Mgrid.
# There is likely a faster way to do this, but someone needs to do the math:
# is aNrmMin determined by getting the worst shock of all four types?
aNrmMin_candidates = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\
(mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:])
aNrmMin_vec = np.max(aNrmMin_candidates,axis=1)
BoroCnstNat_vec = aNrmMin_vec
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
# Calculate market resources next period (and a constant array of capital-to-labor ratio)
mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array
# Find marginal value next period at every income shock realization and every aggregate market resource gridpoint
vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array)
# Calculate expected marginal value at the end of the period at every asset gridpoint
EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2)
# Make the conditional end-of-period marginal value function
BoroCnstNat = LinearInterp(np.insert(AaggGrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
EndOfPrdvPnvrs = np.concatenate((np.zeros((Mcount,1)),EndOfPrdvP**(-1./CRRA)),axis=1)
EndOfPrdvPnvrsFunc_base = BilinearInterp(np.transpose(EndOfPrdvPnvrs),np.insert(aXtraGrid,0,0.0),AaggGrid)
EndOfPrdvPnvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvPnvrsFunc_base,BoroCnstNat)
EndOfPrdvPfunc_cond.append(MargValueFunc2D(EndOfPrdvPnvrsFunc,CRRA))
BoroCnstNat_cond.append(BoroCnstNat)
# Prepare some objects that are the same across all current states
aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount)),(Mcount,1))
cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0]))
# Now loop through *this* period's discrete states, calculating end-of-period
# marginal value (weighting across state transitions), then construct consumption
# and marginal value function for each state.
cFuncNow = []
vPfuncNow = []
mNrmMinNow = []
for i in range(StateCount):
# Find natural borrowing constraint for this state by Aagg
AaggNow = AFunc[i](Mgrid)
aNrmMin_candidates = np.zeros((StateCount,Mcount)) + np.nan
for j in range(StateCount):
if MrkvArray[i,j] > 0.: # Irrelevant if transition is impossible
aNrmMin_candidates[j,:] = BoroCnstNat_cond[j](AaggNow)
aNrmMin_vec = np.nanmax(aNrmMin_candidates,axis=0)
BoroCnstNat_vec = aNrmMin_vec
# Make tiled grids of aNrm and Aagg
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1)),(1,aCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
AaggNow_tiled = np.tile(np.reshape(AaggNow,(Mcount,1)),(1,aCount))
# Loop through feasible transitions and calculate end-of-period marginal value
EndOfPrdvP = np.zeros((Mcount,aCount))
for j in range(StateCount):
if MrkvArray[i,j] > 0.:
temp = EndOfPrdvPfunc_cond[j](aNrmNow_tiled,AaggNow_tiled)
EndOfPrdvP += MrkvArray[i,j]*temp
# Calculate consumption and the endogenous mNrm gridpoints for this state
cNrmNow = EndOfPrdvP**(-1./CRRA)
mNrmNow = aNrmNow_tiled + cNrmNow
# Loop through the values in Mgrid and make a piecewise linear consumption function for each
cFuncBaseByM_list = []
for n in range(Mcount):
c_temp = np.insert(cNrmNow[n,:],0,0.0) # Add point at bottom
m_temp = np.insert(mNrmNow[n,:] - BoroCnstNat_vec[n],0,0.0)
cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp))
# Add the M-specific consumption function to the list
# Construct the unconstrained consumption function by combining the M-specific functions
BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid)
cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat)
# Combine the constrained consumption function with unconstrained component
cFuncNow.append(LowerEnvelope2D(cFuncUnc,cFuncCnst))
# Make the minimum m function as the greater of the natural and artificial constraints
mNrmMinNow.append(UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt)))
# Construct the marginal value function using the envelope condition
vPfuncNow.append(MargValueFunc2D(cFuncNow[-1],CRRA))
# Pack up and return the solution
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow)
return solution_now | Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). Moreover, the macroeconomic
state follows a Markov process that determines the income distribution and
aggregate permanent growth factor. This is a basic solver that can't handle
cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [[np.array]]
A list of lists, each containing five arrays of floats, representing a
discrete approximation to the income process between the period being
solved and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
MrkvArray : np.array
Markov transition matrix between discrete macroeconomic states.
MrkvArray[i,j] is probability of being in state j next period conditional
on being in state i this period.
PermGroFac : float
Expected permanent income growth factor at the end of this period,
for the *individual*'s productivity.
PermGroFacAgg : [float]
Expected aggregate productivity growth in each Markov macro state.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : [function]
Aggregate savings as a function of aggregate market resources, for each
Markov macro state.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpola-
tions) and marginal value function vPfunc. | Below is the the instruction that describes the task:
### Input:
Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). Moreover, the macroeconomic
state follows a Markov process that determines the income distribution and
aggregate permanent growth factor. This is a basic solver that can't handle
cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [[np.array]]
A list of lists, each containing five arrays of floats, representing a
discrete approximation to the income process between the period being
solved and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
MrkvArray : np.array
Markov transition matrix between discrete macroeconomic states.
MrkvArray[i,j] is probability of being in state j next period conditional
on being in state i this period.
PermGroFac : float
Expected permanent income growth factor at the end of this period,
for the *individual*'s productivity.
PermGroFacAgg : [float]
Expected aggregate productivity growth in each Markov macro state.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : [function]
Aggregate savings as a function of aggregate market resources, for each
Markov macro state.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpola-
tions) and marginal value function vPfunc.
### Response:
def solveConsAggMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,MrkvArray,
PermGroFac,PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid,
AFunc,Rfunc,wFunc,DeprFac):
'''
Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). Moreover, the macroeconomic
state follows a Markov process that determines the income distribution and
aggregate permanent growth factor. This is a basic solver that can't handle
cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [[np.array]]
A list of lists, each containing five arrays of floats, representing a
discrete approximation to the income process between the period being
solved and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
MrkvArray : np.array
Markov transition matrix between discrete macroeconomic states.
MrkvArray[i,j] is probability of being in state j next period conditional
on being in state i this period.
PermGroFac : float
Expected permanent income growth factor at the end of this period,
for the *individual*'s productivity.
PermGroFacAgg : [float]
Expected aggregate productivity growth in each Markov macro state.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : [function]
Aggregate savings as a function of aggregate market resources, for each
Markov macro state.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpola-
tions) and marginal value function vPfunc.
'''
# Get sizes of grids
aCount = aXtraGrid.size
Mcount = Mgrid.size
StateCount = MrkvArray.shape[0]
# Loop through next period's states, assuming we reach each one at a time.
# Construct EndOfPrdvP_cond functions for each state.
EndOfPrdvPfunc_cond = []
BoroCnstNat_cond = []
for j in range(StateCount):
# Unpack next period's solution
vPfuncNext = solution_next.vPfunc[j]
mNrmMinNext = solution_next.mNrmMin[j]
# Unpack the income shocks
ShkPrbsNext = IncomeDstn[j][0]
PermShkValsNext = IncomeDstn[j][1]
TranShkValsNext = IncomeDstn[j][2]
PermShkAggValsNext = IncomeDstn[j][3]
TranShkAggValsNext = IncomeDstn[j][4]
ShkCount = ShkPrbsNext.size
aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount,1)),(Mcount,1,ShkCount))
# Make tiled versions of the income shocks
# Dimension order: Mnow, aNow, Shk
ShkPrbsNext_tiled = np.tile(np.reshape(ShkPrbsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkValsNext_tiled = np.tile(np.reshape(PermShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkValsNext_tiled = np.tile(np.reshape(TranShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
PermShkAggValsNext_tiled = np.tile(np.reshape(PermShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
TranShkAggValsNext_tiled = np.tile(np.reshape(TranShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1))
# Make a tiled grid of end-of-period aggregate assets. These lines use
# next prd state j's aggregate saving rule to get a relevant set of Aagg,
# which will be used to make an interpolated EndOfPrdvP_cond function.
# After constructing these functions, we will use the aggregate saving
# rule for *current* state i to get values of Aagg at which to evaluate
# these conditional marginal value functions. In the strange, maybe even
# impossible case where the aggregate saving rules differ wildly across
# macro states *and* there is "anti-persistence", so that the macro state
# is very likely to change each period, then this procedure will lead to
# an inaccurate solution because the grid of Aagg values on which the
# conditional marginal value functions are constructed is not relevant
# to the values at which it will actually be evaluated.
AaggGrid = AFunc[j](Mgrid)
AaggNow_tiled = np.tile(np.reshape(AaggGrid,(Mcount,1,1)),(1,aCount,ShkCount))
# Calculate returns to capital and labor in the next period
kNext_array = AaggNow_tiled/(PermGroFacAgg[j]*PermShkAggValsNext_tiled) # Next period's aggregate capital to labor ratio
kNextEff_array = kNext_array/TranShkAggValsNext_tiled # Same thing, but account for *transitory* shock
R_array = Rfunc(kNextEff_array) # Interest factor on aggregate assets
Reff_array = R_array/LivPrb # Effective interest factor on individual assets *for survivors*
wEff_array = wFunc(kNextEff_array)*TranShkAggValsNext_tiled # Effective wage rate (accounts for labor supply)
PermShkTotal_array = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled*PermShkAggValsNext_tiled # total / combined permanent shock
Mnext_array = kNext_array*R_array + wEff_array # next period's aggregate market resources
# Find the natural borrowing constraint for each value of M in the Mgrid.
# There is likely a faster way to do this, but someone needs to do the math:
# is aNrmMin determined by getting the worst shock of all four types?
aNrmMin_candidates = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\
(mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:])
aNrmMin_vec = np.max(aNrmMin_candidates,axis=1)
BoroCnstNat_vec = aNrmMin_vec
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
# Calculate market resources next period (and a constant array of capital-to-labor ratio)
mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array
# Find marginal value next period at every income shock realization and every aggregate market resource gridpoint
vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array)
# Calculate expected marginal value at the end of the period at every asset gridpoint
EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2)
# Make the conditional end-of-period marginal value function
BoroCnstNat = LinearInterp(np.insert(AaggGrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
EndOfPrdvPnvrs = np.concatenate((np.zeros((Mcount,1)),EndOfPrdvP**(-1./CRRA)),axis=1)
EndOfPrdvPnvrsFunc_base = BilinearInterp(np.transpose(EndOfPrdvPnvrs),np.insert(aXtraGrid,0,0.0),AaggGrid)
EndOfPrdvPnvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvPnvrsFunc_base,BoroCnstNat)
EndOfPrdvPfunc_cond.append(MargValueFunc2D(EndOfPrdvPnvrsFunc,CRRA))
BoroCnstNat_cond.append(BoroCnstNat)
# Prepare some objects that are the same across all current states
aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount)),(Mcount,1))
cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0]))
# Now loop through *this* period's discrete states, calculating end-of-period
# marginal value (weighting across state transitions), then construct consumption
# and marginal value function for each state.
cFuncNow = []
vPfuncNow = []
mNrmMinNow = []
for i in range(StateCount):
# Find natural borrowing constraint for this state by Aagg
AaggNow = AFunc[i](Mgrid)
aNrmMin_candidates = np.zeros((StateCount,Mcount)) + np.nan
for j in range(StateCount):
if MrkvArray[i,j] > 0.: # Irrelevant if transition is impossible
aNrmMin_candidates[j,:] = BoroCnstNat_cond[j](AaggNow)
aNrmMin_vec = np.nanmax(aNrmMin_candidates,axis=0)
BoroCnstNat_vec = aNrmMin_vec
# Make tiled grids of aNrm and Aagg
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1)),(1,aCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
AaggNow_tiled = np.tile(np.reshape(AaggNow,(Mcount,1)),(1,aCount))
# Loop through feasible transitions and calculate end-of-period marginal value
EndOfPrdvP = np.zeros((Mcount,aCount))
for j in range(StateCount):
if MrkvArray[i,j] > 0.:
temp = EndOfPrdvPfunc_cond[j](aNrmNow_tiled,AaggNow_tiled)
EndOfPrdvP += MrkvArray[i,j]*temp
# Calculate consumption and the endogenous mNrm gridpoints for this state
cNrmNow = EndOfPrdvP**(-1./CRRA)
mNrmNow = aNrmNow_tiled + cNrmNow
# Loop through the values in Mgrid and make a piecewise linear consumption function for each
cFuncBaseByM_list = []
for n in range(Mcount):
c_temp = np.insert(cNrmNow[n,:],0,0.0) # Add point at bottom
m_temp = np.insert(mNrmNow[n,:] - BoroCnstNat_vec[n],0,0.0)
cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp))
# Add the M-specific consumption function to the list
# Construct the unconstrained consumption function by combining the M-specific functions
BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid)
cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat)
# Combine the constrained consumption function with unconstrained component
cFuncNow.append(LowerEnvelope2D(cFuncUnc,cFuncCnst))
# Make the minimum m function as the greater of the natural and artificial constraints
mNrmMinNow.append(UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt)))
# Construct the marginal value function using the envelope condition
vPfuncNow.append(MargValueFunc2D(cFuncNow[-1],CRRA))
# Pack up and return the solution
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow)
return solution_now |
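The solver above leans on one numpy convention throughout: every array is tiled out to a common (Mcount, aCount, ShkCount) grid so that expectations reduce to a probability-weighted sum over the last (shock) axis. A self-contained sketch of that convention, with made-up grid sizes:

import numpy as np

Mcount, aCount, ShkCount = 3, 4, 5
aXtraGrid = np.linspace(0.1, 2.0, aCount)
ShkPrbs = np.full(ShkCount, 1.0 / ShkCount)   # equiprobable shocks, for illustration

# Dimension order matches the solver's comment: (Mnow, aNow, Shk)
a_tiled = np.tile(np.reshape(aXtraGrid, (1, aCount, 1)), (Mcount, 1, ShkCount))
prb_tiled = np.tile(np.reshape(ShkPrbs, (1, 1, ShkCount)), (Mcount, aCount, 1))

# Taking an expectation is then just a weighted sum over axis 2.
expectation = np.sum(a_tiled * prb_tiled, axis=2)
print(expectation.shape)   # (3, 4), i.e. (Mcount, aCount)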
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception which exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError | Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception which exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis | Below is the the instruction that describes the task:
### Input:
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception which exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
### Response:
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception which exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError |
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json") | Process Json data
:@param data
:@type data: json/dict
:throws TypeError | Below is the the instruction that describes the task:
### Input:
Process Json data
:@param data
:@type data: json/dict
:throws TypeError
### Response:
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json") |
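A standalone sketch of the validate-then-snapshot pattern used by __parse_json_data: keep the raw input, run later work against a deep copy so it can never mutate the original. The function name is an assumption for illustration.

import copy

def parse_json_data(data):
    if not isinstance(data, (dict, list)):
        raise TypeError("Provided Data is not json")
    raw_data = data
    working_copy = copy.deepcopy(raw_data)
    return raw_data, working_copy

raw, working = parse_json_data({"users": [{"id": 1}]})
working["users"].append({"id": 2})
print(len(raw["users"]), len(working["users"]))  # 1 2 -- the original is untouched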
def extract(self, text: str, confidence=0.5, filter=['Person', 'Place', 'Organisation']) -> List[Extraction]:
"""
Extract with the input text, confidence and fields filter to be used.
Args:
text (str): text input to be annotated
confidence (float): the confidence of the annotation
filter (List[str]): the fields to be extracted
Returns:
List[Extraction]
"""
filter = ','.join(filter)
search_data = [('confidence', confidence),
('text', text),
('types', filter)]
search_headers = {'Accept': 'application/json'}
r = requests.post(self._search_url,
data=search_data,
headers=search_headers)
results = r.json()
last_results = self._combiner(results)
return last_results | Extract with the input text, confidence and fields filter to be used.
Args:
text (str): text input to be annotated
confidence (float): the confidence of the annotation
filter (List[str]): the fields to be extracted
Returns:
List[Extraction] | Below is the the instruction that describes the task:
### Input:
Extract with the input text, confidence and fields filter to be used.
Args:
text (str): text input to be annotated
confidence (float): the confidence of the annotation
filter (List[str]): the fields to be extracted
Returns:
List[Extraction]
### Response:
def extract(self, text: str, confidence=0.5, filter=['Person', 'Place', 'Organisation']) -> List[Extraction]:
"""
Extract with the input text, confidence and fields filter to be used.
Args:
text (str): text input to be annotated
confidence (float): the confidence of the annotation
filter (List[str]): the fields to be extracted
Returns:
List[Extraction]
"""
filter = ','.join(filter)
search_data = [('confidence', confidence),
('text', text),
('types', filter)]
search_headers = {'Accept': 'application/json'}
r = requests.post(self._search_url,
data=search_data,
headers=search_headers)
results = r.json()
last_results = self._combiner(results)
return last_results |
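For reference, the request the method builds looks like the sketch below when pointed at the public DBpedia Spotlight service. The endpoint URL is an assumption (the class above posts to whatever self._search_url was configured with), and the result-combining step is omitted.

import requests

resp = requests.post(
    "https://api.dbpedia-spotlight.org/en/annotate",  # assumed public endpoint
    data=[("confidence", 0.5),
          ("text", "Berlin is the capital of Germany."),
          ("types", "Place,Person,Organisation")],
    headers={"Accept": "application/json"},
)
print(resp.status_code, sorted(resp.json().keys()))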
def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback):
"""Asynchronously complete a named debug command.
The command name and arguments are passed to the underlying device adapter
and interpreted there. If the command is long running, progress_callback
may be used to provide status updates. Callback is called when the command
has finished.
Args:
conn_id (int): A unique identifier that will refer to this connection
cmd_name (string): the name of the debug command we want to invoke
cmd_args (dict): any arguments that we want to send with this command.
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
"""
done = threading.Event()
result = {}
def _debug_done(conn_id, adapter_id, success, retval, reason):
result['success'] = success
result['failure_reason'] = reason
result['return_value'] = retval
done.set()
self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done)
done.wait()
return result | Synchronously complete a named debug command.
The command name and arguments are passed to the underlying device adapter
and interpreted there. If the command is long running, progress_callback
may be used to provide status updates. Callback is called when the command
has finished.
Args:
conn_id (int): A unique identifier that will refer to this connection
cmd_name (string): the name of the debug command we want to invoke
cmd_args (dict): any arguments that we want to send with this command.
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count) | Below is the the instruction that describes the task:
### Input:
Synchronously complete a named debug command.
The command name and arguments are passed to the underlying device adapter
and interpreted there. If the command is long running, progress_callback
may be used to provide status updates. Callback is called when the command
has finished.
Args:
conn_id (int): A unique identifier that will refer to this connection
cmd_name (string): the name of the debug command we want to invoke
cmd_args (dict): any arguments that we want to send with this command.
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
### Response:
def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback):
"""Asynchronously complete a named debug command.
The command name and arguments are passed to the underlying device adapter
and interpreted there. If the command is long running, progress_callback
may be used to provide status updates. Callback is called when the command
has finished.
Args:
conn_id (int): A unique identifier that will refer to this connection
cmd_name (string): the name of the debug command we want to invoke
cmd_args (dict): any arguments that we want to send with this command.
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
"""
done = threading.Event()
result = {}
def _debug_done(conn_id, adapter_id, success, retval, reason):
result['success'] = success
result['failure_reason'] = reason
result['return_value'] = retval
done.set()
self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done)
done.wait()
return result |
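The synchronous wrapper above is a generic block-until-callback pattern; here is a self-contained sketch of it, with a threading.Timer standing in for the real asynchronous adapter call.

import threading

def fake_debug_async(callback):
    # Stand-in for debug_async: fire the completion callback from another thread.
    threading.Timer(0.1, callback, args=(0, 1, True, 42, None)).start()

def debug_sync_sketch():
    done = threading.Event()
    result = {}

    def _debug_done(conn_id, adapter_id, success, retval, reason):
        result['success'] = success
        result['failure_reason'] = reason
        result['return_value'] = retval
        done.set()       # wake the waiting caller

    fake_debug_async(_debug_done)
    done.wait()          # block until the callback has filled in `result`
    return result

print(debug_sync_sketch())  # {'success': True, 'failure_reason': None, 'return_value': 42}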
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
"""Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool.
"""
if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables.
if on_success:
on_success([])
else:
def do_work(*args):
self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent, on_failure=on_failure)
return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success) | Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool. | Below is the the instruction that describes the task:
### Input:
Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool.
### Response:
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
"""Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool.
"""
if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables.
if on_success:
on_success([])
else:
def do_work(*args):
self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent, on_failure=on_failure)
return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success) |
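A self-contained sketch of the map_async-with-callback pattern the method relies on, using a ThreadPool so it runs anywhere without pickling concerns; the worker and callback below are illustrative stand-ins.

from multiprocessing.pool import ThreadPool

def do_work(args):
    x, y = args            # each element of args_tuples arrives as one tuple
    return x + y

def on_success(results):
    print("all work succeeded:", results)   # called once, with results in order

pool = ThreadPool(2)
async_result = pool.map_async(do_work, [(1, 2), (3, 4)], chunksize=1,
                              callback=on_success)
async_result.wait()
pool.close()
pool.join()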
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
section_titles=['Schema','Settings']
vocab_length = len(self.vocabulary)
verbose = self.verbose == 1
sections=[
[
('Vocabulary Size',_precomputed_field(vocab_length))
],
[
('Number of Topics', 'num_topics'),
('alpha','alpha'),
('beta','beta'),
('Iterations', 'num_iterations'),
('Training time', 'training_time'),
('Verbose', _precomputed_field(verbose))
]
]
return (sections, section_titles) | Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object. | Below is the the instruction that describes the task:
### Input:
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
### Response:
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
section_titles=['Schema','Settings']
vocab_length = len(self.vocabulary)
verbose = self.verbose == 1
sections=[
[
('Vocabulary Size',_precomputed_field(vocab_length))
],
[
('Number of Topics', 'num_topics'),
('alpha','alpha'),
('beta','beta'),
('Iterations', 'num_iterations'),
('Training time', 'training_time'),
('Verbose', _precomputed_field(verbose))
]
]
return (sections, section_titles) |
def run(self, b, compute, times=[], **kwargs):
"""
if within mpirun, workers should call _run_worker instead of run
"""
self.run_checks(b, compute, times, **kwargs)
logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs))
packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs)
if mpi.enabled:
# broadcast the packet to ALL workers
mpi.comm.bcast(packet, root=0)
# now even the master can become a worker and take on a chunk
packet['b'] = b
rpacketlists = self._run_chunk(**packet)
# now receive all packetlists
rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0)
else:
rpacketlists_per_worker = [self._run_chunk(**packet)]
return self._fill_syns(new_syns, rpacketlists_per_worker) | if within mpirun, workers should call _run_worker instead of run | Below is the the instruction that describes the task:
### Input:
if within mpirun, workers should call _run_worker instead of run
### Response:
def run(self, b, compute, times=[], **kwargs):
"""
if within mpirun, workers should call _run_worker instead of run
"""
self.run_checks(b, compute, times, **kwargs)
logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs))
packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs)
if mpi.enabled:
# broadcast the packet to ALL workers
mpi.comm.bcast(packet, root=0)
# now even the master can become a worker and take on a chunk
packet['b'] = b
rpacketlists = self._run_chunk(**packet)
# now receive all packetlists
rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0)
else:
rpacketlists_per_worker = [self._run_chunk(**packet)]
return self._fill_syns(new_syns, rpacketlists_per_worker) |
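A hedged sketch of the broadcast/gather choreography above, written with mpi4py as a stand-in for the project's own mpi wrapper (an assumption); run it under mpirun for it to do anything interesting.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

packet = {"times": list(range(8))} if rank == 0 else None
packet = comm.bcast(packet, root=0)          # every rank receives the packet

chunk = packet["times"][rank::size]          # the master takes a chunk too
partial = [t * t for t in chunk]             # stand-in for _run_chunk

gathered = comm.gather(partial, root=0)      # root collects all packetlists
if rank == 0:
    print(gathered)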
def match(self, text):
"""If text is matched with pattern, return variable names specified(%{pattern:variable name})
in pattern and their corresponding values.If not matched, return None.
custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair)
or custom_patterns_dir.
"""
match_obj = None
if self.fullmatch:
match_obj = self.regex_obj.fullmatch(text)
else:
match_obj = self.regex_obj.search(text)
if match_obj == None:
return None
matches = match_obj.groupdict()
for key,match in matches.items():
try:
if self.type_mapper[key] == 'int':
matches[key] = int(match)
if self.type_mapper[key] == 'float':
matches[key] = float(match)
except (TypeError, KeyError) as e:
pass
return matches | If text is matched with pattern, return variable names specified(%{pattern:variable name})
in pattern and their corresponding values.If not matched, return None.
custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair)
or custom_patterns_dir. | Below is the instruction that describes the task:
### Input:
If text is matched with pattern, return variable names specified(%{pattern:variable name})
in pattern and their corresponding values. If not matched, return None.
custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair)
or custom_patterns_dir.
### Response:
def match(self, text):
"""If text is matched with pattern, return variable names specified(%{pattern:variable name})
in pattern and their corresponding values. If not matched, return None.
custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair)
or custom_patterns_dir.
"""
match_obj = None
if self.fullmatch:
match_obj = self.regex_obj.fullmatch(text)
else:
match_obj = self.regex_obj.search(text)
if match_obj is None:
return None
matches = match_obj.groupdict()
for key,match in matches.items():
try:
if self.type_mapper[key] == 'int':
matches[key] = int(match)
if self.type_mapper[key] == 'float':
matches[key] = float(match)
except (TypeError, KeyError) as e:
pass
return matches |
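The class above wraps a compiled pattern with named groups plus a type map; the same idea can be shown in a few self-contained lines. The pattern, field names and type_mapper below are invented for the example and are not part of the original library.

import re

regex_obj = re.compile(r'(?P<client>\S+) (?P<bytes>\d+)')   # hypothetical pattern
type_mapper = {'bytes': 'int'}                              # hypothetical type map

def match(text, fullmatch=False):
    m = regex_obj.fullmatch(text) if fullmatch else regex_obj.search(text)
    if m is None:
        return None
    matches = m.groupdict()
    for key, value in matches.items():
        if type_mapper.get(key) == 'int':
            matches[key] = int(value)
        elif type_mapper.get(key) == 'float':
            matches[key] = float(value)
    return matches

print(match('10.0.0.1 1024'))   # {'client': '10.0.0.1', 'bytes': 1024}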
def get_artifact_info(self):
"""Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable.
"""
exported = bool(self.provides)
org = self.provides.org if exported else 'internal'
name = self.provides.name if exported else self.identifier
# TODO(John Sirois): This should return something less than a JarDependency encapsulating just
# the org and name. Perhaps a JarFamily?
return JarDependency(org=org, name=name, rev=None), exported | Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable. | Below is the instruction that describes the task:
### Input:
Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable.
### Response:
def get_artifact_info(self):
"""Returns a tuple composed of a :class:`pants.java.jar.JarDependency`
describing the jar for this target and a bool indicating if this target is exportable.
"""
exported = bool(self.provides)
org = self.provides.org if exported else 'internal'
name = self.provides.name if exported else self.identifier
# TODO(John Sirois): This should return something less than a JarDependency encapsulating just
# the org and name. Perhaps a JarFamily?
return JarDependency(org=org, name=name, rev=None), exported |
def _load_github_repo():
""" Loads the GitHub repository from the users config. """
if 'TRAVIS' in os.environ:
raise RuntimeError('Detected that we are running in Travis. '
'Stopping to prevent infinite loops.')
try:
with open(os.path.join(config_dir, 'repo'), 'r') as f:
return f.read()
except (OSError, IOError):
raise RuntimeError('Could not find your repository. '
'Have you ran `trytravis --repo`?') | Loads the GitHub repository from the users config. | Below is the instruction that describes the task:
### Input:
Loads the GitHub repository from the users config.
### Response:
def _load_github_repo():
""" Loads the GitHub repository from the users config. """
if 'TRAVIS' in os.environ:
raise RuntimeError('Detected that we are running in Travis. '
'Stopping to prevent infinite loops.')
try:
with open(os.path.join(config_dir, 'repo'), 'r') as f:
return f.read()
except (OSError, IOError):
raise RuntimeError('Could not find your repository. '
'Have you ran `trytravis --repo`?') |
def allocate_elastic_ip(self):
"""Allocates an elastic IP address
:return: Dict with allocation ID and Public IP that were created
:raises: AWSAPIError, EC2UtilError
"""
log = logging.getLogger(self.cls_logger + '.allocate_elastic_ip')
# Attempt to allocate a new elastic IP
log.info('Attempting to allocate an elastic IP...')
try:
response = self.client.allocate_address(
DryRun=False,
Domain='vpc'
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to allocate a new elastic IP address\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
allocation_id = response['AllocationId']
public_ip = response['PublicIp']
log.info('Allocated Elastic IP with ID {a} and Public IP address {p}'.
format(a=allocation_id, p=public_ip))
# Verify the Address was allocated successfully
log.info('Verifying the elastic IP address was allocated and is available '
'for use...')
ready = False
verification_timer = [2]*60 + [5]*60 + [10]*18
num_checks = len(verification_timer)
for i in range(0, num_checks):
wait_time = verification_timer[i]
try:
self.client.describe_addresses(
DryRun=False,
AllocationIds=[allocation_id]
)
except ClientError:
_, ex, trace = sys.exc_info()
log.info('Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in '
'{w} sec...\n{e}'.format(p=public_ip, a=allocation_id, w=wait_time, e=str(ex)))
time.sleep(wait_time)
else:
log.info('Elastic IP {p} with Allocation ID {a} is available for use'.format(
p=public_ip, a=allocation_id))
ready = True
break
if ready:
return {'AllocationId': allocation_id, 'PublicIp': public_ip}
else:
msg = 'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'. \
format(p=public_ip, a=allocation_id)
log.error(msg)
raise EC2UtilError(msg) | Allocates an elastic IP address
:return: Dict with allocation ID and Public IP that were created
:raises: AWSAPIError, EC2UtilError | Below is the instruction that describes the task:
### Input:
Allocates an elastic IP address
:return: Dict with allocation ID and Public IP that were created
:raises: AWSAPIError, EC2UtilError
### Response:
def allocate_elastic_ip(self):
"""Allocates an elastic IP address
:return: Dict with allocation ID and Public IP that were created
:raises: AWSAPIError, EC2UtilError
"""
log = logging.getLogger(self.cls_logger + '.allocate_elastic_ip')
# Attempt to allocate a new elastic IP
log.info('Attempting to allocate an elastic IP...')
try:
response = self.client.allocate_address(
DryRun=False,
Domain='vpc'
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to allocate a new elastic IP address\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
allocation_id = response['AllocationId']
public_ip = response['PublicIp']
log.info('Allocated Elastic IP with ID {a} and Public IP address {p}'.
format(a=allocation_id, p=public_ip))
# Verify the Address was allocated successfully
log.info('Verifying the elastic IP address was allocated and is available '
'for use...')
ready = False
verification_timer = [2]*60 + [5]*60 + [10]*18
num_checks = len(verification_timer)
for i in range(0, num_checks):
wait_time = verification_timer[i]
try:
self.client.describe_addresses(
DryRun=False,
AllocationIds=[allocation_id]
)
except ClientError:
_, ex, trace = sys.exc_info()
log.info('Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in '
'{w} sec...\n{e}'.format(p=public_ip, a=allocation_id, w=wait_time, e=str(ex)))
time.sleep(wait_time)
else:
log.info('Elastic IP {p} with Allocation ID {a} is available for use'.format(
p=public_ip, a=allocation_id))
ready = True
break
if ready:
return {'AllocationId': allocation_id, 'PublicIp': public_ip}
else:
msg = 'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'. \
format(p=public_ip, a=allocation_id)
log.error(msg)
raise EC2UtilError(msg) |
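A much-reduced sketch of the same allocate-then-verify flow written directly against boto3; the region, the short back-off schedule and the bare RuntimeError are assumptions made for brevity, and valid AWS credentials are required for it to run.

import time
import boto3
from botocore.exceptions import ClientError

ec2 = boto3.client('ec2', region_name='us-east-1')   # region is an assumption

alloc = ec2.allocate_address(Domain='vpc')
allocation_id, public_ip = alloc['AllocationId'], alloc['PublicIp']

for wait in (2, 2, 5, 5, 10):                         # far shorter than the schedule above
    try:
        ec2.describe_addresses(AllocationIds=[allocation_id])
        print('Elastic IP %s ready (allocation %s)' % (public_ip, allocation_id))
        break
    except ClientError:
        time.sleep(wait)
else:
    raise RuntimeError('Could not verify Elastic IP %s' % public_ip)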
def _get_fields(mcs, bases, namespace):
"""Create fields dictionary to be used in resource class namespace.
Pop all field objects from attributes dict (namespace) and store them
under _fields_storage_key attribute. Also collect all fields from base
classes in order that ensures fields can be overridden.
Args:
bases: all base classes of created serializer class
namespace (dict): namespace as dictionary of attributes
"""
fields = [
(name, namespace.pop(name))
for name, attribute
in list(namespace.items())
if isinstance(attribute, BaseField)
]
for base in reversed(bases):
if hasattr(base, mcs._fields_storage_key):
fields = list(
getattr(base, mcs._fields_storage_key).items()
) + fields
return OrderedDict(fields) | Create fields dictionary to be used in resource class namespace.
Pop all field objects from attributes dict (namespace) and store them
under _fields_storage_key attribute. Also collect all fields from base
classes in order that ensures fields can be overridden.
Args:
bases: all base classes of created serializer class
namespace (dict): namespace as dictionary of attributes | Below is the instruction that describes the task:
### Input:
Create fields dictionary to be used in resource class namespace.
Pop all field objects from attributes dict (namespace) and store them
under _fields_storage_key attribute. Also collect all fields from base
classes in order that ensures fields can be overridden.
Args:
bases: all base classes of created serializer class
namespace (dict): namespace as dictionary of attributes
### Response:
def _get_fields(mcs, bases, namespace):
"""Create fields dictionary to be used in resource class namespace.
Pop all field objects from attributes dict (namespace) and store them
under _fields_storage_key attribute. Also collect all fields from base
classes in order that ensures fields can be overridden.
Args:
bases: all base classes of created serializer class
namespace (dict): namespace as dictionary of attributes
"""
fields = [
(name, namespace.pop(name))
for name, attribute
in list(namespace.items())
if isinstance(attribute, BaseField)
]
for base in reversed(bases):
if hasattr(base, mcs._fields_storage_key):
fields = list(
getattr(base, mcs._fields_storage_key).items()
) + fields
return OrderedDict(fields) |
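The metaclass pattern behind _get_fields can be boiled down to a self-contained sketch; BaseField, DeclarativeMeta, Resource and the storage attribute name below are invented for illustration and do not mirror the original package exactly.

from collections import OrderedDict

class BaseField:
    pass

class DeclarativeMeta(type):
    _fields_storage_key = '_declared_fields'

    def __new__(mcs, name, bases, namespace):
        # Pop declared fields out of the class namespace, keeping definition order.
        fields = [(k, namespace.pop(k)) for k, v in list(namespace.items())
                  if isinstance(v, BaseField)]
        # Prepend base-class fields so subclasses can override them by name.
        for base in reversed(bases):
            if hasattr(base, mcs._fields_storage_key):
                fields = list(getattr(base, mcs._fields_storage_key).items()) + fields
        namespace[mcs._fields_storage_key] = OrderedDict(fields)
        return super().__new__(mcs, name, bases, namespace)

class Resource(metaclass=DeclarativeMeta):
    name = BaseField()
    age = BaseField()

print(list(Resource._declared_fields))   # ['name', 'age']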
def gets(self, conn, key, default=None):
"""Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes`` tuple with the value and the cas token
"""
values, cas_tokens = yield from self._multi_get(
conn, key, with_cas=True)
return values.get(key, default), cas_tokens.get(key) | Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes`` tuple with the value and the cas token | Below is the instruction that describes the task:
### Input:
Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes`` tuple with the value and the cas token
### Response:
def gets(self, conn, key, default=None):
"""Gets a single value from the server together with the cas token.
:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes`` tuple with the value and the cas token
"""
values, cas_tokens = yield from self._multi_get(
conn, key, with_cas=True)
return values.get(key, default), cas_tokens.get(key) |
def flick(self, xspeed, yspeed):
"""
Flicks, starting anywhere on the screen.
:Args:
- xspeed: The X speed in pixels per second.
- yspeed: The Y speed in pixels per second.
"""
self._actions.append(lambda: self._driver.execute(
Command.FLICK, {
'xspeed': int(xspeed),
'yspeed': int(yspeed)}))
return self | Flicks, starting anywhere on the screen.
:Args:
- xspeed: The X speed in pixels per second.
- yspeed: The Y speed in pixels per second. | Below is the instruction that describes the task:
### Input:
Flicks, starting anywhere on the screen.
:Args:
- xspeed: The X speed in pixels per second.
- yspeed: The Y speed in pixels per second.
### Response:
def flick(self, xspeed, yspeed):
"""
Flicks, starting anywhere on the screen.
:Args:
- xspeed: The X speed in pixels per second.
- yspeed: The Y speed in pixels per second.
"""
self._actions.append(lambda: self._driver.execute(
Command.FLICK, {
'xspeed': int(xspeed),
'yspeed': int(yspeed)}))
return self |
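Callers normally reach this through Selenium's TouchActions builder, roughly as sketched below. This is only a usage sketch: the driver setup is omitted, and TouchActions has been deprecated and later removed in newer Selenium releases, so check the version you have installed.

from selenium.webdriver.common.touch_actions import TouchActions

def flick_down(driver, speed=400):
    # `driver` is assumed to be an existing, touch-capable WebDriver instance.
    actions = TouchActions(driver)
    actions.flick(0, speed)   # X speed 0, Y speed `speed`, in pixels per second
    actions.perform()         # queued actions are only sent when perform() is called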
def _result(self) -> ResultLazyType:
"""
``self.config.replacer_function``(``Callable[[str], str]``) must exist.
"""
config = cast(IntervalsCollectionBasedReplacerConfig, self.config)
diff_acc = 0
for interval, aggregated_mark in self.continuous_intervals():
start, end = interval
processed_start = start + diff_acc
processed_end = end + diff_acc
segment = self.input_sequence[start:end]
if aggregated_mark is not None:
processed_segment = config.labeler2repl[cast(Type[workflow.IntervalLabeler],
aggregated_mark)](segment)
if not processed_segment:
# segment is removed.
processed_end = processed_start
else:
processed_end = processed_start + len(processed_segment)
diff_acc += len(processed_segment) - len(segment)
segment = processed_segment
yield segment, (interval, (processed_start, processed_end), aggregated_mark is not None) | ``self.config.replacer_function``(``Callable[[str], str]``) must exist. | Below is the instruction that describes the task:
### Input:
``self.config.replacer_function``(``Callable[[str], str]``) must exist.
### Response:
def _result(self) -> ResultLazyType:
"""
``self.config.replacer_function``(``Callable[[str], str]``) must exist.
"""
config = cast(IntervalsCollectionBasedReplacerConfig, self.config)
diff_acc = 0
for interval, aggregated_mark in self.continuous_intervals():
start, end = interval
processed_start = start + diff_acc
processed_end = end + diff_acc
segment = self.input_sequence[start:end]
if aggregated_mark is not None:
processed_segment = config.labeler2repl[cast(Type[workflow.IntervalLabeler],
aggregated_mark)](segment)
if not processed_segment:
# segment is removed.
processed_end = processed_start
else:
processed_end = processed_start + len(processed_segment)
diff_acc += len(processed_segment) - len(segment)
segment = processed_segment
yield segment, (interval, (processed_start, processed_end), aggregated_mark is not None) |
def convert(self, request, response, data):
"""
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
"""
# Notes are in bark.notes dictionary
return self.escape(request.environ.get('bark.notes', {}).get(
self.modifier.param, '-')) | Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion. | Below is the instruction that describes the task:
### Input:
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
### Response:
def convert(self, request, response, data):
"""
Performs the desired Conversion.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary returned by the prepare()
method.
:returns: A string, the results of which are the desired
conversion.
"""
# Notes are in bark.notes dictionary
return self.escape(request.environ.get('bark.notes', {}).get(
self.modifier.param, '-')) |
def match_rows(rows1, rows2, key, sort_keys=True):
"""
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
"""
matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
for row in rows:
val = row[key]
try:
data = matched[val]
except KeyError:
matched[val] = ([], [])
data = matched[val]
data[i].append(row)
vals = matched.keys()
if sort_keys:
vals = sorted(vals, key=safe_int)
for val in vals:
left, right = matched[val]
yield (val, left, right) | Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order | Below is the instruction that describes the task:
### Input:
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
### Response:
def match_rows(rows1, rows2, key, sort_keys=True):
"""
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
"""
matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
for row in rows:
val = row[key]
try:
data = matched[val]
except KeyError:
matched[val] = ([], [])
data = matched[val]
data[i].append(row)
vals = matched.keys()
if sort_keys:
vals = sorted(vals, key=safe_int)
for val in vals:
left, right = matched[val]
yield (val, left, right) |
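The grouping step can be illustrated with plain dict rows; the key name and the data below are made up, and the snippet re-implements just the core of the function rather than importing the original module.

from collections import OrderedDict

rows1 = [{'id': '1', 'form': 'dog'}, {'id': '2', 'form': 'cat'}]
rows2 = [{'id': '2', 'form': 'cats'}, {'id': '3', 'form': 'bird'}]

matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
    for row in rows:
        matched.setdefault(row['id'], ([], []))[i].append(row)

for val, (left, right) in matched.items():
    print(val, len(left), len(right))
# 1 1 0   -> only in rows1
# 2 1 1   -> present on both sides
# 3 0 1   -> only in rows2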
def _spanning_tree_algorithm(self):
""" Update tree roles.
- Root bridge:
all ports are DESIGNATED_PORT.
- Non root bridge:
select one ROOT_PORT and some DESIGNATED_PORT,
and the other ports are set to NON_DESIGNATED_PORT."""
port_roles = {}
root_port = self._select_root_port()
if root_port is None:
# My bridge is a root bridge.
self.logger.info('Root bridge.', extra=self.dpid_str)
root_priority = self.root_priority
root_times = self.root_times
for port_no in self.ports:
if self.ports[port_no].state is not PORT_STATE_DISABLE:
port_roles[port_no] = DESIGNATED_PORT
else:
# Other bridge is a root bridge.
self.logger.info('Non root bridge.', extra=self.dpid_str)
root_priority = root_port.designated_priority
root_times = root_port.designated_times
port_roles[root_port.ofport.port_no] = ROOT_PORT
d_ports = self._select_designated_port(root_port)
for port_no in d_ports:
port_roles[port_no] = DESIGNATED_PORT
for port in self.ports.values():
if port.state is not PORT_STATE_DISABLE:
port_roles.setdefault(port.ofport.port_no,
NON_DESIGNATED_PORT)
return port_roles, root_priority, root_times | Update tree roles.
- Root bridge:
all ports are DESIGNATED_PORT.
- Non root bridge:
select one ROOT_PORT and some DESIGNATED_PORT,
and the other ports are set to NON_DESIGNATED_PORT. | Below is the instruction that describes the task:
### Input:
Update tree roles.
- Root bridge:
all ports are DESIGNATED_PORT.
- Non root bridge:
select one ROOT_PORT and some DESIGNATED_PORT,
and the other ports are set to NON_DESIGNATED_PORT.
### Response:
def _spanning_tree_algorithm(self):
""" Update tree roles.
- Root bridge:
all ports are DESIGNATED_PORT.
- Non root bridge:
select one ROOT_PORT and some DESIGNATED_PORT,
and the other ports are set to NON_DESIGNATED_PORT."""
port_roles = {}
root_port = self._select_root_port()
if root_port is None:
# My bridge is a root bridge.
self.logger.info('Root bridge.', extra=self.dpid_str)
root_priority = self.root_priority
root_times = self.root_times
for port_no in self.ports:
if self.ports[port_no].state is not PORT_STATE_DISABLE:
port_roles[port_no] = DESIGNATED_PORT
else:
# Other bridge is a root bridge.
self.logger.info('Non root bridge.', extra=self.dpid_str)
root_priority = root_port.designated_priority
root_times = root_port.designated_times
port_roles[root_port.ofport.port_no] = ROOT_PORT
d_ports = self._select_designated_port(root_port)
for port_no in d_ports:
port_roles[port_no] = DESIGNATED_PORT
for port in self.ports.values():
if port.state is not PORT_STATE_DISABLE:
port_roles.setdefault(port.ofport.port_no,
NON_DESIGNATED_PORT)
return port_roles, root_priority, root_times |
def _remove_non_serializable_store_entries(store: Store) -> dict:
"""
Copy all serializable data into a new dict, and skip the rest.
This makes sure to keep the items during runtime, even if the user edits and saves the script.
"""
cleaned_store_data = {}
for key, value in store.items():
if Script._is_serializable(key) and Script._is_serializable(value):
cleaned_store_data[key] = value
else:
_logger.info("Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. "
"This item cannot be saved and therefore will be lost when autokey quits.".format(
key, value
))
return cleaned_store_data | Copy all serializable data into a new dict, and skip the rest.
This makes sure to keep the items during runtime, even if the user edits and saves the script. | Below is the instruction that describes the task:
### Input:
Copy all serializable data into a new dict, and skip the rest.
This makes sure to keep the items during runtime, even if the user edits and saves the script.
### Response:
def _remove_non_serializable_store_entries(store: Store) -> dict:
"""
Copy all serializable data into a new dict, and skip the rest.
This makes sure to keep the items during runtime, even if the user edits and saves the script.
"""
cleaned_store_data = {}
for key, value in store.items():
if Script._is_serializable(key) and Script._is_serializable(value):
cleaned_store_data[key] = value
else:
_logger.info("Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. "
"This item cannot be saved and therefore will be lost when autokey quits.".format(
key, value
))
return cleaned_store_data |
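The serializability test itself is defined elsewhere in the class; assuming it boils down to "can the object be pickled", the same filtering can be sketched as below. is_serializable here is a hypothetical stand-in, not the real Script._is_serializable.

import pickle

def is_serializable(value):
    # Hypothetical stand-in: an object is kept only if pickle accepts it.
    try:
        pickle.dumps(value)
        return True
    except Exception:
        return False

store = {'count': 3, 'callback': lambda: None}   # the lambda cannot be pickled
cleaned = {k: v for k, v in store.items()
           if is_serializable(k) and is_serializable(v)}
print(sorted(cleaned))   # ['count']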
def setup_cluster(self, cluster, extra_args=tuple()):
"""
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
"""
return self._run_playbook(cluster, self._playbook_path, extra_args) | Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt. | Below is the instruction that describes the task:
### Input:
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
### Response:
def setup_cluster(self, cluster, extra_args=tuple()):
"""
Configure the cluster by running an Ansible playbook.
The ElastiCluster configuration attribute `<kind>_groups`
determines, for each node kind, what Ansible groups nodes of
that kind are assigned to.
:param cluster: cluster to configure
:type cluster: :py:class:`elasticluster.cluster.Cluster`
:param list extra_args:
List of additional command-line arguments
that are appended to each invocation of the setup program.
:return: ``True`` on success, ``False`` otherwise. Please note, if nothing
has to be configured, then ``True`` is returned.
:raises: `ConfigurationError` if the playbook can not be found
or is corrupt.
"""
return self._run_playbook(cluster, self._playbook_path, extra_args) |
def create_with_claims(self, claims):
"""Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
"""
new_kwargs = dict(self._kwargs)
new_kwargs.update(claims)
result = self.__class__(self._service_account_email,
self._signer,
scopes=self._scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**new_kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result | Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens. | Below is the instruction that describes the task:
### Input:
Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
### Response:
def create_with_claims(self, claims):
"""Create credentials that specify additional claims.
Args:
claims: dict, key-value pairs for claims.
Returns:
ServiceAccountCredentials, a copy of the current service account
credentials with updated claims to use when obtaining access
tokens.
"""
new_kwargs = dict(self._kwargs)
new_kwargs.update(claims)
result = self.__class__(self._service_account_email,
self._signer,
scopes=self._scopes,
private_key_id=self._private_key_id,
client_id=self.client_id,
user_agent=self._user_agent,
**new_kwargs)
result.token_uri = self.token_uri
result.revoke_uri = self.revoke_uri
result._private_key_pkcs8_pem = self._private_key_pkcs8_pem
result._private_key_pkcs12 = self._private_key_pkcs12
result._private_key_password = self._private_key_password
return result |
def get_dataset_date(self, date_format=None):
# type: (Optional[str]) -> Optional[str]
"""Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date string or None if no date is set
"""
dataset_date = self.get_dataset_date_as_datetime()
return self._get_formatted_date(dataset_date, date_format) | Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date string or None if no date is set | Below is the instruction that describes the task:
### Input:
Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date string or None if no date is set
### Response:
def get_dataset_date(self, date_format=None):
# type: (Optional[str]) -> Optional[str]
"""Get dataset date as string in specified format. For range returns start date.
If no format is supplied, an ISO 8601 string is returned.
Args:
date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.
Returns:
Optional[str]: Dataset date string or None if no date is set
"""
dataset_date = self.get_dataset_date_as_datetime()
return self._get_formatted_date(dataset_date, date_format) |
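The formatting helper it delegates to is not shown in this record; a plausible sketch of that last step, strftime with an ISO 8601 fallback, looks like the following. Whether the real helper keeps a time component in the ISO case is an assumption, so treat the fallback branch as illustrative only.

from datetime import datetime

def format_date(dataset_date, date_format=None):
    if dataset_date is None:
        return None
    if date_format is None:
        return dataset_date.date().isoformat()   # ISO 8601 fallback (assumed date-only)
    return dataset_date.strftime(date_format)

d = datetime(2024, 1, 31)
print(format_date(d))               # 2024-01-31
print(format_date(d, '%d/%m/%Y'))   # 31/01/2024
print(format_date(None))            # None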
def set_tts(self, level):
"""
Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level
"""
if level in self.TTS_GRANULARITY_MAP.keys():
tts_key, tts_path_key = self.TTS_GRANULARITY_MAP[level]
self[self.TTS] = self[tts_key]
self[self.TTS_PATH] = self[tts_path_key] | Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level | Below is the instruction that describes the task:
### Input:
Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level
### Response:
def set_tts(self, level):
"""
Set the values for
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS`
and
:data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH`
matching the given granularity level.
Currently supported levels:
* ``1`` (paragraph)
* ``2`` (sentence)
* ``3`` (word)
:param int level: the desired granularity level
"""
if level in self.TTS_GRANULARITY_MAP.keys():
tts_key, tts_path_key = self.TTS_GRANULARITY_MAP[level]
self[self.TTS] = self[tts_key]
self[self.TTS_PATH] = self[tts_path_key] |
def _to_bel_lines_header(graph) -> Iterable[str]:
"""Iterate the lines of a BEL graph's corresponding BEL script's header.
:param pybel.BELGraph graph: A BEL graph
"""
yield '# This document was created by PyBEL v{} and bel-resources v{} on {}\n'.format(
VERSION, bel_resources.constants.VERSION, time.asctime()
)
yield from make_knowledge_header(
namespace_url=graph.namespace_url,
namespace_patterns=graph.namespace_pattern,
annotation_url=graph.annotation_url,
annotation_patterns=graph.annotation_pattern,
annotation_list=graph.annotation_list,
**graph.document
) | Iterate the lines of a BEL graph's corresponding BEL script's header.
:param pybel.BELGraph graph: A BEL graph | Below is the instruction that describes the task:
### Input:
Iterate the lines of a BEL graph's corresponding BEL script's header.
:param pybel.BELGraph graph: A BEL graph
### Response:
def _to_bel_lines_header(graph) -> Iterable[str]:
"""Iterate the lines of a BEL graph's corresponding BEL script's header.
:param pybel.BELGraph graph: A BEL graph
"""
yield '# This document was created by PyBEL v{} and bel-resources v{} on {}\n'.format(
VERSION, bel_resources.constants.VERSION, time.asctime()
)
yield from make_knowledge_header(
namespace_url=graph.namespace_url,
namespace_patterns=graph.namespace_pattern,
annotation_url=graph.annotation_url,
annotation_patterns=graph.annotation_pattern,
annotation_list=graph.annotation_list,
**graph.document
) |
def set_default_host(cls, value):
"""
Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request.
"""
if value is None:
cls.DEFAULT_HOST = "http://127.0.0.1:80"
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port) | Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request. | Below is the instruction that describes the task:
### Input:
Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request.
### Response:
def set_default_host(cls, value):
"""
Default: "http://127.0.0.1:80"
A string that will be automatically included at the beginning of the url generated for doing each http request.
"""
if value is None:
cls.DEFAULT_HOST = "http://127.0.0.1:80"
else:
scheme, host, port = get_hostname_parameters_from_url(value)
cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port) |
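get_hostname_parameters_from_url is not shown in this record; the sketch below is a stand-in for the kind of normalisation it presumably performs (the scheme and port defaults are assumptions) and relies only on the standard library.

from urllib.parse import urlparse

def hostname_parameters(url, default_scheme='http'):
    # Hypothetical stand-in: split a URL into (scheme, host, port), filling defaults.
    if '://' not in url:
        url = default_scheme + '://' + url
    parts = urlparse(url)
    port = parts.port or (443 if parts.scheme == 'https' else 80)
    return parts.scheme, parts.hostname, port

for url in ('example.org', 'https://example.org', 'http://example.org:8080'):
    scheme, host, port = hostname_parameters(url)
    print('%s://%s:%s' % (scheme, host, port))
# http://example.org:80
# https://example.org:443
# http://example.org:8080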
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func(). | Below is the instruction that describes the task:
### Input:
Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
### Response:
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
return func |
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd) | Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py) | Below is the instruction that describes the task:
### Input:
Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
### Response:
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd) |
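The same create-or-bump logic works on plain paths; here is a standalone check of the two branches using a temporary directory (the function is a re-statement of the method above, not an import from the original package).

import os
import tempfile

def touch(path, mode=0o666, exist_ok=True):
    if exist_ok:
        try:
            os.utime(path, None)       # bump mtime if the file already exists
            return
        except OSError:
            pass                       # fall through and create it
    flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        flags |= os.O_EXCL             # fail instead if the file already exists
    os.close(os.open(path, flags, mode))

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'marker')
    touch(target)                      # first call creates the file
    touch(target)                      # second call only updates its mtime
    print(os.path.exists(target))      # True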
def config_args(self, section="main"):
"""
Additional method for feeding input arguments from a config file.
:param section: current config section name
"""
if self._config_parsed:
return
for a in self._filtered_actions("config"):
for o in a.option_strings:
try:
i = sys.argv.index(o)
sys.argv.pop(i) # remove the option string
sys.argv.pop(i) # remove the value that follows
except ValueError:
pass
for a in self._sorted_actions():
self._set_arg(a, section, True)
self._config_parsed = True | Additional method for feeding input arguments from a config file.
:param section: current config section name | Below is the instruction that describes the task:
### Input:
Additional method for feeding input arguments from a config file.
:param section: current config section name
### Response:
def config_args(self, section="main"):
"""
Additional method for feeding input arguments from a config file.
:param section: current config section name
"""
if self._config_parsed:
return
for a in self._filtered_actions("config"):
for o in a.option_strings:
try:
i = sys.argv.index(o)
sys.argv.pop(i) # remove the option string
sys.argv.pop(i) # remove the value that follows
except ValueError:
pass
for a in self._sorted_actions():
self._set_arg(a, section, True)
self._config_parsed = True |
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper) | Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper. | Below is the the instruction that describes the task:
### Input:
Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
### Response:
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper) |
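Concretely, the check is set containment plus a length window; the character set and bounds below are hypothetical values chosen for the example.

safe_characters = set('abcdefghijklmnopqrstuvwxyz0123456789')
verifier_length = (20, 30)   # hypothetical (lower, upper) bounds

def check_verifier(verifier):
    lower, upper = verifier_length
    return set(verifier) <= safe_characters and lower <= len(verifier) <= upper

print(check_verifier('abc123' * 4))   # True  (24 chars, all from the safe set)
print(check_verifier('abc!'))         # False (unsafe character and too short)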
def normalize_lat_lng(arg):
"""Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng)
"""
if isinstance(arg, dict):
if "lat" in arg and "lng" in arg:
return arg["lat"], arg["lng"]
if "latitude" in arg and "longitude" in arg:
return arg["latitude"], arg["longitude"]
# List or tuple.
if _is_list(arg):
return arg[0], arg[1]
raise TypeError(
"Expected a lat/lng dict or tuple, "
"but got %s" % type(arg).__name__) | Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng) | Below is the instruction that describes the task:
### Input:
Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng)
### Response:
def normalize_lat_lng(arg):
"""Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng)
"""
if isinstance(arg, dict):
if "lat" in arg and "lng" in arg:
return arg["lat"], arg["lng"]
if "latitude" in arg and "longitude" in arg:
return arg["latitude"], arg["longitude"]
# List or tuple.
if _is_list(arg):
return arg[0], arg[1]
raise TypeError(
"Expected a lat/lng dict or tuple, "
"but got %s" % type(arg).__name__) |
def get_specific(self, id, **kwargs):
"""
Get specific License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: LicenseSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_specific_with_http_info(id, **kwargs)
else:
(data) = self.get_specific_with_http_info(id, **kwargs)
return data | Get specific License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: LicenseSingleton
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get specific License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: LicenseSingleton
If the method is called asynchronously,
returns the request thread.
### Response:
def get_specific(self, id, **kwargs):
"""
Get specific License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:return: LicenseSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_specific_with_http_info(id, **kwargs)
else:
(data) = self.get_specific_with_http_info(id, **kwargs)
return data |
def transform_file_output(result):
""" Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. """
from collections import OrderedDict
new_result = []
iterable = result if isinstance(result, list) else result.get('items', result)
for item in iterable:
new_entry = OrderedDict()
entity_type = item['type'] # type property is added by transform_file_directory_result
is_dir = entity_type == 'dir'
new_entry['Name'] = item['name'] + '/' if is_dir else item['name']
new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength']
new_entry['Type'] = item['type']
new_entry['Last Modified'] = item['properties']['lastModified'] or ' '
new_result.append(new_entry)
return sorted(new_result, key=lambda k: k['Name']) | Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. | Below is the instruction that describes the task:
### Input:
Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories.
### Response:
def transform_file_output(result):
""" Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. """
from collections import OrderedDict
new_result = []
iterable = result if isinstance(result, list) else result.get('items', result)
for item in iterable:
new_entry = OrderedDict()
entity_type = item['type'] # type property is added by transform_file_directory_result
is_dir = entity_type == 'dir'
new_entry['Name'] = item['name'] + '/' if is_dir else item['name']
new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength']
new_entry['Type'] = item['type']
new_entry['Last Modified'] = item['properties']['lastModified'] or ' '
new_result.append(new_entry)
return sorted(new_result, key=lambda k: k['Name']) |
def parse(region_string):
"""Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list) | Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape` | Below is the instruction that describes the task:
### Input:
Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
### Response:
def parse(region_string):
"""Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list) |
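Typical usage feeds a DS9 string straight into parse(); the region string below is just an example, and the printed attribute names (name, coord_format, coord_list) follow pyregion's Shape objects as I understand them, so verify against the installed version.

import pyregion

region_string = 'fk5;circle(202.4656,47.1999,0.01) # color=red'
shapes = pyregion.parse(region_string)

for shape in shapes:
    # Expected roughly: circle fk5 [202.4656, 47.1999, 0.01]
    print(shape.name, shape.coord_format, shape.coord_list)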
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop)) | Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame | Below is the instruction that describes the task:
### Input:
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
### Response:
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop)) |
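As a usage illustration: a hedged sketch of calling this reader, assuming an active SparkSession bound to the name spark; the JDBC URL, table, column bounds, and credentials are placeholders, not values from the original record.

# Hypothetical connection details -- placeholders for illustration only.
url = "jdbc:postgresql://db.example.com:5432/sales"
props = {"user": "reader", "password": "secret", "driver": "org.postgresql.Driver"}

# Partitioned read: 8 parallel JDBC queries striding over an integer column.
orders = spark.read.jdbc(
    url, "public.orders",
    column="order_id", lowerBound=1, upperBound=1000000, numPartitions=8,
    properties=props,
)

# Same idea with hand-written predicates, one partition per expression.
orders_by_region = spark.read.jdbc(
    url, "public.orders",
    predicates=["region = 'EU'", "region = 'US'"],
    properties=props,
)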
def _diff_emit_update(self, new_bookmarks):
"""
Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data.
"""
self.logger.debug("diffing %s, %s", self._bookmark_cache,
new_bookmarks)
def subdivide(level, old, new):
"""
Subdivide the bookmarks according to the data item
``bookmark.secondary[level]`` and emit the appropriate
events.
"""
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
pass
else:
self.on_bookmark_changed(old_entry, new_entry)
return ([], [])
elif len(old) == 0:
return ([], new)
elif len(new) == 0:
return (old, [])
else:
try:
groups = {}
for entry in old:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[0].append(entry)
for entry in new:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[1].append(entry)
except IndexError:
# the classification is exhausted, this means
# all entries in this bin are equal by the
# definition of bookmark equivalence!
common = min(len(old), len(new))
assert old[:common] == new[:common]
return (old[common:], new[common:])
old_unhandled, new_unhandled = [], []
for old, new in groups.values():
unhandled = subdivide(level+1, old, new)
old_unhandled += unhandled[0]
new_unhandled += unhandled[1]
# match up unhandleds as changes as early as possible
i = -1
for i, (old_entry, new_entry) in enumerate(
zip(old_unhandled, new_unhandled)):
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
i += 1
return old_unhandled[i:], new_unhandled[i:]
# group the bookmarks into groups whose elements may transform
# among one another by on_bookmark_changed events. This information
# is given by the type of the bookmark and the .primary property
changable_groups = {}
for item in self._bookmark_cache:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[0].append(item)
for item in new_bookmarks:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[1].append(item)
for old, new in changable_groups.values():
# the first branches are fast paths which should catch
# most cases – especially all cases where each bare jid of
# a conference bookmark or each url of an url bookmark is
# only used in one bookmark
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
# the bookmark is unchanged, do not emit an event
pass
else:
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
elif len(new) == 0:
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
elif len(old) == 0:
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
else:
old, new = subdivide(0, old, new)
assert len(old) == 0 or len(new) == 0
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
self._bookmark_cache = new_bookmarks | Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data. | Below is the instruction that describes the task:
### Input:
Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data.
### Response:
def _diff_emit_update(self, new_bookmarks):
"""
Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data.
"""
self.logger.debug("diffing %s, %s", self._bookmark_cache,
new_bookmarks)
def subdivide(level, old, new):
"""
Subdivide the bookmarks according to the data item
``bookmark.secondary[level]`` and emit the appropriate
events.
"""
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
pass
else:
self.on_bookmark_changed(old_entry, new_entry)
return ([], [])
elif len(old) == 0:
return ([], new)
elif len(new) == 0:
return (old, [])
else:
try:
groups = {}
for entry in old:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[0].append(entry)
for entry in new:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[1].append(entry)
except IndexError:
# the classification is exhausted, this means
# all entries in this bin are equal by the
# definition of bookmark equivalence!
common = min(len(old), len(new))
assert old[:common] == new[:common]
return (old[common:], new[common:])
old_unhandled, new_unhandled = [], []
for old, new in groups.values():
unhandled = subdivide(level+1, old, new)
old_unhandled += unhandled[0]
new_unhandled += unhandled[1]
# match up unhandleds as changes as early as possible
i = -1
for i, (old_entry, new_entry) in enumerate(
zip(old_unhandled, new_unhandled)):
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
i += 1
return old_unhandled[i:], new_unhandled[i:]
# group the bookmarks into groups whose elements may transform
# among one another by on_bookmark_changed events. This information
# is given by the type of the bookmark and the .primary property
changable_groups = {}
for item in self._bookmark_cache:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[0].append(item)
for item in new_bookmarks:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[1].append(item)
for old, new in changable_groups.values():
# the first branches are fast paths which should catch
# most cases – especially all cases where each bare jid of
# a conference bookmark or each url of an url bookmark is
# only used in one bookmark
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
# the bookmark is unchanged, do not emit an event
pass
else:
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
elif len(new) == 0:
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
elif len(old) == 0:
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
else:
old, new = subdivide(0, old, new)
assert len(old) == 0 or len(new) == 0
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
self._bookmark_cache = new_bookmarks |
def memorize(func):
"""
Simply memorize the calculated result :data:`func` previously returned.
Simply caches all calculated results from the decorated method/function into
a global `dict`.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
if (len(args) > 0 and len(kwargs) > 0):
cacheKey = list(args)
cacheKey.append(kwargs)
elif (len(args) > 0):
cacheKey = args
else:
cacheKey = func.__name__
global __cache__
result = __cache__.get(cacheKey)
if result is None:
result = func(*args, **kwargs)
__cache__[cacheKey] = result
return result
return wrapped_func | Simply memorize the calculated result :data:`func` previously returned.
Simply caches all calculated results from the decorated method/function into
a global `dict`. | Below is the instruction that describes the task:
### Input:
Simply memorize the calculated result :data:`func` previously returned.
Simply caches all calculated results from the decorated method/function into
a global `dict`.
### Response:
def memorize(func):
"""
Simply memorize the calculated result :data:`func` previously returned.
Simply caches all calculated results from the decorated method/function into
a global `dict`.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
if (len(args) > 0 and len(kwargs) > 0):
cacheKey = list(args)
cacheKey.append(kwargs)
elif (len(args) > 0):
cacheKey = args
else:
cacheKey = func.__name__
global __cache__
result = __cache__.get(cacheKey)
if result is None:
result = func(*args, **kwargs)
__cache__[cacheKey] = result
return result
return wrapped_func |
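For illustration, a minimal self-contained sketch of how a decorator like this might be used; the simplified, hashable cache key below is an assumption made for the demo and is not the original keying logic.

from functools import wraps

__cache__ = {}  # module-level cache, mirroring the global used above

def memorize(func):
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        cache_key = args if args else func.__name__  # simplified key for the demo
        if cache_key not in __cache__:
            __cache__[cache_key] = func(*args, **kwargs)
        return __cache__[cache_key]
    return wrapped_func

@memorize
def slow_square(x):
    print("computing", x)
    return x * x

slow_square(4)  # prints "computing 4" and caches 16
slow_square(4)  # answered from __cache__, nothing printed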
def resolve_path(schema, fragment):
"""
Return definition from path.
Path is unescaped according to https://tools.ietf.org/html/rfc6901
"""
fragment = fragment.lstrip('/')
parts = unquote(fragment).split('/') if fragment else []
for part in parts:
part = part.replace('~1', '/').replace('~0', '~')
if isinstance(schema, list):
schema = schema[int(part)]
elif part in schema:
schema = schema[part]
else:
raise JsonSchemaException('Unresolvable ref: {}'.format(part))
return schema | Return definition from path.
Path is unescaped according to https://tools.ietf.org/html/rfc6901 | Below is the instruction that describes the task:
### Input:
Return definition from path.
Path is unescaped according to https://tools.ietf.org/html/rfc6901
### Response:
def resolve_path(schema, fragment):
"""
Return definition from path.
Path is unescaped according to https://tools.ietf.org/html/rfc6901
"""
fragment = fragment.lstrip('/')
parts = unquote(fragment).split('/') if fragment else []
for part in parts:
part = part.replace('~1', '/').replace('~0', '~')
if isinstance(schema, list):
schema = schema[int(part)]
elif part in schema:
schema = schema[part]
else:
raise JsonSchemaException('Unresolvable ref: {}'.format(part))
return schema |
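A standalone illustration of the JSON Pointer unescaping rule the function relies on ('~1' becomes '/', '~0' becomes '~'); the schema dict below is made up for the demo.

from urllib.parse import unquote

schema = {"definitions": {"a/b": {"type": "string"}, "c~d": {"type": "integer"}}}

fragment = "/definitions/a~1b"  # points at the key literally named "a/b"
node = schema
for part in unquote(fragment.lstrip('/')).split('/'):
    part = part.replace('~1', '/').replace('~0', '~')
    node = node[int(part)] if isinstance(node, list) else node[part]
print(node)  # {'type': 'string'}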
def delete_grade_entry(self, grade_entry_id):
"""Deletes the ``GradeEntry`` identified by the given ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry`` to delete
raise: NotFound - a ``GradeEntry`` was not found identified by
the given ``Id``
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
grade_entry_map = collection.find_one(
dict({'_id': ObjectId(grade_entry_id.get_identifier())},
**self._view_filter()))
objects.GradeEntry(osid_object_map=grade_entry_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(grade_entry_id.get_identifier())}) | Deletes the ``GradeEntry`` identified by the given ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry`` to delete
raise: NotFound - a ``GradeEntry`` was not found identified by
the given ``Id``
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Deletes the ``GradeEntry`` identified by the given ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry`` to delete
raise: NotFound - a ``GradeEntry`` was not found identified by
the given ``Id``
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def delete_grade_entry(self, grade_entry_id):
"""Deletes the ``GradeEntry`` identified by the given ``Id``.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry`` to delete
raise: NotFound - a ``GradeEntry`` was not found identified by
the given ``Id``
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
grade_entry_map = collection.find_one(
dict({'_id': ObjectId(grade_entry_id.get_identifier())},
**self._view_filter()))
objects.GradeEntry(osid_object_map=grade_entry_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(grade_entry_id.get_identifier())}) |
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r | Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used. | Below is the instruction that describes the task:
### Input:
Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used.
### Response:
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r |
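A generic DB-API batching sketch, assumed rather than taken from the original source; the connection object, query, and process() callable are placeholders.

def stream_rows(connection, process):
    cur = connection.cursor()
    cur.execute("SELECT id, name FROM users")  # placeholder query
    cur.arraysize = 500                        # default batch size for fetchmany()
    while True:
        rows = cur.fetchmany()                 # at most 500 rows per round trip
        if not rows:
            break
        for row in rows:
            process(row)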
def clean_indicators(indicators):
"""Remove any extra details from indicators."""
output = list()
for indicator in indicators:
strip = ['http://', 'https://']
for item in strip:
indicator = indicator.replace(item, '')
indicator = indicator.strip('.').strip()
parts = indicator.split('/')
if len(parts) > 0:
indicator = parts.pop(0)
output.append(indicator)
output = list(set(output))
return output | Remove any extra details from indicators. | Below is the instruction that describes the task:
### Input:
Remove any extra details from indicators.
### Response:
def clean_indicators(indicators):
"""Remove any extra details from indicators."""
output = list()
for indicator in indicators:
strip = ['http://', 'https://']
for item in strip:
indicator = indicator.replace(item, '')
indicator = indicator.strip('.').strip()
parts = indicator.split('/')
if len(parts) > 0:
indicator = parts.pop(0)
output.append(indicator)
output = list(set(output))
return output |
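Usage illustration, assuming the clean_indicators function above is in scope; the indicator strings are made up.

indicators = [
    "http://example.com/path/to/page",
    "https://malicious.example.net",
    "example.com.",
]
print(sorted(clean_indicators(indicators)))
# ['example.com', 'malicious.example.net']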
def listen(bot, receivers=None, token=None, port=10245, status_report=False, status_receiver=None,
status_interval=DEFAULT_REPORT_TIME):
"""
Pass in a bot instance and start the wechat_sender service
:param bot: (required|Bot object) - a wxpy Bot object instance
:param receivers: (optional|wxpy.Chat object|list of Chat objects) - message receivers: a wxpy Chat object instance or a list of Chat objects. If a list, the first Chat is the default receiver; if a single Chat object, it is also the default receiver. Defaults to the current bot object's file helper.
:param token: (optional|str) - a token guarding the receiver against unauthorized use; adding one is recommended. If used, pass the same token when initializing `Sender()`, otherwise sending will fail. A random string of 32 or more characters is recommended.
:param port: (optional|int) - listening port, 10245 by default; change it in case of conflicts or special needs, and keep it consistent with `Sender()`
:param status_report: (optional|bool) - whether to enable status reports; if enabled, wechat_sender periodically sends status messages to status_receiver
:param status_receiver: (optional|Chat object) - the status receiver; if omitted, status messages are sent to the default receiver
:param status_interval: (optional|int|datetime.timedelta) - interval between status reports; an integer value is interpreted as milliseconds
"""
global glb
periodic_list = []
app = Application()
wxbot = WxBot(bot, receivers, status_receiver)
register_listener_handle(wxbot)
process = psutil.Process()
app.listen(port)
if status_report:
if isinstance(status_interval, datetime.timedelta):
status_interval = status_interval.seconds * 1000
check_periodic = tornado.ioloop.PeriodicCallback(functools.partial(check_bot, SYSTEM_TASK), status_interval)
check_periodic.start()
periodic_list.append(check_periodic)
glb = Global(wxbot=wxbot, run_info=process, periodic_list=periodic_list, ioloop=tornado.ioloop.IOLoop.instance(),
token=token)
tornado.ioloop.IOLoop.current().start() | Pass in a bot instance and start the wechat_sender service
:param bot: (required|Bot object) - a wxpy Bot object instance
:param receivers: (optional|wxpy.Chat object|list of Chat objects) - message receivers: a wxpy Chat object instance or a list of Chat objects. If a list, the first Chat is the default receiver; if a single Chat object, it is also the default receiver. Defaults to the current bot object's file helper.
:param token: (optional|str) - a token guarding the receiver against unauthorized use; adding one is recommended. If used, pass the same token when initializing `Sender()`, otherwise sending will fail. A random string of 32 or more characters is recommended.
:param port: (optional|int) - listening port, 10245 by default; change it in case of conflicts or special needs, and keep it consistent with `Sender()`
:param status_report: (optional|bool) - whether to enable status reports; if enabled, wechat_sender periodically sends status messages to status_receiver
:param status_receiver: (optional|Chat object) - the status receiver; if omitted, status messages are sent to the default receiver
:param status_interval: (optional|int|datetime.timedelta) - interval between status reports; an integer value is interpreted as milliseconds | Below is the instruction that describes the task:
### Input:
Pass in a bot instance and start the wechat_sender service
:param bot: (required|Bot object) - a wxpy Bot object instance
:param receivers: (optional|wxpy.Chat object|list of Chat objects) - message receivers: a wxpy Chat object instance or a list of Chat objects. If a list, the first Chat is the default receiver; if a single Chat object, it is also the default receiver. Defaults to the current bot object's file helper.
:param token: (optional|str) - a token guarding the receiver against unauthorized use; adding one is recommended. If used, pass the same token when initializing `Sender()`, otherwise sending will fail. A random string of 32 or more characters is recommended.
:param port: (optional|int) - listening port, 10245 by default; change it in case of conflicts or special needs, and keep it consistent with `Sender()`
:param status_report: (optional|bool) - whether to enable status reports; if enabled, wechat_sender periodically sends status messages to status_receiver
:param status_receiver: (optional|Chat object) - the status receiver; if omitted, status messages are sent to the default receiver
:param status_interval: (optional|int|datetime.timedelta) - interval between status reports; an integer value is interpreted as milliseconds
### Response:
def listen(bot, receivers=None, token=None, port=10245, status_report=False, status_receiver=None,
status_interval=DEFAULT_REPORT_TIME):
"""
Pass in a bot instance and start the wechat_sender service
:param bot: (required|Bot object) - a wxpy Bot object instance
:param receivers: (optional|wxpy.Chat object|list of Chat objects) - message receivers: a wxpy Chat object instance or a list of Chat objects. If a list, the first Chat is the default receiver; if a single Chat object, it is also the default receiver. Defaults to the current bot object's file helper.
:param token: (optional|str) - a token guarding the receiver against unauthorized use; adding one is recommended. If used, pass the same token when initializing `Sender()`, otherwise sending will fail. A random string of 32 or more characters is recommended.
:param port: (optional|int) - listening port, 10245 by default; change it in case of conflicts or special needs, and keep it consistent with `Sender()`
:param status_report: (optional|bool) - whether to enable status reports; if enabled, wechat_sender periodically sends status messages to status_receiver
:param status_receiver: (optional|Chat object) - the status receiver; if omitted, status messages are sent to the default receiver
:param status_interval: (optional|int|datetime.timedelta) - interval between status reports; an integer value is interpreted as milliseconds
"""
global glb
periodic_list = []
app = Application()
wxbot = WxBot(bot, receivers, status_receiver)
register_listener_handle(wxbot)
process = psutil.Process()
app.listen(port)
if status_report:
if isinstance(status_interval, datetime.timedelta):
status_interval = status_interval.seconds * 1000
check_periodic = tornado.ioloop.PeriodicCallback(functools.partial(check_bot, SYSTEM_TASK), status_interval)
check_periodic.start()
periodic_list.append(check_periodic)
glb = Global(wxbot=wxbot, run_info=process, periodic_list=periodic_list, ioloop=tornado.ioloop.IOLoop.instance(),
token=token)
tornado.ioloop.IOLoop.current().start() |
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value | Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008 | Below is the instruction that describes the task:
### Input:
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
### Response:
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value |
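A standalone illustration of the two extraction rules described above (midnight datetimes collapse to dates; 1900-01-01 datetimes collapse to times), using only the standard library.

import datetime

# DateField arriving as a midnight datetime -> keep only the date part
value = datetime.datetime(2020, 5, 1, 0, 0, 0)
if value.hour == value.minute == value.second == value.microsecond == 0:
    value = value.date()
print(value)          # 2020-05-01

# TimeField arriving as 1900-01-01 plus a time of day -> keep only the time part
value = datetime.datetime(1900, 1, 1, 13, 45, 30)
print(value.time())   # 13:45:30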
def get_config_value(self, overrides, skip_environment=False):
"""Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
"""
label, override, key = self._search_overrides(
overrides, skip_environment
)
if override is None and self.default is None and self.required:
raise YapconfItemNotFound(
'Could not find config value for {0}'.format(self.fq_name),
self
)
if override is None:
self.logger.debug(
'Config value not found for {0}, falling back to default.'
.format(self.name)
)
value = self.default
else:
value = override[key]
if value is None:
return value
converted_value = self.convert_config_value(value, label)
self._validate_value(converted_value)
return converted_value | Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised. | Below is the instruction that describes the task:
### Input:
Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
### Response:
def get_config_value(self, overrides, skip_environment=False):
"""Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
"""
label, override, key = self._search_overrides(
overrides, skip_environment
)
if override is None and self.default is None and self.required:
raise YapconfItemNotFound(
'Could not find config value for {0}'.format(self.fq_name),
self
)
if override is None:
self.logger.debug(
'Config value not found for {0}, falling back to default.'
.format(self.name)
)
value = self.default
else:
value = override[key]
if value is None:
return value
converted_value = self.convert_config_value(value, label)
self._validate_value(converted_value)
return converted_value |
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2):
"""Read a file by chrom name into a bitset"""
bitset = BinnedBitSet( MAX )
for line in f:
if line.startswith("#"): continue
fields = line.split()
if fields[chrom_col] == chrom:
start, end = int( fields[start_col] ), int( fields[end_col] )
bitset.set_range( start, end-start )
return bitset | Read a file by chrom name into a bitset | Below is the instruction that describes the task:
### Input:
Read a file by chrom name into a bitset
### Response:
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2):
"""Read a file by chrom name into a bitset"""
bitset = BinnedBitSet( MAX )
for line in f:
if line.startswith("#"): continue
fields = line.split()
if fields[chrom_col] == chrom:
start, end = int( fields[start_col] ), int( fields[end_col] )
bitset.set_range( start, end-start )
return bitset |
def CAS_from_any(ID, autoload=False):
'''Looks up the CAS number of a chemical by searching and testing for the
string being any of the following types of chemical identifiers:
* Name, in IUPAC form or common form or a synonym registered in PubChem
* InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
* InChI key, prefixed by 'InChIKey='
* PubChem CID, prefixed by 'PubChem='
* SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex.
'C' will return Carbon as it is an element whereas the SMILES
interpretation for 'C' is methane)
* CAS number (obsolete numbers may point to the current number)
If the input is an ID representing an element, the following additional
inputs may be specified as
* Atomic symbol (ex 'Na')
* Atomic number (as a string)
Parameters
----------
ID : str
One of the name formats described above
Returns
-------
CASRN : string
A three-piece, dash-separated set of numbers
Notes
-----
An exception is raised if the name cannot be identified. The PubChem
database includes a wide variety of other synonyms, but these may not be
present for all chemcials.
Examples
--------
>>> CAS_from_any('water')
'7732-18-5'
>>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
'64-17-5'
>>> CAS_from_any('CCCCCCCCCC')
'124-18-5'
>>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N')
'64-17-5'
>>> CAS_from_any('pubchem=702')
'64-17-5'
>>> CAS_from_any('O') # only elements can be specified by symbol
'17778-80-2'
'''
ID = ID.strip()
ID_lower = ID.lower()
if ID in periodic_table:
if periodic_table[ID].number not in homonuclear_elemental_gases:
return periodic_table[ID].CAS
else:
for i in [periodic_table.symbol_to_elements,
periodic_table.number_to_elements,
periodic_table.CAS_to_elements]:
if i == periodic_table.number_to_elements:
if int(ID in i):
return periodic_table[int(ID)].CAS
else:
if ID in i:
return periodic_table[ID].CAS
if checkCAS(ID):
CAS_lookup = pubchem_db.search_CAS(ID, autoload)
if CAS_lookup:
return CAS_lookup.CASs
# handle the case of synonyms
CAS_alternate_loopup = pubchem_db.search_name(ID, autoload)
if CAS_alternate_loopup:
return CAS_alternate_loopup.CASs
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid CAS number was recognized, but is not in the database')
ID_len = len(ID)
if ID_len > 9:
inchi_search = False
# normal upper case is 'InChI=1S/'
if ID_lower[0:9] == 'inchi=1s/':
inchi_search = ID[9:]
elif ID_lower[0:8] == 'inchi=1/':
inchi_search = ID[8:]
if inchi_search:
inchi_lookup = pubchem_db.search_InChI(inchi_search, autoload)
if inchi_lookup:
return inchi_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid InChI name was recognized, but it is not in the database')
if ID_lower[0:9] == 'inchikey=':
inchi_key_lookup = pubchem_db.search_InChI_key(ID[9:], autoload)
if inchi_key_lookup:
return inchi_key_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid InChI Key was recognized, but it is not in the database')
if ID_len > 8:
if ID_lower[0:8] == 'pubchem=':
pubchem_lookup = pubchem_db.search_pubchem(ID[8:], autoload)
if pubchem_lookup:
return pubchem_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A PubChem integer identifier was recognized, but it is not in the database.')
if ID_len > 7:
if ID_lower[0:7] == 'smiles=':
smiles_lookup = pubchem_db.search_smiles(ID[7:], autoload)
if smiles_lookup:
return smiles_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A SMILES identifier was recognized, but it is not in the database.')
# Try the smiles lookup anyway
# Parsing SMILES is an option, but this is faster
# Pybel API also prints messages to console on failure
smiles_lookup = pubchem_db.search_smiles(ID, autoload)
if smiles_lookup:
return smiles_lookup.CASs
try:
formula_query = pubchem_db.search_formula(serialize_formula(ID), autoload)
if formula_query and type(formula_query) == ChemicalMetadata:
return formula_query.CASs
except:
pass
# Try a direct lookup with the name - the fastest
name_lookup = pubchem_db.search_name(ID, autoload)
if name_lookup:
return name_lookup.CASs
# Permutate through various name options
ID_no_space = ID.replace(' ', '')
ID_no_space_dash = ID_no_space.replace('-', '')
for name in [ID, ID_no_space, ID_no_space_dash]:
for name2 in [name, name.lower()]:
name_lookup = pubchem_db.search_name(name2, autoload)
if name_lookup:
return name_lookup.CASs
if ID[-1] == ')' and '(' in ID:#
# Try to match in the form 'water (H2O)'
first_identifier, second_identifier = ID[0:-1].split('(', 1)
try:
CAS1 = CAS_from_any(first_identifier)
CAS2 = CAS_from_any(second_identifier)
assert CAS1 == CAS2
return CAS1
except:
pass
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('Chemical name not recognized') | Looks up the CAS number of a chemical by searching and testing for the
string being any of the following types of chemical identifiers:
* Name, in IUPAC form or common form or a synonym registered in PubChem
* InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
* InChI key, prefixed by 'InChIKey='
* PubChem CID, prefixed by 'PubChem='
* SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex.
'C' will return Carbon as it is an element whereas the SMILES
interpretation for 'C' is methane)
* CAS number (obsolete numbers may point to the current number)
If the input is an ID representing an element, the following additional
inputs may be specified as
* Atomic symbol (ex 'Na')
* Atomic number (as a string)
Parameters
----------
ID : str
One of the name formats described above
Returns
-------
CASRN : string
A three-piece, dash-separated set of numbers
Notes
-----
An exception is raised if the name cannot be identified. The PubChem
database includes a wide variety of other synonyms, but these may not be
present for all chemicals.
Examples
--------
>>> CAS_from_any('water')
'7732-18-5'
>>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
'64-17-5'
>>> CAS_from_any('CCCCCCCCCC')
'124-18-5'
>>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N')
'64-17-5'
>>> CAS_from_any('pubchem=702')
'64-17-5'
>>> CAS_from_any('O') # only elements can be specified by symbol
'17778-80-2' | Below is the instruction that describes the task:
### Input:
Looks up the CAS number of a chemical by searching and testing for the
string being any of the following types of chemical identifiers:
* Name, in IUPAC form or common form or a synonym registered in PubChem
* InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
* InChI key, prefixed by 'InChIKey='
* PubChem CID, prefixed by 'PubChem='
* SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex.
'C' will return Carbon as it is an element whereas the SMILES
interpretation for 'C' is methane)
* CAS number (obsolete numbers may point to the current number)
If the input is an ID representing an element, the following additional
inputs may be specified as
* Atomic symbol (ex 'Na')
* Atomic number (as a string)
Parameters
----------
ID : str
One of the name formats described above
Returns
-------
CASRN : string
A three-piece, dash-separated set of numbers
Notes
-----
An exception is raised if the name cannot be identified. The PubChem
database includes a wide variety of other synonyms, but these may not be
present for all chemicals.
Examples
--------
>>> CAS_from_any('water')
'7732-18-5'
>>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
'64-17-5'
>>> CAS_from_any('CCCCCCCCCC')
'124-18-5'
>>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N')
'64-17-5'
>>> CAS_from_any('pubchem=702')
'64-17-5'
>>> CAS_from_any('O') # only elements can be specified by symbol
'17778-80-2'
### Response:
def CAS_from_any(ID, autoload=False):
'''Looks up the CAS number of a chemical by searching and testing for the
string being any of the following types of chemical identifiers:
* Name, in IUPAC form or common form or a synonym registered in PubChem
* InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
* InChI key, prefixed by 'InChIKey='
* PubChem CID, prefixed by 'PubChem='
* SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex.
'C' will return Carbon as it is an element whereas the SMILES
interpretation for 'C' is methane)
* CAS number (obsolete numbers may point to the current number)
If the input is an ID representing an element, the following additional
inputs may be specified as
* Atomic symbol (ex 'Na')
* Atomic number (as a string)
Parameters
----------
ID : str
One of the name formats described above
Returns
-------
CASRN : string
A three-piece, dash-separated set of numbers
Notes
-----
An exception is raised if the name cannot be identified. The PubChem
database includes a wide variety of other synonyms, but these may not be
present for all chemicals.
Examples
--------
>>> CAS_from_any('water')
'7732-18-5'
>>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
'64-17-5'
>>> CAS_from_any('CCCCCCCCCC')
'124-18-5'
>>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N')
'64-17-5'
>>> CAS_from_any('pubchem=702')
'64-17-5'
>>> CAS_from_any('O') # only elements can be specified by symbol
'17778-80-2'
'''
ID = ID.strip()
ID_lower = ID.lower()
if ID in periodic_table:
if periodic_table[ID].number not in homonuclear_elemental_gases:
return periodic_table[ID].CAS
else:
for i in [periodic_table.symbol_to_elements,
periodic_table.number_to_elements,
periodic_table.CAS_to_elements]:
if i == periodic_table.number_to_elements:
if int(ID in i):
return periodic_table[int(ID)].CAS
else:
if ID in i:
return periodic_table[ID].CAS
if checkCAS(ID):
CAS_lookup = pubchem_db.search_CAS(ID, autoload)
if CAS_lookup:
return CAS_lookup.CASs
# handle the case of synonyms
CAS_alternate_loopup = pubchem_db.search_name(ID, autoload)
if CAS_alternate_loopup:
return CAS_alternate_loopup.CASs
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid CAS number was recognized, but is not in the database')
ID_len = len(ID)
if ID_len > 9:
inchi_search = False
# normal upper case is 'InChI=1S/'
if ID_lower[0:9] == 'inchi=1s/':
inchi_search = ID[9:]
elif ID_lower[0:8] == 'inchi=1/':
inchi_search = ID[8:]
if inchi_search:
inchi_lookup = pubchem_db.search_InChI(inchi_search, autoload)
if inchi_lookup:
return inchi_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid InChI name was recognized, but it is not in the database')
if ID_lower[0:9] == 'inchikey=':
inchi_key_lookup = pubchem_db.search_InChI_key(ID[9:], autoload)
if inchi_key_lookup:
return inchi_key_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A valid InChI Key was recognized, but it is not in the database')
if ID_len > 8:
if ID_lower[0:8] == 'pubchem=':
pubchem_lookup = pubchem_db.search_pubchem(ID[8:], autoload)
if pubchem_lookup:
return pubchem_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A PubChem integer identifier was recognized, but it is not in the database.')
if ID_len > 7:
if ID_lower[0:7] == 'smiles=':
smiles_lookup = pubchem_db.search_smiles(ID[7:], autoload)
if smiles_lookup:
return smiles_lookup.CASs
else:
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('A SMILES identifier was recognized, but it is not in the database.')
# Try the smiles lookup anyway
# Parsing SMILES is an option, but this is faster
# Pybel API also prints messages to console on failure
smiles_lookup = pubchem_db.search_smiles(ID, autoload)
if smiles_lookup:
return smiles_lookup.CASs
try:
formula_query = pubchem_db.search_formula(serialize_formula(ID), autoload)
if formula_query and type(formula_query) == ChemicalMetadata:
return formula_query.CASs
except:
pass
# Try a direct lookup with the name - the fastest
name_lookup = pubchem_db.search_name(ID, autoload)
if name_lookup:
return name_lookup.CASs
# Permutate through various name options
ID_no_space = ID.replace(' ', '')
ID_no_space_dash = ID_no_space.replace('-', '')
for name in [ID, ID_no_space, ID_no_space_dash]:
for name2 in [name, name.lower()]:
name_lookup = pubchem_db.search_name(name2, autoload)
if name_lookup:
return name_lookup.CASs
if ID[-1] == ')' and '(' in ID:#
# Try to match in the form 'water (H2O)'
first_identifier, second_identifier = ID[0:-1].split('(', 1)
try:
CAS1 = CAS_from_any(first_identifier)
CAS2 = CAS_from_any(second_identifier)
assert CAS1 == CAS2
return CAS1
except:
pass
if not autoload:
return CAS_from_any(ID, autoload=True)
raise Exception('Chemical name not recognized') |
def get_recipients(self, name):
"""
For example get_recipients('to')
"""
to_str = self.render_string(self.data[name])
formatted_emails = [
email.utils.formataddr(addr_pair)
for addr_pair in email.utils.getaddresses([to_str])
]
return [i for i in formatted_emails if i] | For example get_recipients('to') | Below is the instruction that describes the task:
### Input:
For example get_recipients('to')
### Response:
def get_recipients(self, name):
"""
For example get_recipients('to')
"""
to_str = self.render_string(self.data[name])
formatted_emails = [
email.utils.formataddr(addr_pair)
for addr_pair in email.utils.getaddresses([to_str])
]
return [i for i in formatted_emails if i] |
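The standard-library round trip this method relies on, shown in isolation; the address string is an example.

import email.utils

to_str = "Alice Example <alice@example.com>, bob@example.com"
pairs = email.utils.getaddresses([to_str])          # [('Alice Example', 'alice@example.com'), ('', 'bob@example.com')]
print([email.utils.formataddr(p) for p in pairs])   # ['Alice Example <alice@example.com>', 'bob@example.com']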
def get_network_by_name(self, nwk_name):
"""Search for a openstack network by name. """
ret_net_lst = []
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
net_list = net_list.get('networks')
for net in net_list:
if net.get('name') == nwk_name:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by name %(name)s, "
"Exc %(exc)s",
{'name': nwk_name, 'exc': str(exc)})
return ret_net_lst | Search for an OpenStack network by name. | Below is the instruction that describes the task:
### Input:
Search for an OpenStack network by name.
### Response:
def get_network_by_name(self, nwk_name):
"""Search for a openstack network by name. """
ret_net_lst = []
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
net_list = net_list.get('networks')
for net in net_list:
if net.get('name') == nwk_name:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by name %(name)s, "
"Exc %(exc)s",
{'name': nwk_name, 'exc': str(exc)})
return ret_net_lst |
def edit(self, body):
"""
:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:rtype: None
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data) | :calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:rtype: None | Below is the instruction that describes the task:
### Input:
:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:rtype: None
### Response:
def edit(self, body):
"""
:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:rtype: None
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data) |
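A hedged usage sketch, assuming an authenticated PyGithub Repository object named repo; the pull number and the search text are placeholders.

pull = repo.get_pull(42)                 # placeholder pull request number
for comment in pull.get_comments():      # review comments attached to the diff
    if "colour" in comment.body:
        comment.edit(comment.body.replace("colour", "color"))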
def _closeElements(childs, HTMLElement):
"""
Create `endtags` to elements which looks like openers, but doesn't have
proper :attr:`HTMLElement.endtag`.
Args:
childs (list): List of childs (:class:`HTMLElement` obj) - typically
from :attr:`HTMLElement.childs` property.
Returns:
list: List of closed elements.
"""
out = []
# close all unclosed pair tags
for e in childs:
if not e.isTag():
out.append(e)
continue
if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \
and e.endtag is None:
e.childs = _closeElements(e.childs, HTMLElement)
out.append(e)
out.append(HTMLElement("</" + e.getTagName() + ">"))
# join opener and endtag
e.endtag = out[-1]
out[-1].openertag = e
else:
out.append(e)
return out | Create `endtags` for elements which look like openers, but don't have
proper :attr:`HTMLElement.endtag`.
Args:
childs (list): List of childs (:class:`HTMLElement` obj) - typically
from :attr:`HTMLElement.childs` property.
Returns:
list: List of closed elements. | Below is the instruction that describes the task:
### Input:
Create `endtags` for elements which look like openers, but don't have
proper :attr:`HTMLElement.endtag`.
Args:
childs (list): List of childs (:class:`HTMLElement` obj) - typically
from :attr:`HTMLElement.childs` property.
Returns:
list: List of closed elements.
### Response:
def _closeElements(childs, HTMLElement):
"""
Create `endtags` for elements which look like openers, but don't have
proper :attr:`HTMLElement.endtag`.
Args:
childs (list): List of childs (:class:`HTMLElement` obj) - typically
from :attr:`HTMLElement.childs` property.
Returns:
list: List of closed elements.
"""
out = []
# close all unclosed pair tags
for e in childs:
if not e.isTag():
out.append(e)
continue
if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \
and e.endtag is None:
e.childs = _closeElements(e.childs, HTMLElement)
out.append(e)
out.append(HTMLElement("</" + e.getTagName() + ">"))
# join opener and endtag
e.endtag = out[-1]
out[-1].openertag = e
else:
out.append(e)
return out |
def load_external_types(self, path):
"""
Given a path to a python package or module, load that module, search for all defined variables
inside of it that do not start with _ or __ and inject them into the type system. If any of the
types cannot be injected, silently ignore them unless verbose is True. If path points to a module
it should not contain the trailing .py since this is added automatically by the python import system
"""
folder, filename = os.path.split(path)
try:
fileobj, pathname, description = imp.find_module(filename, [folder])
mod = imp.load_module(filename, fileobj, pathname, description)
except ImportError as exc:
raise ArgumentError("could not import module in order to load external types", module_path=path, parent_directory=folder, module_name=filename, error=str(exc))
self.load_type_module(mod) | Given a path to a python package or module, load that module, search for all defined variables
inside of it that do not start with _ or __ and inject them into the type system. If any of the
types cannot be injected, silently ignore them unless verbose is True. If path points to a module
it should not contain the trailing .py since this is added automatically by the python import system | Below is the instruction that describes the task:
### Input:
Given a path to a python package or module, load that module, search for all defined variables
inside of it that do not start with _ or __ and inject them into the type system. If any of the
types cannot be injected, silently ignore them unless verbose is True. If path points to a module
it should not contain the trailing .py since this is added automatically by the python import system
### Response:
def load_external_types(self, path):
"""
Given a path to a python package or module, load that module, search for all defined variables
inside of it that do not start with _ or __ and inject them into the type system. If any of the
types cannot be injected, silently ignore them unless verbose is True. If path points to a module
it should not contain the trailing .py since this is added automatically by the python import system
"""
folder, filename = os.path.split(path)
try:
fileobj, pathname, description = imp.find_module(filename, [folder])
mod = imp.load_module(filename, fileobj, pathname, description)
except ImportError as exc:
raise ArgumentError("could not import module in order to load external types", module_path=path, parent_directory=folder, module_name=filename, error=str(exc))
self.load_type_module(mod) |
def process_jwt(jwt):
"""
Process a JSON Web Token without verifying it.
Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:rtype: tuple
:returns: ``(header, claims)``
"""
header, claims, _ = jwt.split('.')
parsed_header = json_decode(base64url_decode(header))
parsed_claims = json_decode(base64url_decode(claims))
return parsed_header, parsed_claims | Process a JSON Web Token without verifying it.
Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:rtype: tuple
:returns: ``(header, claims)`` | Below is the instruction that describes the task:
### Input:
Process a JSON Web Token without verifying it.
Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:rtype: tuple
:returns: ``(header, claims)``
### Response:
def process_jwt(jwt):
"""
Process a JSON Web Token without verifying it.
Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key.
:param jwt: The JSON Web Token to verify.
:type jwt: str or unicode
:rtype: tuple
:returns: ``(header, claims)``
"""
header, claims, _ = jwt.split('.')
parsed_header = json_decode(base64url_decode(header))
parsed_claims = json_decode(base64url_decode(claims))
return parsed_header, parsed_claims |
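A self-contained illustration of the same split-and-decode step using only the standard library; the token below is assembled on the spot and carries no real signature.

import base64, json

def b64url(data: dict) -> str:
    return base64.urlsafe_b64encode(json.dumps(data).encode()).rstrip(b"=").decode()

def b64url_decode(segment: str) -> bytes:
    return base64.urlsafe_b64decode(segment + "=" * (-len(segment) % 4))

token = ".".join([b64url({"alg": "RS256", "typ": "JWT"}),
                  b64url({"iss": "example-issuer", "sub": "42"}),
                  "fake-signature"])

header, claims, _ = token.split(".")
print(json.loads(b64url_decode(header)))  # {'alg': 'RS256', 'typ': 'JWT'}
print(json.loads(b64url_decode(claims)))  # {'iss': 'example-issuer', 'sub': '42'}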
def validate_revocation_request(self, request):
"""Ensure the request is valid.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP
request entity-body:
token (REQUIRED). The token that the client wants to get revoked.
token_type_hint (OPTIONAL). A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in order to
help the authorization server to optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: https://tools.ietf.org/html/rfc6749
"""
self._raise_on_missing_token(request)
self._raise_on_invalid_client(request)
self._raise_on_unsupported_token(request) | Ensure the request is valid.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP
request entity-body:
token (REQUIRED). The token that the client wants to get revoked.
token_type_hint (OPTIONAL). A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in order to
help the authorization server to optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: https://tools.ietf.org/html/rfc6749 | Below is the instruction that describes the task:
### Input:
Ensure the request is valid.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP
request entity-body:
token (REQUIRED). The token that the client wants to get revoked.
token_type_hint (OPTIONAL). A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in order to
help the authorization server to optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: https://tools.ietf.org/html/rfc6749
### Response:
def validate_revocation_request(self, request):
"""Ensure the request is valid.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP
request entity-body:
token (REQUIRED). The token that the client wants to get revoked.
token_type_hint (OPTIONAL). A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in order to
help the authorization server to optimize the token lookup. If the
server is unable to locate the token using the given hint, it MUST
extend its search across all of its supported token types. An
authorization server MAY ignore this parameter, particularly if it is
able to detect the token type automatically. This specification
defines two such values:
* access_token: An Access Token as defined in [RFC6749],
`section 1.4`_
* refresh_token: A Refresh Token as defined in [RFC6749],
`section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using
the registry defined in `Section 4.1.2`_.
The client also includes its authentication credentials as described in
`Section 2.3`_. of [`RFC6749`_].
.. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3
.. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2
.. _`RFC6749`: https://tools.ietf.org/html/rfc6749
"""
self._raise_on_missing_token(request)
self._raise_on_invalid_client(request)
self._raise_on_unsupported_token(request) |
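As a quick illustration of the request format the docstring above describes, the sketch below builds the application/x-www-form-urlencoded revocation body and a Basic authentication header using only the Python standard library. It is not part of oauthlib or of the record above; the token value and client credentials are hypothetical placeholders.

from urllib.parse import urlencode
import base64

def build_revocation_request(token, token_type_hint="access_token",
                             client_id="my-client", client_secret="my-secret"):
    # Request entity-body in application/x-www-form-urlencoded format (RFC 7009).
    body = urlencode({"token": token, "token_type_hint": token_type_hint})
    # Client authentication per RFC 6749 section 2.3; HTTP Basic is one common option.
    credentials = base64.b64encode(
        f"{client_id}:{client_secret}".encode("utf-8")).decode("ascii")
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Authorization": "Basic " + credentials,
    }
    return headers, body

headers, body = build_revocation_request("45ghiukldjahdnhzdauz", token_type_hint="refresh_token")
print(body)  # token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token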
def percent_overlapping_calls(records, min_gab=300):
"""
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
"""
calls = [r for r in records if r.interaction == "call"]
if len(calls) == 0:
return 0.
overlapping_calls = 0
for i, r in enumerate(calls):
if i <= len(calls) - 2:
if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime:
overlapping_calls += 1
return (float(overlapping_calls) / len(calls)) | Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes. | Below is the the instruction that describes the task:
### Input:
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
### Response:
def percent_overlapping_calls(records, min_gab=300):
"""
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
"""
calls = [r for r in records if r.interaction == "call"]
if len(calls) == 0:
return 0.
overlapping_calls = 0
for i, r in enumerate(calls):
if i <= len(calls) - 2:
if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime:
overlapping_calls += 1
return (float(overlapping_calls) / len(calls)) |
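A worked example of the overlap test used in the function above, on two synthetic call records. The Record namedtuple is only an assumed stand-in for the library's own call-record objects, which are not shown in this entry.

from collections import namedtuple
from datetime import datetime, timedelta

Record = namedtuple("Record", ["interaction", "datetime", "call_duration"])

min_gab = 300
first = Record("call", datetime(2024, 1, 1, 10, 0), 900)    # 15-minute call, ends 10:15
second = Record("call", datetime(2024, 1, 1, 10, 12), 300)  # starts at 10:12

# Same expression as in the function: 10:00 + (900 - 300) s = 10:10 < 10:12,
# so the 180 s of actual overlap falls short of min_gab and is not counted.
print(first.datetime + timedelta(seconds=first.call_duration - min_gab) >= second.datetime)  # False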
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs):
"""
Changes the cache implementation for the named cache
"""
self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs) | Changes the cache implementation for the named cache | Below is the the instruction that describes the task:
### Input:
Changes the cache implementation for the named cache
### Response:
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs):
"""
Changes the cache implementation for the named cache
"""
self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs) |
def _inject_synthetic_target(self, vt, sources):
"""Create, inject, and return a synthetic target for the given target and workdir.
:param vt: A codegen input VersionedTarget to inject a synthetic target for.
:param sources: A FilesetWithSpec to inject for the target.
"""
target = vt.target
# NB: For stability, the injected target exposes the stable-symlinked `vt.results_dir`,
# rather than the hash-named `vt.current_results_dir`.
synthetic_target_dir = self.synthetic_target_dir(target, vt.results_dir)
synthetic_target_type = self.synthetic_target_type(target)
synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, synthetic_target_dir)
copied_attributes = {}
for attribute in self._copy_target_attributes:
copied_attributes[attribute] = getattr(target, attribute)
if self._supports_exports(synthetic_target_type):
extra_exports = self.synthetic_target_extra_exports(target, synthetic_target_dir)
extra_exports_not_in_extra_dependencies = set(extra_exports).difference(
set(synthetic_extra_dependencies))
if len(extra_exports_not_in_extra_dependencies) > 0:
raise self.MismatchedExtraExports(
'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}'
.format(extra_exports_not_in_extra_dependencies, target))
extra_export_specs = {e.address.spec for e in extra_exports}
original_export_specs = self._original_export_specs(target)
union = set(original_export_specs).union(extra_export_specs)
copied_attributes['exports'] = sorted(union)
synthetic_target = self.context.add_new_target(
address=self._get_synthetic_address(target, synthetic_target_dir),
target_type=synthetic_target_type,
dependencies=synthetic_extra_dependencies,
sources=sources,
derived_from=target,
**copied_attributes
)
build_graph = self.context.build_graph
# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(
dependent=dependent_address,
dependency=synthetic_target.address,
)
# NB(pl): See the above comment. The same note applies.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=synthetic_target.address,
dependency=concrete_dependency_address,
)
if target in self.context.target_roots:
self.context.target_roots.append(synthetic_target)
return synthetic_target | Create, inject, and return a synthetic target for the given target and workdir.
:param vt: A codegen input VersionedTarget to inject a synthetic target for.
:param sources: A FilesetWithSpec to inject for the target. | Below is the the instruction that describes the task:
### Input:
Create, inject, and return a synthetic target for the given target and workdir.
:param vt: A codegen input VersionedTarget to inject a synthetic target for.
:param sources: A FilesetWithSpec to inject for the target.
### Response:
def _inject_synthetic_target(self, vt, sources):
"""Create, inject, and return a synthetic target for the given target and workdir.
:param vt: A codegen input VersionedTarget to inject a synthetic target for.
:param sources: A FilesetWithSpec to inject for the target.
"""
target = vt.target
# NB: For stability, the injected target exposes the stable-symlinked `vt.results_dir`,
# rather than the hash-named `vt.current_results_dir`.
synthetic_target_dir = self.synthetic_target_dir(target, vt.results_dir)
synthetic_target_type = self.synthetic_target_type(target)
synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, synthetic_target_dir)
copied_attributes = {}
for attribute in self._copy_target_attributes:
copied_attributes[attribute] = getattr(target, attribute)
if self._supports_exports(synthetic_target_type):
extra_exports = self.synthetic_target_extra_exports(target, synthetic_target_dir)
extra_exports_not_in_extra_dependencies = set(extra_exports).difference(
set(synthetic_extra_dependencies))
if len(extra_exports_not_in_extra_dependencies) > 0:
raise self.MismatchedExtraExports(
'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}'
.format(extra_exports_not_in_extra_dependencies, target))
extra_export_specs = {e.address.spec for e in extra_exports}
original_export_specs = self._original_export_specs(target)
union = set(original_export_specs).union(extra_export_specs)
copied_attributes['exports'] = sorted(union)
synthetic_target = self.context.add_new_target(
address=self._get_synthetic_address(target, synthetic_target_dir),
target_type=synthetic_target_type,
dependencies=synthetic_extra_dependencies,
sources=sources,
derived_from=target,
**copied_attributes
)
build_graph = self.context.build_graph
# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(
dependent=dependent_address,
dependency=synthetic_target.address,
)
# NB(pl): See the above comment. The same note applies.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=synthetic_target.address,
dependency=concrete_dependency_address,
)
if target in self.context.target_roots:
self.context.target_roots.append(synthetic_target)
return synthetic_target |
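The dependency rewiring at the end of the method above can be pictured with a toy adjacency dict: dependents of the original target gain an edge to the synthetic target, and the synthetic target takes over the original's own dependencies. The target names and the plain-dict graph below are assumptions for illustration, not the Pants BuildGraph API.

deps = {
    "app": ["codegen_target"],          # app depends on the codegen input target
    "codegen_target": ["runtime_lib"],  # which depends on a runtime library
    "runtime_lib": [],
}

def rewire(graph, original, synthetic):
    graph.setdefault(synthetic, [])
    # Dependents of the original target gain a dependency on the synthetic target.
    for target, target_deps in graph.items():
        if original in target_deps and target != synthetic:
            target_deps.append(synthetic)
    # The synthetic target inherits the original target's own dependencies.
    graph[synthetic].extend(graph[original])

rewire(deps, "codegen_target", "codegen_target.synthetic")
print(deps["app"])                       # ['codegen_target', 'codegen_target.synthetic']
print(deps["codegen_target.synthetic"])  # ['runtime_lib']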
def sync(self, raw_data, row_change_callback=None):
""" Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method
"""
return self._update(raw_data, row_change_callback, delete_rows=True) | Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method | Below is the the instruction that describes the task:
### Input:
Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method
### Response:
def sync(self, raw_data, row_change_callback=None):
""" Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method
"""
return self._update(raw_data, row_change_callback, delete_rows=True) |
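To make the inject/sync distinction in the docstring concrete, here is a semantics-only sketch on a plain dict standing in for the spreadsheet. The keys and helper names are hypothetical; this is not the library's implementation.

existing_rows = {"alice": {"score": 10}, "bob": {"score": 7}, "carol": {"score": 3}}
raw_data = {"alice": {"score": 12}, "dave": {"score": 5}}

def inject(rows, data):
    rows.update(data)                  # add or update rows, never delete

def sync(rows, data):
    inject(rows, data)
    for key in set(rows) - set(data):  # additionally delete rows whose key is absent
        del rows[key]

sync(existing_rows, raw_data)
print(sorted(existing_rows))           # ['alice', 'dave'] -- bob and carol were removed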
def synchronize_simultaneous(self, node_ip):
"""
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) | Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt. | Below is the the instruction that describes the task:
### Input:
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt.
### Response:
def synchronize_simultaneous(self, node_ip):
"""
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) |
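The recency check that drives the RECONNECT message above reduces to a comparison of timestamps. The sketch below uses plain Unix times; the 8-second timeout and the function name are hypothetical stand-ins for the server's own values, not the pyp2p implementation.

import time

challenge_timeout = 8  # seconds; stand-in for self.challege_timeout

def passive_node_is_stale(candidate_time, simultaneous_node_time):
    # If the passive node last connected too long before the candidate,
    # its predicted mappings may have expired, so ask it to reconnect.
    return candidate_time - simultaneous_node_time > challenge_timeout

now = time.time()
print(passive_node_is_stale(now, now - 3))   # False: recent enough, keep waiting
print(passive_node_is_stale(now, now - 30))  # True: send "RECONNECT" to the passive node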
def _init_metadata(self):
"""stub"""
DecimalValuesFormRecord._init_metadata(self)
IntegerValuesFormRecord._init_metadata(self)
TextAnswerFormRecord._init_metadata(self)
super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
self._tolerance_mode_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'tolerance_mode'),
'element_label': 'tolerance_mode',
'instructions': 'enter the tolerance mode',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_string_values': [{
'text': '',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
}],
'syntax': 'STRING',
'minimum_string_length': 0,
'maximum_string_length': 1024,
'string_set': []
} | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
DecimalValuesFormRecord._init_metadata(self)
IntegerValuesFormRecord._init_metadata(self)
TextAnswerFormRecord._init_metadata(self)
super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata()
self._tolerance_mode_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'tolerance_mode'),
'element_label': 'tolerance_mode',
'instructions': 'enter the tolerance mode',
'required': True,
'read_only': False,
'linked': False,
'array': False,
'default_string_values': [{
'text': '',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
}],
'syntax': 'STRING',
'minimum_string_length': 0,
'maximum_string_length': 1024,
'string_set': []
} |