code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def add_disk_encryption_passwords(self, ids, passwords, clear_on_suspend):
"""Adds a password used for hard disk encryption/decryption.
in ids of type str
List of identifiers for the passwords. Must match the identifier
used when the encrypted medium was created.
in passwords of type str
List of passwords.
in clear_on_suspend of type bool
Flag whether to clear the given passwords on VM suspend (due to a suspending host
for example). The passwords must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID.
"""
if not isinstance(ids, list):
raise TypeError("ids can only be an instance of type list")
for a in ids[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(passwords, list):
raise TypeError("passwords can only be an instance of type list")
for a in passwords[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(clear_on_suspend, bool):
raise TypeError("clear_on_suspend can only be an instance of type bool")
self._call("addDiskEncryptionPasswords",
in_p=[ids, passwords, clear_on_suspend]) | Adds a password used for hard disk encryption/decryption.
in ids of type str
List of identifiers for the passwords. Must match the identifier
used when the encrypted medium was created.
in passwords of type str
List of passwords.
in clear_on_suspend of type bool
Flag whether to clear the given passwords on VM suspend (due to a suspending host
for example). The passwords must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID. | Below is the instruction that describes the task:
### Input:
Adds a password used for hard disk encryption/decryption.
in ids of type str
List of identifiers for the passwords. Must match the identifier
used when the encrypted medium was created.
in passwords of type str
List of passwords.
in clear_on_suspend of type bool
Flag whether to clear the given passwords on VM suspend (due to a suspending host
for example). The passwords must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID.
### Response:
def add_disk_encryption_passwords(self, ids, passwords, clear_on_suspend):
"""Adds a password used for hard disk encryption/decryption.
in ids of type str
List of identifiers for the passwords. Must match the identifier
used when the encrypted medium was created.
in passwords of type str
List of passwords.
in clear_on_suspend of type bool
Flag whether to clear the given passwords on VM suspend (due to a suspending host
for example). The passwords must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID.
"""
if not isinstance(ids, list):
raise TypeError("ids can only be an instance of type list")
for a in ids[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(passwords, list):
raise TypeError("passwords can only be an instance of type list")
for a in passwords[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(clear_on_suspend, bool):
raise TypeError("clear_on_suspend can only be an instance of type bool")
self._call("addDiskEncryptionPasswords",
in_p=[ids, passwords, clear_on_suspend]) |
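For illustration, a minimal usage sketch of the method above, assuming console is an already-obtained pyvbox-style Console for a running VM with an encrypted disk; the identifier and password strings are placeholders.

# `console` is assumed to exist: a pyvbox Console attached to a running machine.
ids = ["a9b8c7d6-0000-1111-2222-333344445555"]   # identifiers chosen when the media were encrypted
passwords = ["correct-horse-battery-staple"]     # one password per identifier
console.add_disk_encryption_passwords(ids, passwords, clear_on_suspend=True)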
def cli(ctx, env):
"""Print shell help text."""
env.out("Welcome to the SoftLayer shell.")
env.out("")
formatter = formatting.HelpFormatter()
commands = []
shell_commands = []
for name in cli_core.cli.list_commands(ctx):
command = cli_core.cli.get_command(ctx, name)
if command.short_help is None:
command.short_help = command.help
details = (name, command.short_help)
if name in dict(routes.ALL_ROUTES):
shell_commands.append(details)
else:
commands.append(details)
with formatter.section('Shell Commands'):
formatter.write_dl(shell_commands)
with formatter.section('Commands'):
formatter.write_dl(commands)
for line in formatter.buffer:
env.out(line, newline=False) | Print shell help text. | Below is the instruction that describes the task:
### Input:
Print shell help text.
### Response:
def cli(ctx, env):
"""Print shell help text."""
env.out("Welcome to the SoftLayer shell.")
env.out("")
formatter = formatting.HelpFormatter()
commands = []
shell_commands = []
for name in cli_core.cli.list_commands(ctx):
command = cli_core.cli.get_command(ctx, name)
if command.short_help is None:
command.short_help = command.help
details = (name, command.short_help)
if name in dict(routes.ALL_ROUTES):
shell_commands.append(details)
else:
commands.append(details)
with formatter.section('Shell Commands'):
formatter.write_dl(shell_commands)
with formatter.section('Commands'):
formatter.write_dl(commands)
for line in formatter.buffer:
env.out(line, newline=False) |
def find_repositories_with_locate(path):
"""Use locate to return a sequence of (directory, dotdir) pairs."""
command = [b'locate', b'-0']
for dotdir in DOTDIRS:
# Escaping the slash (using '\/' rather than '/') is an
# important signal to locate(1) that these glob patterns are
# supposed to match the full path, so that things like
# '.hgignore' files do not show up in the result.
command.append(br'%s\/%s' % (escape(path), escape(dotdir)))
command.append(br'%s\/*/%s' % (escape(path), escape(dotdir)))
try:
paths = check_output(command).strip(b'\0').split(b'\0')
except CalledProcessError:
return []
return [os.path.split(p) for p in paths
if not os.path.islink(p) and os.path.isdir(p)] | Use locate to return a sequence of (directory, dotdir) pairs. | Below is the instruction that describes the task:
### Input:
Use locate to return a sequence of (directory, dotdir) pairs.
### Response:
def find_repositories_with_locate(path):
"""Use locate to return a sequence of (directory, dotdir) pairs."""
command = [b'locate', b'-0']
for dotdir in DOTDIRS:
# Escaping the slash (using '\/' rather than '/') is an
# important signal to locate(1) that these glob patterns are
# supposed to match the full path, so that things like
# '.hgignore' files do not show up in the result.
command.append(br'%s\/%s' % (escape(path), escape(dotdir)))
command.append(br'%s\/*/%s' % (escape(path), escape(dotdir)))
try:
paths = check_output(command).strip(b'\0').split(b'\0')
except CalledProcessError:
return []
return [os.path.split(p) for p in paths
if not os.path.islink(p) and os.path.isdir(p)] |
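For illustration, a sketch of the module-level helpers the function above relies on (DOTDIRS and escape) together with the imports it needs; the dot-directory list, the escape implementation and the commented call are assumptions, not the original module's definitions.

import os
import re
from subprocess import CalledProcessError, check_output  # names used by the function above

DOTDIRS = [b'.git', b'.hg', b'.svn']   # assumed set of VCS dot-directories

def escape(path):
    # Escape glob metacharacters so locate(1) treats the bytes literally.
    return re.sub(rb'([*?\[\]\\])', rb'\\\1', path)

# Hypothetical call: find repositories under the current user's home directory.
# repos = find_repositories_with_locate(os.path.expanduser('~').encode())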
def div(self, key, value=2):
"""Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool
"""
return uwsgi.cache_mul(key, value, self.timeout, self.name) | Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool | Below is the instruction that describes the task:
### Input:
Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool
### Response:
def div(self, key, value=2):
"""Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool
"""
return uwsgi.cache_mul(key, value, self.timeout, self.name) |
def post(self, path, data=None, json=None, headers=None, **kwargs):
"""
Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException
"""
if headers is not None:
merger = jsonmerge.Merger(SCHEMA)
kwargs["headers"] = merger.merge(self.defaultHeaders, headers)
else:
kwargs["headers"] = self.defaultHeaders
url = combine_urls(self.host, path)
if self.cert is not None:
kwargs["cert"] = self.cert
self.logger.debug("Trying to send HTTP POST to {}".format(url))
try:
resp = requests.post(url, data, json, **kwargs)
self._log_response(resp)
except requests.RequestException as es:
self._log_exception(es)
raise
return resp | Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException | Below is the instruction that describes the task:
### Input:
Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException
### Response:
def post(self, path, data=None, json=None, headers=None, **kwargs):
"""
Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException
"""
if headers is not None:
merger = jsonmerge.Merger(SCHEMA)
kwargs["headers"] = merger.merge(self.defaultHeaders, headers)
else:
kwargs["headers"] = self.defaultHeaders
url = combine_urls(self.host, path)
if self.cert is not None:
kwargs["cert"] = self.cert
self.logger.debug("Trying to send HTTP POST to {}".format(url))
try:
resp = requests.post(url, data, json, **kwargs)
self._log_response(resp)
except requests.RequestException as es:
self._log_exception(es)
raise
return resp |
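For illustration, a hedged sketch of calling a client built around the method above; the class name HttpApi, its constructor, the URL and the payload are all assumptions. Extra keyword arguments such as timeout are forwarded to requests.post.

api = HttpApi(host="https://api.example.com")       # hypothetical client class and constructor
resp = api.post(
    "/v1/devices",
    json={"name": "sensor-01"},                      # sent as the JSON request body
    headers={"Authorization": "Bearer <token>"},     # merged over api.defaultHeaders
    timeout=10,                                      # passed through **kwargs to requests.post
)
resp.raise_for_status()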
def _get_server(self):
"""
Get server to use for request.
Also process inactive server list, re-add them after given interval.
"""
with self._lock:
inactive_server_count = len(self._inactive_servers)
for i in range(inactive_server_count):
try:
ts, server, message = heapq.heappop(self._inactive_servers)
except IndexError:
pass
else:
if (ts + self.retry_interval) > time():
# Not yet, put it back
heapq.heappush(self._inactive_servers,
(ts, server, message))
else:
self._active_servers.append(server)
logger.warn("Restored server %s into active pool",
server)
# if none is old enough, use oldest
if not self._active_servers:
ts, server, message = heapq.heappop(self._inactive_servers)
self._active_servers.append(server)
logger.info("Restored server %s into active pool", server)
server = self._active_servers[0]
self._roundrobin()
return server | Get server to use for request.
Also process inactive server list, re-add them after given interval. | Below is the instruction that describes the task:
### Input:
Get server to use for request.
Also process inactive server list, re-add them after given interval.
### Response:
def _get_server(self):
"""
Get server to use for request.
Also process inactive server list, re-add them after given interval.
"""
with self._lock:
inactive_server_count = len(self._inactive_servers)
for i in range(inactive_server_count):
try:
ts, server, message = heapq.heappop(self._inactive_servers)
except IndexError:
pass
else:
if (ts + self.retry_interval) > time():
# Not yet, put it back
heapq.heappush(self._inactive_servers,
(ts, server, message))
else:
self._active_servers.append(server)
logger.warn("Restored server %s into active pool",
server)
# if none is old enough, use oldest
if not self._active_servers:
ts, server, message = heapq.heappop(self._inactive_servers)
self._active_servers.append(server)
logger.info("Restored server %s into active pool", server)
server = self._active_servers[0]
self._roundrobin()
return server |
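For illustration, the data shapes the method above works with: _active_servers is a plain list of server addresses and _inactive_servers is a heap of (timestamp, server, message) tuples; the addresses and message below are placeholders.

import heapq
from time import time

_active_servers = ["http://es-node-1:9200"]
_inactive_servers = []
# A node that failed 30 seconds ago, queued for retry once retry_interval has elapsed:
heapq.heappush(_inactive_servers, (time() - 30, "http://es-node-2:9200", "connect timeout"))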
def pyobj_role(make_node, name, rawtext, text, lineno, inliner, options=None, content=None):
"""Include Python object value, rendering it to text using str.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
if options is None:
options = {}
if content is None:
content = []
try:
prefixed_name, obj, parent, modname = import_by_name(text)
except ImportError:
msg = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
node = make_node(rawtext, app, prefixed_name, obj, parent, modname, options)
return [node], [] | Include Python object value, rendering it to text using str.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization. | Below is the instruction that describes the task:
### Input:
Include Python object value, rendering it to text using str.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
### Response:
def pyobj_role(make_node, name, rawtext, text, lineno, inliner, options=None, content=None):
"""Include Python object value, rendering it to text using str.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
if options is None:
options = {}
if content is None:
content = []
try:
prefixed_name, obj, parent, modname = import_by_name(text)
except ImportError:
msg = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
node = make_node(rawtext, app, prefixed_name, obj, parent, modname, options)
return [node], [] |
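For illustration, a hedged sketch of registering the role above in a Sphinx extension, with a simple make_node callable that renders the imported object's value as an inline literal; the role name 'pyobj' and the node choice are assumptions.

from functools import partial

from docutils import nodes

def make_literal_node(rawtext, app, prefixed_name, obj, parent, modname, options):
    # Render the resolved Python object's value as inline literal text.
    return nodes.literal(rawtext, str(obj))

def setup(app):
    app.add_role('pyobj', partial(pyobj_role, make_literal_node))
    return {'parallel_read_safe': True}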
def read_csv(self, file: str, table: str = '_csv', libref: str = '', results: str = '',
opts: dict = None) -> 'SASdata':
"""
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object
"""
opts = opts if opts is not None else {}
if results == '':
results = self.results
self._io.read_csv(file, table, libref, self.nosub, opts)
if self.exist(table, libref):
return SASdata(self, libref, table, results)
else:
return None | :param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object | Below is the instruction that describes the task:
### Input:
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object
### Response:
def read_csv(self, file: str, table: str = '_csv', libref: str = '', results: str = '',
opts: dict = None) -> 'SASdata':
"""
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object
"""
opts = opts if opts is not None else {}
if results == '':
results = self.results
self._io.read_csv(file, table, libref, self.nosub, opts)
if self.exist(table, libref):
return SASdata(self, libref, table, results)
else:
return None |
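For illustration, a hedged usage sketch assuming a configured saspy installation; the CSV path, table name and libref are placeholders.

import saspy

sas = saspy.SASsession()                                  # uses the local sascfg configuration
cars = sas.read_csv('/tmp/cars.csv', table='cars', libref='work')
if cars is not None:                                      # None means the data set was not created
    print(cars.head())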
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""
Given a url and xpath, this function will download, parse, then
iterate though queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
"""
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
sent_xpath_pairs = [
# hard-code paragraph breaks (there has to be a better way)
('\n\n' + s, xpath_finder(n)) if e == 0
else (s, xpath_finder(n))
for n in nodes_with_text
for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
if s.endswith(tuple(SENTENCE_ENDING))
]
return sent_xpath_pairs | Given a url and xpath, this function will download, parse, then
iterate though queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples. | Below is the instruction that describes the task:
### Input:
Given a url and xpath, this function will download, parse, then
iterate though queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
### Response:
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""
Given a url and xpath, this function will download, parse, then
iterate though queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
"""
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
sent_xpath_pairs = [
# hard-code paragraph breaks (there has to be a better way)
('\n\n' + s, xpath_finder(n)) if e == 0
else (s, xpath_finder(n))
for n in nodes_with_text
for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
if s.endswith(tuple(SENTENCE_ENDING))
]
return sent_xpath_pairs |
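For illustration, a hypothetical call to the function above; the URL is a placeholder and the module's default TEXT_FINDER_XPATH is used.

pairs = get_sentence_xpath_tuples('https://example.com/article.html')
for sentence, xpath in pairs[:5]:
    print(xpath, '->', sentence.strip())      # each sentence paired with its exact XPath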
def validate(self):
"""Ensure the Fold block is valid."""
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} '
u'{}'.format(type(self.fold_scope_location), self.fold_scope_location)) | Ensure the Fold block is valid. | Below is the instruction that describes the task:
### Input:
Ensure the Fold block is valid.
### Response:
def validate(self):
"""Ensure the Fold block is valid."""
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} '
u'{}'.format(type(self.fold_scope_location), self.fold_scope_location)) |
def close_session(self):
"""Close current session."""
if not self._session.closed:
if self._session._connector_owner:
self._session._connector.close()
self._session._connector = None | Close current session. | Below is the instruction that describes the task:
### Input:
Close current session.
### Response:
def close_session(self):
"""Close current session."""
if not self._session.closed:
if self._session._connector_owner:
self._session._connector.close()
self._session._connector = None |
def update_filenames(self):
"""Does nothing currently. May not need this method"""
self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, 'sky_files'),
'sky_' + self.sky_state + '_z' + str(
self.sky_zenith) + '_a' + str(
self.sky_azimuth) + '_' + str(
self.num_bands) + '_' + self.ds_code)) | Does nothing currently. May not need this method | Below is the instruction that describes the task:
### Input:
Does nothing currently. May not need this method
### Response:
def update_filenames(self):
"""Does nothing currently. May not need this method"""
self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, 'sky_files'),
'sky_' + self.sky_state + '_z' + str(
self.sky_zenith) + '_a' + str(
self.sky_azimuth) + '_' + str(
self.num_bands) + '_' + self.ds_code)) |
def detranslify(text):
"""Detranslify russian text"""
try:
res = translit.detranslify(text)
except Exception as err:
# because filter must die silently
res = default_value % {'error': err, 'value': text}
return res | Detranslify russian text | Below is the instruction that describes the task:
### Input:
Detranslify russian text
### Response:
def detranslify(text):
"""Detranslify russian text"""
try:
res = translit.detranslify(text)
except Exception as err:
# because filter must die silently
res = default_value % {'error': err, 'value': text}
return res |
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()]) | The total score for the words found, according to the rules. | Below is the instruction that describes the task:
### Input:
The total score for the words found, according to the rules.
### Response:
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()]) |
def eval_option_value(self, option):
""" Evaluates an option
:param option: a string
:return: an object of type str, bool, int, float or list
"""
try:
value = eval(option, {}, {})
except (SyntaxError, NameError, TypeError):
return option
if type(value) in (str, bool, int, float):
return value
elif type(value) in (list, tuple):
for v in value:
if type(v) not in (str, bool, int, float):
self._write_error("Value of element of list object has wrong type %s" % v)
return value
return option | Evaluates an option
:param option: a string
:return: an object of type str, bool, int, float or list | Below is the instruction that describes the task:
### Input:
Evaluates an option
:param option: a string
:return: an object of type str, bool, int, float or list
### Response:
def eval_option_value(self, option):
""" Evaluates an option
:param option: a string
:return: an object of type str, bool, int, float or list
"""
try:
value = eval(option, {}, {})
except (SyntaxError, NameError, TypeError):
return option
if type(value) in (str, bool, int, float):
return value
elif type(value) in (list, tuple):
for v in value:
if type(v) not in (str, bool, int, float):
self._write_error("Value of element of list object has wrong type %s" % v)
return value
return option |
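For illustration, the guarded eval() behaviour the method above relies on, shown with standalone literals; the option strings are placeholders.

print(eval('42', {}, {}))          # -> 42, kept because int is an allowed type
print(eval('[1, 2, 3]', {}, {}))   # -> [1, 2, 3], each element checked against the allowed types
try:
    eval('some free text', {}, {})
except (SyntaxError, NameError, TypeError) as exc:
    # the method catches this and returns the raw option string unchanged
    print('falls back to the raw string:', exc)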
def login(self, **kwargs):
"""登录"""
payload = {
'username': self.username,
'password': self.password,
}
headers = kwargs.setdefault('headers', {})
headers.setdefault(
'Referer',
'https://www.shanbay.com/web/account/login'
)
url = 'https://www.shanbay.com/api/v1/account/login/web/'
response = self.request(url, 'put', json=payload, **kwargs)
r_json = response.json()
return r_json['status_code'] == 0 | 登录 | Below is the instruction that describes the task:
### Input:
登录
### Response:
def login(self, **kwargs):
"""登录"""
payload = {
'username': self.username,
'password': self.password,
}
headers = kwargs.setdefault('headers', {})
headers.setdefault(
'Referer',
'https://www.shanbay.com/web/account/login'
)
url = 'https://www.shanbay.com/api/v1/account/login/web/'
response = self.request(url, 'put', json=payload, **kwargs)
r_json = response.json()
return r_json['status_code'] == 0 |
def object(self, object):
"""
Sets the object of this ServicePackageQuotaHistoryResponse.
Always set to 'service-package-quota-history'.
:param object: The object of this ServicePackageQuotaHistoryResponse.
:type: str
"""
if object is None:
raise ValueError("Invalid value for `object`, must not be `None`")
allowed_values = ["service-package-quota-history"]
if object not in allowed_values:
raise ValueError(
"Invalid value for `object` ({0}), must be one of {1}"
.format(object, allowed_values)
)
self._object = object | Sets the object of this ServicePackageQuotaHistoryResponse.
Always set to 'service-package-quota-history'.
:param object: The object of this ServicePackageQuotaHistoryResponse.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the object of this ServicePackageQuotaHistoryResponse.
Always set to 'service-package-quota-history'.
:param object: The object of this ServicePackageQuotaHistoryResponse.
:type: str
### Response:
def object(self, object):
"""
Sets the object of this ServicePackageQuotaHistoryResponse.
Always set to 'service-package-quota-history'.
:param object: The object of this ServicePackageQuotaHistoryResponse.
:type: str
"""
if object is None:
raise ValueError("Invalid value for `object`, must not be `None`")
allowed_values = ["service-package-quota-history"]
if object not in allowed_values:
raise ValueError(
"Invalid value for `object` ({0}), must be one of {1}"
.format(object, allowed_values)
)
self._object = object |
def _train_model(
self, train_data, loss_fn, valid_data=None, log_writer=None, restore_state={}
):
"""The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur.
"""
# Set model to train mode
self.train()
train_config = self.config["train_config"]
# Convert data to DataLoaders
train_loader = self._create_data_loader(train_data)
valid_loader = self._create_data_loader(valid_data)
epoch_size = len(train_loader.dataset)
# Move model to GPU
if self.config["verbose"] and self.config["device"] != "cpu":
print("Using GPU...")
self.to(self.config["device"])
# Set training components
self._set_writer(train_config)
self._set_logger(train_config, epoch_size)
self._set_checkpointer(train_config)
self._set_optimizer(train_config)
self._set_scheduler(train_config)
# Restore model if necessary
if restore_state:
start_iteration = self._restore_training_state(restore_state)
else:
start_iteration = 0
# Train the model
metrics_hist = {} # The most recently seen value for all metrics
for epoch in range(start_iteration, train_config["n_epochs"]):
progress_bar = (
train_config["progress_bar"]
and self.config["verbose"]
and self.logger.log_unit == "epochs"
)
t = tqdm(
enumerate(train_loader),
total=len(train_loader),
disable=(not progress_bar),
)
self.running_loss = 0.0
self.running_examples = 0
for batch_num, data in t:
# NOTE: actual batch_size may not equal config's target batch_size
batch_size = len(data[0])
# Moving data to device
if self.config["device"] != "cpu":
data = place_on_gpu(data)
# Zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass to calculate the average loss per example
loss = loss_fn(*data)
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
# Backward pass to calculate gradients
# Loss is an average loss per example
loss.backward()
# Perform optimizer step
self.optimizer.step()
# Calculate metrics, log, and checkpoint as necessary
metrics_dict = self._execute_logging(
train_loader, valid_loader, loss, batch_size
)
metrics_hist.update(metrics_dict)
# tqdm output
t.set_postfix(loss=metrics_dict["train/loss"])
# Apply learning rate scheduler
self._update_scheduler(epoch, metrics_hist)
self.eval()
# Restore best model if applicable
if self.checkpointer:
self.checkpointer.load_best_model(model=self)
# Write log if applicable
if self.writer:
if self.writer.include_config:
self.writer.add_config(self.config)
self.writer.close()
# Print confusion matrix if applicable
if self.config["verbose"]:
print("Finished Training")
if valid_loader is not None:
self.score(
valid_loader,
metric=train_config["validation_metric"],
verbose=True,
print_confusion_matrix=True,
) | The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur. | Below is the instruction that describes the task:
### Input:
The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur.
### Response:
def _train_model(
self, train_data, loss_fn, valid_data=None, log_writer=None, restore_state={}
):
"""The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur.
"""
# Set model to train mode
self.train()
train_config = self.config["train_config"]
# Convert data to DataLoaders
train_loader = self._create_data_loader(train_data)
valid_loader = self._create_data_loader(valid_data)
epoch_size = len(train_loader.dataset)
# Move model to GPU
if self.config["verbose"] and self.config["device"] != "cpu":
print("Using GPU...")
self.to(self.config["device"])
# Set training components
self._set_writer(train_config)
self._set_logger(train_config, epoch_size)
self._set_checkpointer(train_config)
self._set_optimizer(train_config)
self._set_scheduler(train_config)
# Restore model if necessary
if restore_state:
start_iteration = self._restore_training_state(restore_state)
else:
start_iteration = 0
# Train the model
metrics_hist = {} # The most recently seen value for all metrics
for epoch in range(start_iteration, train_config["n_epochs"]):
progress_bar = (
train_config["progress_bar"]
and self.config["verbose"]
and self.logger.log_unit == "epochs"
)
t = tqdm(
enumerate(train_loader),
total=len(train_loader),
disable=(not progress_bar),
)
self.running_loss = 0.0
self.running_examples = 0
for batch_num, data in t:
# NOTE: actual batch_size may not equal config's target batch_size
batch_size = len(data[0])
# Moving data to device
if self.config["device"] != "cpu":
data = place_on_gpu(data)
# Zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass to calculate the average loss per example
loss = loss_fn(*data)
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
# Backward pass to calculate gradients
# Loss is an average loss per example
loss.backward()
# Perform optimizer step
self.optimizer.step()
# Calculate metrics, log, and checkpoint as necessary
metrics_dict = self._execute_logging(
train_loader, valid_loader, loss, batch_size
)
metrics_hist.update(metrics_dict)
# tqdm output
t.set_postfix(loss=metrics_dict["train/loss"])
# Apply learning rate scheduler
self._update_scheduler(epoch, metrics_hist)
self.eval()
# Restore best model if applicable
if self.checkpointer:
self.checkpointer.load_best_model(model=self)
# Write log if applicable
if self.writer:
if self.writer.include_config:
self.writer.add_config(self.config)
self.writer.close()
# Print confusion matrix if applicable
if self.config["verbose"]:
print("Finished Training")
if valid_loader is not None:
self.score(
valid_loader,
metric=train_config["validation_metric"],
verbose=True,
print_confusion_matrix=True,
) |
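For illustration, the (X, Y) tuple form of train_data that the routine above accepts before wrapping it in a DataLoader; the tensor shapes, label values and the commented call are assumptions about the surrounding model class.

import torch

X_train = torch.randn(100, 10)             # 100 toy examples with 10 features each
Y_train = torch.randint(1, 3, (100,))      # categorical labels in {1, 2}
train_data = (X_train, Y_train)
# model.train_model(train_data)            # hypothetical public entry point that calls _train_model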
def get_object_reference(obj: Object) -> str:
"""Gets an object reference string from the obj instance.
This adds the object type to ALL_RESOURCES so that it gets documented and
returns a str which contains a sphinx reference to the documented object.
:param obj: The Object instance.
:returns: A sphinx docs reference str.
"""
resource_name = obj.title
if resource_name is None:
class_name = obj.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = obj
return ' See :ref:`resource-{}`.'.format(
'-'.join(resource_name.split(' ')).lower().strip()) | Gets an object reference string from the obj instance.
This adds the object type to ALL_RESOURCES so that it gets documented and
returns a str which contains a sphinx reference to the documented object.
:param obj: The Object instance.
:returns: A sphinx docs reference str. | Below is the instruction that describes the task:
### Input:
Gets an object reference string from the obj instance.
This adds the object type to ALL_RESOURCES so that it gets documented and
returns a str which contains a sphinx reference to the documented object.
:param obj: The Object instance.
:returns: A sphinx docs reference str.
### Response:
def get_object_reference(obj: Object) -> str:
"""Gets an object reference string from the obj instance.
This adds the object type to ALL_RESOURCES so that it gets documented and
returns a str which contains a sphinx reference to the documented object.
:param obj: The Object instance.
:returns: A sphinx docs reference str.
"""
resource_name = obj.title
if resource_name is None:
class_name = obj.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = obj
return ' See :ref:`resource-{}`.'.format(
'-'.join(resource_name.split(' ')).lower().strip()) |
def get_mouse_pos(self, window_pos=None):
"""Return a MousePos filled with the world position and surf it hit."""
window_pos = window_pos or pygame.mouse.get_pos()
# +0.5 to center the point on the middle of the pixel.
window_pt = point.Point(*window_pos) + 0.5
for surf in reversed(self._surfaces):
if (surf.surf_type != SurfType.CHROME and
surf.surf_rect.contains_point(window_pt)):
surf_rel_pt = window_pt - surf.surf_rect.tl
world_pt = surf.world_to_surf.back_pt(surf_rel_pt)
return MousePos(world_pt, surf) | Return a MousePos filled with the world position and surf it hit. | Below is the instruction that describes the task:
### Input:
Return a MousePos filled with the world position and surf it hit.
### Response:
def get_mouse_pos(self, window_pos=None):
"""Return a MousePos filled with the world position and surf it hit."""
window_pos = window_pos or pygame.mouse.get_pos()
# +0.5 to center the point on the middle of the pixel.
window_pt = point.Point(*window_pos) + 0.5
for surf in reversed(self._surfaces):
if (surf.surf_type != SurfType.CHROME and
surf.surf_rect.contains_point(window_pt)):
surf_rel_pt = window_pt - surf.surf_rect.tl
world_pt = surf.world_to_surf.back_pt(surf_rel_pt)
return MousePos(world_pt, surf) |
def write_numeric_array(fd, header, array):
"""Write the numeric array"""
# make a memory file for writing array data
bd = BytesIO()
# write matrix header to memory file
write_var_header(bd, header)
if not isinstance(array, basestring) and header['dims'][0] > 1:
# list array data in column major order
array = list(chain.from_iterable(izip(*array)))
# write matrix data to memory file
write_elements(bd, header['mtp'], array)
# write the variable to disk file
data = bd.getvalue()
bd.close()
write_var_data(fd, data) | Write the numeric array | Below is the instruction that describes the task:
### Input:
Write the numeric array
### Response:
def write_numeric_array(fd, header, array):
"""Write the numeric array"""
# make a memory file for writing array data
bd = BytesIO()
# write matrix header to memory file
write_var_header(bd, header)
if not isinstance(array, basestring) and header['dims'][0] > 1:
# list array data in column major order
array = list(chain.from_iterable(izip(*array)))
# write matrix data to memory file
write_elements(bd, header['mtp'], array)
# write the variable to disk file
data = bd.getvalue()
bd.close()
write_var_data(fd, data) |
def require_at_least_one_query_parameter(*query_parameter_names):
"""
Ensure at least one of the specified query parameters are included in the request.
This decorator checks for the existence of at least one of the specified query
parameters and passes the values as function parameters to the decorated view.
If none of the specified query parameters are included in the request, a
ValidationError is raised.
Usage::
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def my_view(request, program_uuids, course_run_ids):
# Some functionality ...
"""
def outer_wrapper(view):
""" Allow the passing of parameters to require_at_least_one_query_parameter. """
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Checks for the existence of the specified query parameters, raises a
ValidationError if none of them were included in the request.
"""
requirement_satisfied = False
for query_parameter_name in query_parameter_names:
query_parameter_values = request.query_params.getlist(query_parameter_name)
kwargs[query_parameter_name] = query_parameter_values
if query_parameter_values:
requirement_satisfied = True
if not requirement_satisfied:
raise ValidationError(
detail='You must provide at least one of the following query parameters: {params}.'.format(
params=', '.join(query_parameter_names)
)
)
return view(request, *args, **kwargs)
return wrapper
return outer_wrapper | Ensure at least one of the specified query parameters are included in the request.
This decorator checks for the existence of at least one of the specified query
parameters and passes the values as function parameters to the decorated view.
If none of the specified query parameters are included in the request, a
ValidationError is raised.
Usage::
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def my_view(request, program_uuids, course_run_ids):
# Some functionality ... | Below is the instruction that describes the task:
### Input:
Ensure at least one of the specified query parameters are included in the request.
This decorator checks for the existence of at least one of the specified query
parameters and passes the values as function parameters to the decorated view.
If none of the specified query parameters are included in the request, a
ValidationError is raised.
Usage::
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def my_view(request, program_uuids, course_run_ids):
# Some functionality ...
### Response:
def require_at_least_one_query_parameter(*query_parameter_names):
"""
Ensure at least one of the specified query parameters are included in the request.
This decorator checks for the existence of at least one of the specified query
parameters and passes the values as function parameters to the decorated view.
If none of the specified query parameters are included in the request, a
ValidationError is raised.
Usage::
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def my_view(request, program_uuids, course_run_ids):
# Some functionality ...
"""
def outer_wrapper(view):
""" Allow the passing of parameters to require_at_least_one_query_parameter. """
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Checks for the existence of the specified query parameters, raises a
ValidationError if none of them were included in the request.
"""
requirement_satisfied = False
for query_parameter_name in query_parameter_names:
query_parameter_values = request.query_params.getlist(query_parameter_name)
kwargs[query_parameter_name] = query_parameter_values
if query_parameter_values:
requirement_satisfied = True
if not requirement_satisfied:
raise ValidationError(
detail='You must provide at least one of the following query parameters: {params}.'.format(
params=', '.join(query_parameter_names)
)
)
return view(request, *args, **kwargs)
return wrapper
return outer_wrapper |
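For illustration, a hedged sketch of the decorator above on a Django REST Framework view; the view name, route and parameter values are placeholders.

from rest_framework.decorators import api_view
from rest_framework.response import Response

@api_view(['GET'])
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def catalog_contains(request, program_uuids, course_run_ids):
    # e.g. GET /contains/?program_uuids=3b2fc... or ?course_run_ids=course-v1:edX+DemoX+1T2019
    return Response({'programs': program_uuids, 'course_runs': course_run_ids})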
def count_(self):
"""
Returns the number of rows of the main dataframe
"""
try:
num = len(self.df.index)
except Exception as e:
self.err(e, "Can not count data")
return
return num | Returns the number of rows of the main dataframe | Below is the the instruction that describes the task:
### Input:
Returns the number of rows of the main dataframe
### Response:
def count_(self):
"""
Returns the number of rows of the main dataframe
"""
try:
num = len(self.df.index)
except Exception as e:
self.err(e, "Can not count data")
return
return num |
def attach_volume_to_device(self, volume_id, device_id):
"""Attaches the created Volume to a Device.
"""
try:
volume = self.manager.get_volume(volume_id)
volume.attach(device_id)
except packet.baseapi.Error as msg:
raise PacketManagerException(msg)
return volume | Attaches the created Volume to a Device. | Below is the instruction that describes the task:
### Input:
Attaches the created Volume to a Device.
### Response:
def attach_volume_to_device(self, volume_id, device_id):
"""Attaches the created Volume to a Device.
"""
try:
volume = self.manager.get_volume(volume_id)
volume.attach(device_id)
except packet.baseapi.Error as msg:
raise PacketManagerException(msg)
return volume |
def mock_attr(self, *args, **kwargs):
"""
Empty method to call to slurp up args and kwargs.
`args` get pushed onto the url path.
`kwargs` are converted to a query string and appended to the URL.
"""
self.path.extend(args)
self.qs.update(kwargs)
return self | Empty method to call to slurp up args and kwargs.
`args` get pushed onto the url path.
`kwargs` are converted to a query string and appended to the URL. | Below is the instruction that describes the task:
### Input:
Empty method to call to slurp up args and kwargs.
`args` get pushed onto the url path.
`kwargs` are converted to a query string and appended to the URL.
### Response:
def mock_attr(self, *args, **kwargs):
"""
Empty method to call to slurp up args and kwargs.
`args` get pushed onto the url path.
`kwargs` are converted to a query string and appended to the URL.
"""
self.path.extend(args)
self.qs.update(kwargs)
return self |
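For illustration, a self-contained stand-in class showing the slurping behaviour described above: positional arguments extend the URL path and keyword arguments accumulate in the query string; the class name and values are made up.

class UrlMock:
    def __init__(self):
        self.path, self.qs = [], {}

    mock_attr = mock_attr        # reuse the module-level method defined above

m = UrlMock()
m.mock_attr('users', 42).mock_attr('posts', page=2)
print(m.path, m.qs)              # ['users', 42, 'posts'] {'page': 2}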
def open_handle(self, dwDesiredAccess = win32.THREAD_ALL_ACCESS):
"""
Opens a new handle to the thread, closing the previous one.
The new handle is stored in the L{hThread} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.THREAD_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the thread
with the requested access rights. This tipically happens because
the target thread belongs to system process and the debugger is not
runnning with administrative rights.
"""
hThread = win32.OpenThread(dwDesiredAccess, win32.FALSE, self.dwThreadId)
# In case hThread was set to an actual handle value instead of a Handle
# object. This shouldn't happen unless the user tinkered with it.
if not hasattr(self.hThread, '__del__'):
self.close_handle()
self.hThread = hThread | Opens a new handle to the thread, closing the previous one.
The new handle is stored in the L{hThread} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.THREAD_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the thread
with the requested access rights. This tipically happens because
the target thread belongs to system process and the debugger is not
runnning with administrative rights. | Below is the instruction that describes the task:
### Input:
Opens a new handle to the thread, closing the previous one.
The new handle is stored in the L{hThread} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.THREAD_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the thread
with the requested access rights. This tipically happens because
the target thread belongs to system process and the debugger is not
runnning with administrative rights.
### Response:
def open_handle(self, dwDesiredAccess = win32.THREAD_ALL_ACCESS):
"""
Opens a new handle to the thread, closing the previous one.
The new handle is stored in the L{hThread} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.THREAD_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the thread
with the requested access rights. This tipically happens because
the target thread belongs to system process and the debugger is not
runnning with administrative rights.
"""
hThread = win32.OpenThread(dwDesiredAccess, win32.FALSE, self.dwThreadId)
# In case hThread was set to an actual handle value instead of a Handle
# object. This shouldn't happen unless the user tinkered with it.
if not hasattr(self.hThread, '__del__'):
self.close_handle()
self.hThread = hThread |
async def nextset(self):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
return
if not current_result.has_next:
return
self._result = None
self._clear_result()
await conn.next_result()
await self._do_get_result()
return True | Get the next query set | Below is the instruction that describes the task:
### Input:
Get the next query set
### Response:
async def nextset(self):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
return
if not current_result.has_next:
return
self._result = None
self._clear_result()
await conn.next_result()
await self._do_get_result()
return True |
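For illustration, a hedged aiomysql-style sketch that drains every result set of a multi-statement query using the coroutine above; it assumes the connection was opened with multi-statement support enabled.

async def dump_all_result_sets(cursor):
    await cursor.execute("SELECT 1; SELECT 2;")      # two statements, two result sets
    while True:
        print(await cursor.fetchall())
        if not await cursor.nextset():               # returns None once no result set remains
            break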
def _set_output_arguments(self):
"""Activate output arguments parsing"""
group = self.parser.add_argument_group('output arguments')
group.add_argument('-o', '--output', type=argparse.FileType('w'),
dest='outfile', default=sys.stdout,
help="output file")
group.add_argument('--json-line', dest='json_line', action='store_true',
help="produce a JSON line for each output item") | Activate output arguments parsing | Below is the the instruction that describes the task:
### Input:
Activate output arguments parsing
### Response:
def _set_output_arguments(self):
"""Activate output arguments parsing"""
group = self.parser.add_argument_group('output arguments')
group.add_argument('-o', '--output', type=argparse.FileType('w'),
dest='outfile', default=sys.stdout,
help="output file")
group.add_argument('--json-line', dest='json_line', action='store_true',
help="produce a JSON line for each output item") |
def savorSessionCookie(self, request):
"""
Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL.
"""
cookieValue = request.getSession().uid
request.addCookie(
self.cookieKey, cookieValue, path='/',
max_age=PERSISTENT_SESSION_LIFETIME,
domain=self.cookieDomainForRequest(request)) | Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL. | Below is the instruction that describes the task:
### Input:
Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL.
### Response:
def savorSessionCookie(self, request):
"""
Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL.
"""
cookieValue = request.getSession().uid
request.addCookie(
self.cookieKey, cookieValue, path='/',
max_age=PERSISTENT_SESSION_LIFETIME,
domain=self.cookieDomainForRequest(request)) |
def ContextTupleToDict(context):
"""Convert a tuple representing a context into a dict of (key, value) pairs
"""
d = {}
if not context:
return d
for k, v in zip(ExceptionWithContext.CONTEXT_PARTS, context):
if v != '' and v != None: # Don't ignore int(0), a valid row_num
d[k] = v
return d | Convert a tuple representing a context into a dict of (key, value) pairs | Below is the instruction that describes the task:
### Input:
Convert a tuple representing a context into a dict of (key, value) pairs
### Response:
def ContextTupleToDict(context):
"""Convert a tuple representing a context into a dict of (key, value) pairs
"""
d = {}
if not context:
return d
for k, v in zip(ExceptionWithContext.CONTEXT_PARTS, context):
if v != '' and v != None: # Don't ignore int(0), a valid row_num
d[k] = v
return d |
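For illustration, a hypothetical call to the function above, assuming CONTEXT_PARTS orders the fields as (file_name, row_num, row, headers); the values are placeholders.

context = ('stops.txt', 0, ['S1', 'Stop One'], ['stop_id', 'stop_name'])
print(ContextTupleToDict(context))
# row_num 0 is kept (a valid row number), while '' and None entries would be dropped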
def churn_rate(user, summary='default', **kwargs):
"""
Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks.
"""
if len(user.records) == 0:
return statistics([], summary=summary)
query = {
'groupby': 'week',
'divide_by': OrderedDict([
('part_of_week', ['allweek']),
('part_of_day', ['allday'])
]),
'using': 'records',
'filter_empty': True,
'binning': True
}
rv = grouping_query(user, query)
weekly_positions = rv[0][1]
all_positions = list(set(p for l in weekly_positions for p in l))
frequencies = {}
cos_dist = []
for week, week_positions in enumerate(weekly_positions):
count = Counter(week_positions)
total = sum(count.values())
frequencies[week] = [count.get(p, 0) / total for p in all_positions]
all_indexes = range(len(all_positions))
for f_1, f_2 in pairwise(list(frequencies.values())):
num = sum(f_1[a] * f_2[a] for a in all_indexes)
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5))
return statistics(cos_dist, summary=summary) | Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks. | Below is the instruction that describes the task:
### Input:
Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks.
### Response:
def churn_rate(user, summary='default', **kwargs):
"""
Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks.
"""
if len(user.records) == 0:
return statistics([], summary=summary)
query = {
'groupby': 'week',
'divide_by': OrderedDict([
('part_of_week', ['allweek']),
('part_of_day', ['allday'])
]),
'using': 'records',
'filter_empty': True,
'binning': True
}
rv = grouping_query(user, query)
weekly_positions = rv[0][1]
all_positions = list(set(p for l in weekly_positions for p in l))
frequencies = {}
cos_dist = []
for week, week_positions in enumerate(weekly_positions):
count = Counter(week_positions)
total = sum(count.values())
frequencies[week] = [count.get(p, 0) / total for p in all_positions]
all_indexes = range(len(all_positions))
for f_1, f_2 in pairwise(list(frequencies.values())):
num = sum(f_1[a] * f_2[a] for a in all_indexes)
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5))
return statistics(cos_dist, summary=summary) |
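For illustration, the cosine-distance step at the heart of the function above, computed standalone on two toy weekly frequency vectors over the same three towers.

f_1 = [0.5, 0.5, 0.0]                      # week 1: tower visit frequencies
f_2 = [0.25, 0.5, 0.25]                    # week 2
num = sum(a * b for a, b in zip(f_1, f_2))
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
print(1 - num / (denom_1 ** .5 * denom_2 ** .5))   # ~0.134, one per-pair churn value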
def get_branch_info(self):
"""
Retrieve branch_info from Satellite Server
"""
branch_info = None
if os.path.exists(constants.cached_branch_info):
# use cached branch info file if less than 10 minutes old
# (failsafe, should be deleted at end of client run normally)
logger.debug(u'Reading branch info from cached file.')
ctime = datetime.utcfromtimestamp(
os.path.getctime(constants.cached_branch_info))
if datetime.utcnow() < (ctime + timedelta(minutes=5)):
with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
branch_info = json.load(f)
return branch_info
else:
logger.debug(u'Cached branch info is older than 5 minutes.')
logger.debug(u'Obtaining branch information from %s',
self.branch_info_url)
net_logger.info(u'GET %s', self.branch_info_url)
response = self.session.get(self.branch_info_url,
timeout=self.config.http_timeout)
logger.debug(u'GET branch_info status: %s', response.status_code)
if response.status_code != 200:
logger.debug("There was an error obtaining branch information.")
logger.debug(u'Bad status from server: %s', response.status_code)
logger.debug("Assuming default branch information %s" % self.branch_info)
return False
branch_info = response.json()
logger.debug(u'Branch information: %s', json.dumps(branch_info))
# Determine if we are connected to Satellite 5
        if ((branch_info[u'remote_branch'] != -1 and
                branch_info[u'remote_leaf'] == -1)):
self.get_satellite5_info(branch_info)
logger.debug(u'Saving branch info to file.')
with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
            # json.dump is broken in py2 so use dumps
bi_str = json.dumps(branch_info, ensure_ascii=False)
f.write(bi_str)
self.branch_info = branch_info
return branch_info | Retrieve branch_info from Satellite Server | Below is the the instruction that describes the task:
### Input:
Retrieve branch_info from Satellite Server
### Response:
def get_branch_info(self):
"""
Retrieve branch_info from Satellite Server
"""
branch_info = None
if os.path.exists(constants.cached_branch_info):
            # use cached branch info file if less than 5 minutes old
# (failsafe, should be deleted at end of client run normally)
logger.debug(u'Reading branch info from cached file.')
ctime = datetime.utcfromtimestamp(
os.path.getctime(constants.cached_branch_info))
if datetime.utcnow() < (ctime + timedelta(minutes=5)):
with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
branch_info = json.load(f)
return branch_info
else:
logger.debug(u'Cached branch info is older than 5 minutes.')
logger.debug(u'Obtaining branch information from %s',
self.branch_info_url)
net_logger.info(u'GET %s', self.branch_info_url)
response = self.session.get(self.branch_info_url,
timeout=self.config.http_timeout)
logger.debug(u'GET branch_info status: %s', response.status_code)
if response.status_code != 200:
logger.debug("There was an error obtaining branch information.")
logger.debug(u'Bad status from server: %s', response.status_code)
logger.debug("Assuming default branch information %s" % self.branch_info)
return False
branch_info = response.json()
logger.debug(u'Branch information: %s', json.dumps(branch_info))
# Determine if we are connected to Satellite 5
        if ((branch_info[u'remote_branch'] != -1 and
                branch_info[u'remote_leaf'] == -1)):
self.get_satellite5_info(branch_info)
logger.debug(u'Saving branch info to file.')
with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
            # json.dump is broken in py2 so use dumps
bi_str = json.dumps(branch_info, ensure_ascii=False)
f.write(bi_str)
self.branch_info = branch_info
return branch_info |
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
"""
  TensorFlow implementation for applying perturbations to input features based
  on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
  :param clip_min: minimum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class
"""
warnings.warn(
"This function is dead code and will be removed on or after 2019-07-18")
# perturb our input sample
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
  return X | TensorFlow implementation for applying perturbations to input features based
  on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
  :param clip_min: minimum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class | Below is the the instruction that describes the task:
### Input:
  TensorFlow implementation for applying perturbations to input features based
  on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
  :param clip_min: minimum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class
### Response:
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
"""
  TensorFlow implementation for applying perturbations to input features based
  on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
  :param clip_min: minimum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
: return: a perturbed input feature matrix for a target class
"""
warnings.warn(
"This function is dead code and will be removed on or after 2019-07-18")
# perturb our input sample
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
return X |
def _get_media(media_types):
"""Helper method to map the media types."""
get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
return list(map(get_mapped_media, media_types)) | Helper method to map the media types. | Below is the the instruction that describes the task:
### Input:
Helper method to map the media types.
### Response:
def _get_media(media_types):
"""Helper method to map the media types."""
get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
return list(map(get_mapped_media, media_types)) |
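A hedged illustration, assuming maps.VIRTUAL_MEDIA_TYPES_MAP contained entries such as {'cdrom': 'CDROM', 'floppy': 'FLOPPY'} (illustrative values only):
_get_media(['cdrom', 'usb'])
# -> ['CDROM', None]   # unknown media types map to None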
def __get_zero_seq_indexes(self, message: str, following_zeros: int):
"""
:rtype: list[tuple of int]
"""
result = []
if following_zeros > len(message):
return result
zero_counter = 0
for i in range(0, len(message)):
if message[i] == "0":
zero_counter += 1
else:
if zero_counter >= following_zeros:
result.append((i - zero_counter, i))
zero_counter = 0
if zero_counter >= following_zeros:
result.append((len(message) - 1 - following_zeros, len(message) - 1))
return result | :rtype: list[tuple of int] | Below is the the instruction that describes the task:
### Input:
:rtype: list[tuple of int]
### Response:
def __get_zero_seq_indexes(self, message: str, following_zeros: int):
"""
:rtype: list[tuple of int]
"""
result = []
if following_zeros > len(message):
return result
zero_counter = 0
for i in range(0, len(message)):
if message[i] == "0":
zero_counter += 1
else:
if zero_counter >= following_zeros:
result.append((i - zero_counter, i))
zero_counter = 0
if zero_counter >= following_zeros:
result.append((len(message) - 1 - following_zeros, len(message) - 1))
return result |
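A worked example of the intended behaviour: with message = '110000101' and following_zeros = 3, the run of four zeros at indexes 2-5 is long enough, so the method returns [(2, 6)], where the second element is the index of the '1' that terminates the run.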
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor:
"""Return the inner product between two tensors"""
# Note: Relying on fact that vdot flattens arrays
return np.vdot(tensor0, tensor1) | Return the inner product between two tensors | Below is the the instruction that describes the task:
### Input:
Return the inner product between two tensors
### Response:
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor:
"""Return the inner product between two tensors"""
# Note: Relying on fact that vdot flattens arrays
return np.vdot(tensor0, tensor1) |
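A quick sketch of what np.vdot does here:
import numpy as np
np.vdot([1, 2], [3, 4])        # 11, the ordinary dot product for real vectors
np.vdot([1j, 0], [1j, 0])      # (1+0j): the first argument is conjugated, so <v|v> comes out real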
def get_volume_options(volumes):
"""
Generates volume options to run methods.
:param volumes: tuple or list of tuples in form target x source,target x source,target,mode.
:return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...]
"""
if not isinstance(volumes, list):
volumes = [volumes]
volumes = [Volume.create_from_tuple(v) for v in volumes]
result = []
for v in volumes:
result += ["-v", str(v)]
return result | Generates volume options to run methods.
:param volumes: tuple or list of tuples in form target x source,target x source,target,mode.
:return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...] | Below is the the instruction that describes the task:
### Input:
Generates volume options to run methods.
:param volumes: tuple or list of tuples in form target x source,target x source,target,mode.
:return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...]
### Response:
def get_volume_options(volumes):
"""
Generates volume options to run methods.
:param volumes: tuple or list of tuples in form target x source,target x source,target,mode.
:return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...]
"""
if not isinstance(volumes, list):
volumes = [volumes]
volumes = [Volume.create_from_tuple(v) for v in volumes]
result = []
for v in volumes:
result += ["-v", str(v)]
return result |
def compute_elementary_effects(model_inputs, model_outputs, trajectory_size,
delta):
'''
Arguments
---------
model_inputs : matrix of inputs to the model under analysis.
x-by-r where x is the number of variables and
r is the number of rows (a function of x and num_trajectories)
model_outputs
an r-length vector of model outputs
trajectory_size
a scalar indicating the number of rows in a trajectory
delta : float
scaling factor computed from `num_levels`
'''
num_vars = model_inputs.shape[1]
num_rows = model_inputs.shape[0]
num_trajectories = int(num_rows / trajectory_size)
    ee = np.zeros((num_trajectories, num_vars), dtype=float)
ip_vec = model_inputs.reshape(num_trajectories, trajectory_size, num_vars)
ip_cha = np.subtract(ip_vec[:, 1:, :], ip_vec[:, 0:-1, :])
up = (ip_cha > 0)
lo = (ip_cha < 0)
op_vec = model_outputs.reshape(num_trajectories, trajectory_size)
result_up = get_increased_values(op_vec, up, lo)
result_lo = get_decreased_values(op_vec, up, lo)
ee = np.subtract(result_up, result_lo)
np.divide(ee, delta, out=ee)
return ee | Arguments
---------
model_inputs : matrix of inputs to the model under analysis.
x-by-r where x is the number of variables and
r is the number of rows (a function of x and num_trajectories)
model_outputs
an r-length vector of model outputs
trajectory_size
a scalar indicating the number of rows in a trajectory
delta : float
scaling factor computed from `num_levels` | Below is the the instruction that describes the task:
### Input:
Arguments
---------
model_inputs : matrix of inputs to the model under analysis.
x-by-r where x is the number of variables and
r is the number of rows (a function of x and num_trajectories)
model_outputs
an r-length vector of model outputs
trajectory_size
a scalar indicating the number of rows in a trajectory
delta : float
scaling factor computed from `num_levels`
### Response:
def compute_elementary_effects(model_inputs, model_outputs, trajectory_size,
delta):
'''
Arguments
---------
model_inputs : matrix of inputs to the model under analysis.
x-by-r where x is the number of variables and
r is the number of rows (a function of x and num_trajectories)
model_outputs
an r-length vector of model outputs
trajectory_size
a scalar indicating the number of rows in a trajectory
delta : float
scaling factor computed from `num_levels`
'''
num_vars = model_inputs.shape[1]
num_rows = model_inputs.shape[0]
num_trajectories = int(num_rows / trajectory_size)
    ee = np.zeros((num_trajectories, num_vars), dtype=float)
ip_vec = model_inputs.reshape(num_trajectories, trajectory_size, num_vars)
ip_cha = np.subtract(ip_vec[:, 1:, :], ip_vec[:, 0:-1, :])
up = (ip_cha > 0)
lo = (ip_cha < 0)
op_vec = model_outputs.reshape(num_trajectories, trajectory_size)
result_up = get_increased_values(op_vec, up, lo)
result_lo = get_decreased_values(op_vec, up, lo)
ee = np.subtract(result_up, result_lo)
np.divide(ee, delta, out=ee)
return ee |
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
"""A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
"""
for addedNode, attribs in addedGraph.nodes(data = True):
if incrementedNodeVal:
try:
targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
except KeyError:
targetGraph.add_node(addedNode, **attribs)
else:
if not targetGraph.has_node(addedNode):
targetGraph.add_node(addedNode, **attribs)
for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
if incrementedEdgeVal:
try:
targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
except KeyError:
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
else:
            if not targetGraph.has_edge(edgeNode1, edgeNode2):
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) | A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. | Below is the the instruction that describes the task:
### Input:
A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
### Response:
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
"""A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
"""
for addedNode, attribs in addedGraph.nodes(data = True):
if incrementedNodeVal:
try:
targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
except KeyError:
targetGraph.add_node(addedNode, **attribs)
else:
if not targetGraph.has_node(addedNode):
targetGraph.add_node(addedNode, **attribs)
for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
if incrementedEdgeVal:
try:
targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
except KeyError:
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
else:
            if not targetGraph.has_edge(edgeNode1, edgeNode2):
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) |
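A small usage sketch with networkx (the G.node accessor in the code implies a networkx version older than 2.4):
import networkx as nx

G = nx.Graph()
G.add_node('a', count=2)
G.add_edge('a', 'b', weight=1)

H = nx.Graph()
H.add_node('a', count=3)
H.add_edge('a', 'b', weight=4)
H.add_edge('b', 'c', weight=1)

mergeGraphs(G, H)
# 'a' now has count 5, edge ('a', 'b') has weight 5, and edge ('b', 'c') was added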
def deliver_tx(self, raw_transaction):
"""Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction.
"""
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
logger.debug('deliver_tx: INVALID')
return ResponseDeliverTx(code=CodeTypeError)
else:
logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=CodeTypeOk) | Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction. | Below is the the instruction that describes the task:
### Input:
Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction.
### Response:
def deliver_tx(self, raw_transaction):
"""Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction.
"""
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
logger.debug('deliver_tx: INVALID')
return ResponseDeliverTx(code=CodeTypeError)
else:
logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=CodeTypeOk) |
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise | Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'> | Below is the the instruction that describes the task:
### Input:
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
### Response:
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise |
def run_solr_text_on(solrInstance, category, q, qf, fields, optionals):
"""
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
"""
if optionals == None:
optionals = ""
query = solrInstance.value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category.value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals
# print("QUERY: ", query)
response = requests.get(query)
return response.json()['response']['docs'] | Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id | Below is the the instruction that describes the task:
### Input:
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
### Response:
def run_solr_text_on(solrInstance, category, q, qf, fields, optionals):
"""
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
"""
if optionals == None:
optionals = ""
query = solrInstance.value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category.value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals
# print("QUERY: ", query)
response = requests.get(query)
return response.json()['response']['docs'] |
def radec2azel(ra_deg: float, dec_deg: float,
lat_deg: float, lon_deg: float,
time: datetime, usevallado: bool = False) -> Tuple[float, float]:
"""
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwise from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
"""
if usevallado or Time is None:
return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
# %% input trapping
lat = np.atleast_1d(lat_deg)
lon = np.atleast_1d(lon_deg)
ra = np.atleast_1d(ra_deg)
dec = np.atleast_1d(dec_deg)
obs = EarthLocation(lat=lat * u.deg,
lon=lon * u.deg)
points = SkyCoord(Angle(ra, unit=u.deg),
Angle(dec, unit=u.deg),
equinox='J2000.0')
altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
return altaz.az.degree, altaz.alt.degree | sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwise from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)] | Below is the the instruction that describes the task:
### Input:
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwise from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
### Response:
def radec2azel(ra_deg: float, dec_deg: float,
lat_deg: float, lon_deg: float,
time: datetime, usevallado: bool = False) -> Tuple[float, float]:
"""
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwise from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
"""
if usevallado or Time is None:
return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
# %% input trapping
lat = np.atleast_1d(lat_deg)
lon = np.atleast_1d(lon_deg)
ra = np.atleast_1d(ra_deg)
dec = np.atleast_1d(dec_deg)
obs = EarthLocation(lat=lat * u.deg,
lon=lon * u.deg)
points = SkyCoord(Angle(ra, unit=u.deg),
Angle(dec, unit=u.deg),
equinox='J2000.0')
altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
return altaz.az.degree, altaz.alt.degree |
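A usage sketch (illustrative coordinates; astropy does the actual transform):
from datetime import datetime

az, el = radec2azel(ra_deg=279.23, dec_deg=38.78,      # roughly Vega
                    lat_deg=65.1, lon_deg=-147.5,      # an arbitrary observer
                    time=datetime(2015, 7, 1, 12, 0, 0))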
def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
"""Simulate coefficients using bootstrap samples."""
# Sample indices uniformly from {0, ..., n_bootstraps - 1}
# (Wood pg. 199 step 6)
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
# Simulate `n_draws` many random coefficient vectors from a
# multivariate normal distribution with mean and covariance given by
# the bootstrap samples (indexed by `random_bootstrap_indices`) of
# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw
# many samples from a certain distribution all at once, we make a dict
# mapping bootstrap indices to draw indices and use the `size`
# parameter of `np.random.multivariate_normal` to sample the draws
# needed from that bootstrap sample all at once.
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws | Simulate coefficients using bootstrap samples. | Below is the the instruction that describes the task:
### Input:
Simulate coefficients using bootstrap samples.
### Response:
def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
"""Simulate coefficients using bootstrap samples."""
# Sample indices uniformly from {0, ..., n_bootstraps - 1}
# (Wood pg. 199 step 6)
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
# Simulate `n_draws` many random coefficient vectors from a
# multivariate normal distribution with mean and covariance given by
# the bootstrap samples (indexed by `random_bootstrap_indices`) of
# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw
# many samples from a certain distribution all at once, we make a dict
# mapping bootstrap indices to draw indices and use the `size`
# parameter of `np.random.multivariate_normal` to sample the draws
# needed from that bootstrap sample all at once.
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws |
def run_shell_command(commands, **kwargs):
"""Run a shell command."""
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error | Run a shell command. | Below is the the instruction that describes the task:
### Input:
Run a shell command.
### Response:
def run_shell_command(commands, **kwargs):
"""Run a shell command."""
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error |
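A quick usage sketch:
code, out, err = run_shell_command(['echo', 'hello'])
print(code, out.decode().strip())   # 0 hello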
def db_create(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
template=None,
runas=None):
'''
    Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis
'''
# Base query to create a database
query = 'CREATE DATABASE "{0}"'.format(name)
# "With"-options to create a database
with_args = salt.utils.odict.OrderedDict([
('TABLESPACE', _quote_ddl_value(tablespace, '"')),
# owner needs to be enclosed in double quotes so postgres
# doesn't get thrown by dashes in the name
('OWNER', _quote_ddl_value(owner, '"')),
('TEMPLATE', template),
('ENCODING', _quote_ddl_value(encoding)),
('LC_COLLATE', _quote_ddl_value(lc_collate)),
('LC_CTYPE', _quote_ddl_value(lc_ctype)),
])
with_chunks = []
for key, value in with_args.items():
if value is not None:
with_chunks += [key, '=', value]
# Build a final query
if with_chunks:
with_chunks.insert(0, ' WITH')
query += ' '.join(with_chunks)
# Execute the command
ret = _psql_prepare_and_run(['-c', query],
user=user, host=host, port=port,
maintenance_db=maintenance_db,
password=password, runas=runas)
    return ret['retcode'] == 0 | Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis | Below is the the instruction that describes the task:
### Input:
    Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis
### Response:
def db_create(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
template=None,
runas=None):
'''
    Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis
'''
# Base query to create a database
query = 'CREATE DATABASE "{0}"'.format(name)
# "With"-options to create a database
with_args = salt.utils.odict.OrderedDict([
('TABLESPACE', _quote_ddl_value(tablespace, '"')),
# owner needs to be enclosed in double quotes so postgres
# doesn't get thrown by dashes in the name
('OWNER', _quote_ddl_value(owner, '"')),
('TEMPLATE', template),
('ENCODING', _quote_ddl_value(encoding)),
('LC_COLLATE', _quote_ddl_value(lc_collate)),
('LC_CTYPE', _quote_ddl_value(lc_ctype)),
])
with_chunks = []
for key, value in with_args.items():
if value is not None:
with_chunks += [key, '=', value]
# Build a final query
if with_chunks:
with_chunks.insert(0, ' WITH')
query += ' '.join(with_chunks)
# Execute the command
ret = _psql_prepare_and_run(['-c', query],
user=user, host=host, port=port,
maintenance_db=maintenance_db,
password=password, runas=runas)
return ret['retcode'] == 0 |
def addcomment(self, comment, private=False):
"""
Add the given comment to this bug. Set private to True to mark this
comment as private.
"""
# Note: fedora bodhi uses this function
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals) | Add the given comment to this bug. Set private to True to mark this
comment as private. | Below is the the instruction that describes the task:
### Input:
Add the given comment to this bug. Set private to True to mark this
comment as private.
### Response:
def addcomment(self, comment, private=False):
"""
Add the given comment to this bug. Set private to True to mark this
comment as private.
"""
# Note: fedora bodhi uses this function
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals) |
def _safe_dump(data):
'''
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
'''
custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper')
def boto_listelement_presenter(dumper, data):
return dumper.represent_list(list(data))
yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement,
boto_listelement_presenter,
Dumper=custom_dumper)
def dimension_presenter(dumper, data):
return dumper.represent_dict(dict(data))
yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension,
dimension_presenter, Dumper=custom_dumper)
return __utils__['yaml.dump'](data, Dumper=custom_dumper) | this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms() | Below is the the instruction that describes the task:
### Input:
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
### Response:
def _safe_dump(data):
'''
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
'''
custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper')
def boto_listelement_presenter(dumper, data):
return dumper.represent_list(list(data))
yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement,
boto_listelement_presenter,
Dumper=custom_dumper)
def dimension_presenter(dumper, data):
return dumper.represent_dict(dict(data))
yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension,
dimension_presenter, Dumper=custom_dumper)
return __utils__['yaml.dump'](data, Dumper=custom_dumper) |
def remove_tmp_prefix_from_filename(filename):
"""
Remove tmp prefix from filename.
"""
if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):] | Remove tmp prefix from filename. | Below is the the instruction that describes the task:
### Input:
Remove tmp prefix from filename.
### Response:
def remove_tmp_prefix_from_filename(filename):
"""
Remove tmp prefix from filename.
"""
if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):] |
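A hedged example, assuming DJU_IMG_UPLOAD_TMP_PREFIX were 'tmp_' (the real prefix comes from dju_settings):
remove_tmp_prefix_from_filename('tmp_4f3a9c.jpg')   # -> '4f3a9c.jpg'
remove_tmp_prefix_from_filename('4f3a9c.jpg')       # raises RuntimeError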
def from_ZNM(cls, Z, N, M, name=''):
"""
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
"""
df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
df.name = name
return cls(df=df, name=name) | Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64 | Below is the the instruction that describes the task:
### Input:
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
### Response:
def from_ZNM(cls, Z, N, M, name=''):
"""
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
"""
df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
df.name = name
return cls(df=df, name=name) |
def p_plus_assignment(self, t):
'''plus_assignment : IDENT EQ PLUS'''
self.accu.add(Term('obs_vlabel', [self.name,"gen(\""+t[1]+"\")","1"])) | plus_assignment : IDENT EQ PLUS | Below is the the instruction that describes the task:
### Input:
plus_assignment : IDENT EQ PLUS
### Response:
def p_plus_assignment(self, t):
'''plus_assignment : IDENT EQ PLUS'''
self.accu.add(Term('obs_vlabel', [self.name,"gen(\""+t[1]+"\")","1"])) |
def url(self, pattern, method=None, type_cast=None):
"""Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
                provided param name will then be cast to the given type.
Defaults to None.
"""
if not type_cast:
type_cast = {}
def decorator(function):
self.add(pattern, function, method, type_cast)
return function
return decorator | Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
                provided param name will then be cast to the given type.
Defaults to None. | Below is the the instruction that describes the task:
### Input:
Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
                provided param name will then be cast to the given type.
Defaults to None.
### Response:
def url(self, pattern, method=None, type_cast=None):
"""Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
                provided param name will then be cast to the given type.
Defaults to None.
"""
if not type_cast:
type_cast = {}
def decorator(function):
self.add(pattern, function, method, type_cast)
return function
return decorator |
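A usage sketch, assuming the method lives on some router-like object (the Router class name here is hypothetical):
router = Router()   # hypothetical class exposing url()

@router.url(r'^/users/(?P<user_id>\d+)$', method='GET', type_cast={'user_id': int})
def get_user(user_id):
    return {'id': user_id}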
def load_table_from_config(input_dir, config):
"""Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
"""
path = pathlib.Path(input_dir).joinpath(config['path'])
kwargs = config['pd_read_kwargs']
return pd.read_csv(path, **kwargs) | Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame | Below is the the instruction that describes the task:
### Input:
Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
### Response:
def load_table_from_config(input_dir, config):
"""Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
"""
path = pathlib.Path(input_dir).joinpath(config['path'])
kwargs = config['pd_read_kwargs']
return pd.read_csv(path, **kwargs) |
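A usage sketch with an illustrative config dict:
config = {
    'name': 'ratings',
    'path': 'ratings.csv',
    'pd_read_kwargs': {'sep': ',', 'usecols': ['user', 'item', 'rating']},
}
df = load_table_from_config('/data/project', config)   # reads /data/project/ratings.csv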
def _get_k(self):
'''
Accessing self.k indirectly allows for creating the kvstore table
if necessary.
'''
if not self.ready:
self.k.create() # create table if it does not exist.
self.ready = True
return self.k | Accessing self.k indirectly allows for creating the kvstore table
if necessary. | Below is the the instruction that describes the task:
### Input:
Accessing self.k indirectly allows for creating the kvstore table
if necessary.
### Response:
def _get_k(self):
'''
Accessing self.k indirectly allows for creating the kvstore table
if necessary.
'''
if not self.ready:
self.k.create() # create table if it does not exist.
self.ready = True
return self.k |
def has_project_permissions(user: 'User', project: 'Project', request_method: str) -> bool:
"""This logic is extracted here to be used also with Sanic api."""
# Superusers and the creator is allowed to do everything
if user.is_staff or user.is_superuser or project.user == user:
return True
# Other user
return request_method in permissions.SAFE_METHODS and project.is_public | This logic is extracted here to be used also with Sanic api. | Below is the the instruction that describes the task:
### Input:
This logic is extracted here to be used also with Sanic api.
### Response:
def has_project_permissions(user: 'User', project: 'Project', request_method: str) -> bool:
"""This logic is extracted here to be used also with Sanic api."""
# Superusers and the creator is allowed to do everything
if user.is_staff or user.is_superuser or project.user == user:
return True
# Other user
return request_method in permissions.SAFE_METHODS and project.is_public |
def divide(self, phi1, inplace=True):
"""
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
"""
phi = self if inplace else self.copy()
phi1 = phi1.copy()
if set(phi1.variables) - set(phi.variables):
raise ValueError("Scope of divisor should be a subset of dividend")
# Adding extra variables in phi1.
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
# Rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values / phi1.values
# If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using
# np.inf to represent x/0 cases.
phi.values[np.isnan(phi.values)] = 0
if not inplace:
return phi | DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]]) | Below is the the instruction that describes the task:
### Input:
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
### Response:
def divide(self, phi1, inplace=True):
"""
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
"""
phi = self if inplace else self.copy()
phi1 = phi1.copy()
if set(phi1.variables) - set(phi.variables):
raise ValueError("Scope of divisor should be a subset of dividend")
# Adding extra variables in phi1.
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
# Rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values / phi1.values
# If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using
# np.inf to represent x/0 cases.
phi.values[np.isnan(phi.values)] = 0
if not inplace:
return phi |
def get_archive(self, archive_name, default_version=None):
'''
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
'''
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res['authority_name']):
raise ValueError(
'Archive "{}" not found on {}.'.format(archive_name, auth) +
' Did you mean "{}://{}"?'.format(
res['authority_name'], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res) | Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found | Below is the the instruction that describes the task:
### Input:
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
### Response:
def get_archive(self, archive_name, default_version=None):
'''
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
'''
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res['authority_name']):
raise ValueError(
'Archive "{}" not found on {}.'.format(archive_name, auth) +
' Did you mean "{}://{}"?'.format(
res['authority_name'], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res) |
def walk(self):
"""Walk proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Walk proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = 0.0 | Walk proposal kernel | Below is the the instruction that describes the task:
### Input:
Walk proposal kernel
### Response:
def walk(self):
"""Walk proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Walk proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = 0.0 |
def from_string(cls, string, relpath=None, encoding=None, is_sass=None):
"""Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
"""
if isinstance(string, six.text_type):
# Already decoded; we don't know what encoding to use for output,
# though, so still check for a @charset.
# TODO what if the given encoding conflicts with the one in the
# file? do we care?
if encoding is None:
encoding = determine_encoding(string)
byte_contents = string.encode(encoding)
text_contents = string
elif isinstance(string, six.binary_type):
encoding = determine_encoding(string)
byte_contents = string
text_contents = string.decode(encoding)
else:
raise TypeError("Expected text or bytes, got {0!r}".format(string))
origin = None
if relpath is None:
m = hashlib.sha256()
m.update(byte_contents)
relpath = repr("string:{0}:{1}".format(
m.hexdigest()[:16], text_contents[:100]))
return cls(
origin, relpath, text_contents, encoding=encoding,
is_sass=is_sass,
) | Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...". | Below is the instruction that describes the task:
### Input:
Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
### Response:
def from_string(cls, string, relpath=None, encoding=None, is_sass=None):
"""Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
"""
if isinstance(string, six.text_type):
# Already decoded; we don't know what encoding to use for output,
# though, so still check for a @charset.
# TODO what if the given encoding conflicts with the one in the
# file? do we care?
if encoding is None:
encoding = determine_encoding(string)
byte_contents = string.encode(encoding)
text_contents = string
elif isinstance(string, six.binary_type):
encoding = determine_encoding(string)
byte_contents = string
text_contents = string.decode(encoding)
else:
raise TypeError("Expected text or bytes, got {0!r}".format(string))
origin = None
if relpath is None:
m = hashlib.sha256()
m.update(byte_contents)
relpath = repr("string:{0}:{1}".format(
m.hexdigest()[:16], text_contents[:100]))
return cls(
origin, relpath, text_contents, encoding=encoding,
is_sass=is_sass,
) |
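A small sketch of how the fallback relpath above is built when none is supplied (hypothetical source string; only hashlib from the standard library is assumed):

import hashlib

text_contents = u'body { color: red; }'          # hypothetical source text
byte_contents = text_contents.encode('utf-8')
digest = hashlib.sha256(byte_contents).hexdigest()[:16]
relpath = repr('string:{0}:{1}'.format(digest, text_contents[:100]))
# relpath looks like "'string:<16 hex chars>:body { color: red; }'"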
def abbreviations(text):
"""Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
"""
return PreProcessorRegex(
search_args=symbols.ABBREVIATIONS,
search_func=lambda x: r"(?<={})(?=\.).".format(x),
repl='', flags=re.IGNORECASE).run(text) | Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'. | Below is the instruction that describes the task:
### Input:
Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
### Response:
def abbreviations(text):
"""Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
"""
return PreProcessorRegex(
search_args=symbols.ABBREVIATIONS,
search_func=lambda x: r"(?<={})(?=\.).".format(x),
repl='', flags=re.IGNORECASE).run(text) |
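A standalone illustration of the lookbehind/lookahead pattern that search_func builds above, run with plain re and a made-up abbreviation list instead of symbols.ABBREVIATIONS:

import re

abbrevs = ['mr', 'dr']                            # stand-in for symbols.ABBREVIATIONS
text = 'Dr. Smith met Mr. Jones.'
for abbr in abbrevs:
    pattern = r'(?<={})(?=\.).'.format(abbr)      # matches the period right after the abbreviation
    text = re.sub(pattern, '', text, flags=re.IGNORECASE)
# text -> 'Dr Smith met Mr Jones.'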
def isDiurnal(self):
""" Returns true if this chart is diurnal. """
sun = self.getObject(const.SUN)
mc = self.getAngle(const.MC)
# Get ecliptical positions and check if the
# sun is above the horizon.
lat = self.pos.lat
sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat)
mcRA, mcDecl = utils.eqCoords(mc.lon, 0)
return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat) | Returns true if this chart is diurnal. | Below is the instruction that describes the task:
### Input:
Returns true if this chart is diurnal.
### Response:
def isDiurnal(self):
""" Returns true if this chart is diurnal. """
sun = self.getObject(const.SUN)
mc = self.getAngle(const.MC)
# Get ecliptical positions and check if the
# sun is above the horizon.
lat = self.pos.lat
sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat)
mcRA, mcDecl = utils.eqCoords(mc.lon, 0)
return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat) |
def service_highstate(requires=True):
'''
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
'''
ret = {}
running = running_service_owners()
for service in running:
ret[service] = {'service': ['running']}
if requires:
ret[service]['service'].append(
{'require': {'pkg': running[service]}}
)
enabled = enabled_service_owners()
for service in enabled:
if service in ret:
ret[service]['service'].append({'enabled': True})
else:
ret[service] = {'service': [{'enabled': True}]}
if requires:
exists = False
for item in ret[service]['service']:
if isinstance(item, dict) and next(six.iterkeys(item)) == 'require':
exists = True
if not exists:
ret[service]['service'].append(
{'require': {'pkg': enabled[service]}}
)
return ret | Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False | Below is the instruction that describes the task:
### Input:
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
### Response:
def service_highstate(requires=True):
'''
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
'''
ret = {}
running = running_service_owners()
for service in running:
ret[service] = {'service': ['running']}
if requires:
ret[service]['service'].append(
{'require': {'pkg': running[service]}}
)
enabled = enabled_service_owners()
for service in enabled:
if service in ret:
ret[service]['service'].append({'enabled': True})
else:
ret[service] = {'service': [{'enabled': True}]}
if requires:
exists = False
for item in ret[service]['service']:
if isinstance(item, dict) and next(six.iterkeys(item)) == 'require':
exists = True
if not exists:
ret[service]['service'].append(
{'require': {'pkg': enabled[service]}}
)
return ret |
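For orientation, the structure returned above looks roughly like this for a service that is both running and enabled with requires=True (service and package names here are invented; the real keys come from running_service_owners() and enabled_service_owners()):

# {'apache2': {'service': ['running',
#                          {'require': {'pkg': 'apache2'}},
#                          {'enabled': True}]}}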
def get_filters(cls, raw_filters, num_cols, columns_dict): # noqa
"""Given Superset filter data structure, returns pydruid Filter(s)"""
filters = None
for flt in raw_filters:
col = flt.get('col')
op = flt.get('op')
eq = flt.get('val')
if (
not col or
not op or
(eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
continue
# Check if this dimension uses an extraction function
# If so, create the appropriate pydruid extraction object
column_def = columns_dict.get(col)
dim_spec = column_def.dimension_spec if column_def else None
extraction_fn = None
if dim_spec and 'extractionFn' in dim_spec:
(col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
cond = None
is_numeric_col = col in num_cols
is_list_target = op in ('in', 'not in')
eq = cls.filter_values_handler(
eq, is_list_target=is_list_target,
target_column_is_numeric=is_numeric_col)
# For these two ops, could have used Dimension,
# but it doesn't support extraction functions
if op == '==':
cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op == '!=':
cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
# if it uses an extraction fn, use the "in" operator
# as Dimension isn't supported
elif extraction_fn is not None:
cond = Filter(
dimension=col,
values=eq,
type='in',
extraction_function=extraction_fn,
)
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(
extraction_function=extraction_fn,
type='regex',
pattern=eq,
dimension=col,
)
# For the ops below, could have used pydruid's Bound,
# but it doesn't support extraction functions
elif op == '>=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == '>':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
lowerStrict=True,
upperStrict=False,
dimension=col,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
upperStrict=True,
lowerStrict=False,
dimension=col,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == 'IS NULL':
cond = Dimension(col) == None # NOQA
elif op == 'IS NOT NULL':
cond = Dimension(col) != None # NOQA
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters | Given Superset filter data structure, returns pydruid Filter(s) | Below is the instruction that describes the task:
### Input:
Given Superset filter data structure, returns pydruid Filter(s)
### Response:
def get_filters(cls, raw_filters, num_cols, columns_dict): # noqa
"""Given Superset filter data structure, returns pydruid Filter(s)"""
filters = None
for flt in raw_filters:
col = flt.get('col')
op = flt.get('op')
eq = flt.get('val')
if (
not col or
not op or
(eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
continue
# Check if this dimension uses an extraction function
# If so, create the appropriate pydruid extraction object
column_def = columns_dict.get(col)
dim_spec = column_def.dimension_spec if column_def else None
extraction_fn = None
if dim_spec and 'extractionFn' in dim_spec:
(col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
cond = None
is_numeric_col = col in num_cols
is_list_target = op in ('in', 'not in')
eq = cls.filter_values_handler(
eq, is_list_target=is_list_target,
target_column_is_numeric=is_numeric_col)
# For these two ops, could have used Dimension,
# but it doesn't support extraction functions
if op == '==':
cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op == '!=':
cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
# if it uses an extraction fn, use the "in" operator
# as Dimension isn't supported
elif extraction_fn is not None:
cond = Filter(
dimension=col,
values=eq,
type='in',
extraction_function=extraction_fn,
)
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(
extraction_function=extraction_fn,
type='regex',
pattern=eq,
dimension=col,
)
# For the ops below, could have used pydruid's Bound,
# but it doesn't support extraction functions
elif op == '>=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<=':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == '>':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
lowerStrict=True,
upperStrict=False,
dimension=col,
lower=eq,
upper=None,
alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Filter(
type='bound',
extraction_function=extraction_fn,
upperStrict=True,
lowerStrict=False,
dimension=col,
lower=None,
upper=eq,
alphaNumeric=is_numeric_col,
)
elif op == 'IS NULL':
cond = Dimension(col) == None # NOQA
elif op == 'IS NOT NULL':
cond = Dimension(col) != None # NOQA
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters |
def get_model_from_path_string(root_model, path):
""" Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
"""
for path_section in path.split('__'):
if path_section:
try:
field, model, direct, m2m = _get_field_by_name(root_model, path_section)
except FieldDoesNotExist:
return root_model
if direct:
if _get_remote_field(field):
try:
root_model = _get_remote_field(field).parent_model()
except AttributeError:
root_model = _get_remote_field(field).model
else:
if hasattr(field, 'related_model'):
root_model = field.related_model
else:
root_model = field.model
return root_model | Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo | Below is the instruction that describes the task:
### Input:
Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
### Response:
def get_model_from_path_string(root_model, path):
""" Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
"""
for path_section in path.split('__'):
if path_section:
try:
field, model, direct, m2m = _get_field_by_name(root_model, path_section)
except FieldDoesNotExist:
return root_model
if direct:
if _get_remote_field(field):
try:
root_model = _get_remote_field(field).parent_model()
except AttributeError:
root_model = _get_remote_field(field).model
else:
if hasattr(field, 'related_model'):
root_model = field.related_model
else:
root_model = field.model
return root_model |
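An illustrative walk through the path logic, using hypothetical Django models; settings and migrations are omitted, so this is shown as comments only and reflects the intended behaviour per the docstring:

# class Author(models.Model):
#     name = models.CharField(max_length=100)
# class Book(models.Model):
#     author = models.ForeignKey(Author, on_delete=models.CASCADE)
#
# get_model_from_path_string(Book, 'author')          # -> Author
# get_model_from_path_string(Book, 'author__missing') # unknown field: falls back to Author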
def calc_steady_state_dist(R):
"""Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
"""
#Calc steady state distribution for a dinucleotide bias matrix
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1 | Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution | Below is the instruction that describes the task:
### Input:
Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
### Response:
def calc_steady_state_dist(R):
"""Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
"""
#Calc steady state distribution for a dinucleotide bias matrix
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1 |
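A quick sanity check of the eigenvector logic above with a hypothetical 4-state matrix; it is doubly stochastic, so the steady state is uniform and easy to verify by eye (assumes the function above is in scope):

import numpy as np

R = np.array([[0.7, 0.1, 0.1, 0.1],
              [0.1, 0.7, 0.1, 0.1],
              [0.1, 0.1, 0.7, 0.1],
              [0.1, 0.1, 0.1, 0.7]])
p_ss = calc_steady_state_dist(R)
# p_ss -> array([0.25, 0.25, 0.25, 0.25])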
def logger(name=None, save=False):
"""Init and configure logger."""
logger = logging.getLogger(name)
if save:
logformat = '%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)'
log_file_path = 'fut.log' # TODO: define logpath
open(log_file_path, 'w').write('') # remove old logs
logger.setLevel(logging.DEBUG)
logger_handler = logging.FileHandler(log_file_path)
logger_handler.setFormatter(logging.Formatter(logformat))
else:
logger_handler = NullHandler()
logger.addHandler(logger_handler)
return logger | Init and configure logger. | Below is the instruction that describes the task:
### Input:
Init and configure logger.
### Response:
def logger(name=None, save=False):
"""Init and configure logger."""
logger = logging.getLogger(name)
if save:
logformat = '%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)'
log_file_path = 'fut.log' # TODO: define logpath
open(log_file_path, 'w').write('') # remove old logs
logger.setLevel(logging.DEBUG)
logger_handler = logging.FileHandler(log_file_path)
logger_handler.setFormatter(logging.Formatter(logformat))
else:
logger_handler = NullHandler()
logger.addHandler(logger_handler)
return logger |
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_nodes()
self._find_usage_subnet_groups()
self._find_usage_parameter_groups()
self._find_usage_security_groups()
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. | Below is the instruction that describes the task:
### Input:
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
### Response:
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_nodes()
self._find_usage_subnet_groups()
self._find_usage_parameter_groups()
self._find_usage_security_groups()
self._have_usage = True
logger.debug("Done checking usage.") |
def parse_raxml(handle):
"""Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
"""
s = ''.join(handle.readlines())
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
result['empirical_frequencies'] = (
result['datatype'] != 'AA' or
re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
rates = {}
if result['datatype'] != 'AA':
try_set_fields(rates,
(r"rates\[0\] ac ag at cg ct gt: "
r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
s, hook=float)
try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
if len(rates) > 0:
result['subs_rates'] = rates
result['gamma'] = {'n_cats': 4}
try_set_fields(result['gamma'],
r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
result['ras_model'] = 'gamma'
return result | Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned. | Below is the instruction that describes the task:
### Input:
Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
### Response:
def parse_raxml(handle):
"""Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
"""
s = ''.join(handle.readlines())
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
result['empirical_frequencies'] = (
result['datatype'] != 'AA' or
re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
rates = {}
if result['datatype'] != 'AA':
try_set_fields(rates,
(r"rates\[0\] ac ag at cg ct gt: "
r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
s, hook=float)
try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
if len(rates) > 0:
result['subs_rates'] = rates
result['gamma'] = {'n_cats': 4}
try_set_fields(result['gamma'],
r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
result['ras_model'] = 'gamma'
return result |
def current_changed(self, i):
"""Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
:param index: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None
"""
m = self.model()
ri = self.rootModelIndex()
index = m.index(i, 0, ri)
self.new_root.emit(index) | Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
:param index: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None | Below is the instruction that describes the task:
### Input:
Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
:param index: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None
### Response:
def current_changed(self, i):
"""Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
:param index: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None
"""
m = self.model()
ri = self.rootModelIndex()
index = m.index(i, 0, ri)
self.new_root.emit(index) |
def line(self, serie, rescale=False):
"""Draw the line serie"""
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
if x > self.view.width / 2:
classes.append('left')
if y > self.view.height / 2:
classes.append('top')
classes = ' '.join(classes)
self._confidence_interval(
serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
'circle',
cx=x,
cy=y,
r=serie.dots_size,
class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
# view_values are in form [(x1, y1), (x2, y2)]. We
# need to split that into multiple sequences if a
# None is present here
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
# emit current subsequence
sequences.append(cur_sequence)
cur_sequence = []
elif y is None: # just discard
continue
else:
cur_sequence.append((x, y)) # append the element
if len(cur_sequence) > 0: # emit last possible sequence
sequences.append(cur_sequence)
else:
# plain vanilla rendering
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
serie_node['plot'],
seq,
close=self._self_close,
class_='line reactive' +
(' nofill' if not serie.fill else '')
) | Draw the line serie | Below is the instruction that describes the task:
### Input:
Draw the line serie
### Response:
def line(self, serie, rescale=False):
"""Draw the line serie"""
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
if x > self.view.width / 2:
classes.append('left')
if y > self.view.height / 2:
classes.append('top')
classes = ' '.join(classes)
self._confidence_interval(
serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
'circle',
cx=x,
cy=y,
r=serie.dots_size,
class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
# view_values are in form [(x1, y1), (x2, y2)]. We
# need to split that into multiple sequences if a
# None is present here
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
# emit current subsequence
sequences.append(cur_sequence)
cur_sequence = []
elif y is None: # just discard
continue
else:
cur_sequence.append((x, y)) # append the element
if len(cur_sequence) > 0: # emit last possible sequence
sequences.append(cur_sequence)
else:
# plain vanilla rendering
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
serie_node['plot'],
seq,
close=self._self_close,
class_='line reactive' +
(' nofill' if not serie.fill else '')
) |
def as_dict(self, cache=None, fetch=True):
"""Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
"""
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info | Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached. | Below is the instruction that describes the task:
### Input:
Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
### Response:
def as_dict(self, cache=None, fetch=True):
"""Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
"""
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info |
def create_ui(self):
'''
.. versionchanged:: 0.21.2
Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe).
'''
builder = gtk.Builder()
# Read glade file using `pkgutil` to also support loading from `.zip`
# files (e.g., in app packaged with Py2Exe).
glade_str = pkgutil.get_data(__name__,
'glade/form_view_dialog.glade')
builder.add_from_string(glade_str)
self.window = builder.get_object('form_view_dialog')
self.vbox_form = builder.get_object('vbox_form')
if self.title:
self.window.set_title(self.title)
if self.short_desc:
self.short_label = gtk.Label()
self.short_label.set_text(self.short_desc)
self.short_label.set_alignment(0, .5)
self.vbox_form.pack_start(self.short_label, expand=True, fill=True)
if self.long_desc:
self.long_label = gtk.Label()
self.long_label.set_text(self.long_desc)
self.long_label.set_alignment(.1, .5)
self.long_expander = gtk.Expander(label='Details')
self.long_expander.set_spacing(5)
self.long_expander.add(self.long_label)
self.vbox_form.pack_start(self.long_expander, expand=True,
fill=True)
if self.parent is None:
self.parent = self.default_parent
self.window.set_default_response(gtk.RESPONSE_OK)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
if self.parent:
self.window.set_transient_for(self.parent)
self.window.show_all() | .. versionchanged:: 0.21.2
Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe). | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 0.21.2
Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe).
### Response:
def create_ui(self):
'''
.. versionchanged:: 0.21.2
Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe).
'''
builder = gtk.Builder()
# Read glade file using `pkgutil` to also support loading from `.zip`
# files (e.g., in app packaged with Py2Exe).
glade_str = pkgutil.get_data(__name__,
'glade/form_view_dialog.glade')
builder.add_from_string(glade_str)
self.window = builder.get_object('form_view_dialog')
self.vbox_form = builder.get_object('vbox_form')
if self.title:
self.window.set_title(self.title)
if self.short_desc:
self.short_label = gtk.Label()
self.short_label.set_text(self.short_desc)
self.short_label.set_alignment(0, .5)
self.vbox_form.pack_start(self.short_label, expand=True, fill=True)
if self.long_desc:
self.long_label = gtk.Label()
self.long_label.set_text(self.long_desc)
self.long_label.set_alignment(.1, .5)
self.long_expander = gtk.Expander(label='Details')
self.long_expander.set_spacing(5)
self.long_expander.add(self.long_label)
self.vbox_form.pack_start(self.long_expander, expand=True,
fill=True)
if self.parent is None:
self.parent = self.default_parent
self.window.set_default_response(gtk.RESPONSE_OK)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
if self.parent:
self.window.set_transient_for(self.parent)
self.window.show_all() |
def which(cmd):
"""
Returns full path to a executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
"""
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None | Returns full path to a executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python") | Below is the instruction that describes the task:
### Input:
Returns full path to a executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
### Response:
def which(cmd):
"""
Returns full path to a executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
"""
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None |
def find_n50(contig_lengths_dict, genome_length_dict):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50
"""
# Initialise the dictionary
n50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= genome_length_dict[file_name] * 0.5:
# Populate the dictionary, and break the loop
n50_dict[file_name] = contig_length
break
return n50_dict | Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50 | Below is the instruction that describes the task:
### Input:
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50
### Response:
def find_n50(contig_lengths_dict, genome_length_dict):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50
"""
# Initialise the dictionary
n50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= genome_length_dict[file_name] * 0.5:
# Populate the dictionary, and break the loop
n50_dict[file_name] = contig_length
break
return n50_dict |
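A worked example of the running-total logic above with made-up contig lengths (the lengths must already be reverse-sorted, matching what the docstring expects):

contig_lengths_dict = {'strainA': [90, 70, 30, 10]}
genome_length_dict = {'strainA': 200}
# running total: 90 (< 100), then 90 + 70 = 160 >= 100, so the N50 is 70
find_n50(contig_lengths_dict, genome_length_dict)   # -> {'strainA': 70}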
def transform(self, maps):
"""This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
return self.format_output(maps, out) | This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values. | Below is the instruction that describes the task:
### Input:
This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
### Response:
def transform(self, maps):
"""This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
return self.format_output(maps, out) |
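As a cross-check on the conversion above, the standard chirp-mass relations (assumed here; this is not the pycbc.conversions source) reproduce the kind of numbers shown in the docstring example when eta corresponds to a 2:1 mass ratio:

import numpy as np

mchirp, eta = 10.0, 2.0 / 9.0                 # eta = 2/9 <=> mass ratio 2:1
mtotal = mchirp * eta ** (-3.0 / 5.0)         # total mass from the chirp mass
delta = np.sqrt(1.0 - 4.0 * eta)
mass1 = mtotal * (1.0 + delta) / 2.0          # ~16.44
mass2 = mtotal * (1.0 - delta) / 2.0          # ~8.22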
def splitterfields(data, commdct):
"""get splitter fields to diagram it"""
objkey = "Connector:Splitter".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists) | get splitter fields to diagram it | Below is the instruction that describes the task:
### Input:
get splitter fields to diagram it
### Response:
def splitterfields(data, commdct):
"""get splitter fields to diagram it"""
objkey = "Connector:Splitter".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists) |
def report(*arrays, **kwargs):
"""
Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information.
"""
name = kwargs.pop("name",None)
grouped = len(arrays) > 1
if grouped:
arr = N.concatenate(arrays)
components = [PCAOrientation(a)
for a in arrays]
else:
arr = arrays[0]
components = []
#r = LinearOrientation(arr)
pca = PCAOrientation(arr)
distances = list(distance_from_group(components,pca))
kwargs = dict(
levels=[1,2,3],
alpha=[0.8,0.5,0.2],
linewidth=2)
#ellipse=error_ellipse(pca)
kwargs = dict(n=500,levels=[1,2], ellipse=True)
stereonet_data = dict(
main=pca.error_coords(**kwargs),
components=[i.error_coords(**kwargs)
for i in components])
t = env.get_template("report.html")
return t.render(
name=name,
pca=pca,
stereonet_data=stereonet_data,
angular_errors=tuple(N.degrees(i)
for i in pca.angular_errors()[::-1]),
aligned=plot_aligned(pca),
distances=distances) | Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information. | Below is the instruction that describes the task:
### Input:
Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information.
### Response:
def report(*arrays, **kwargs):
"""
Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information.
"""
name = kwargs.pop("name",None)
grouped = len(arrays) > 1
if grouped:
arr = N.concatenate(arrays)
components = [PCAOrientation(a)
for a in arrays]
else:
arr = arrays[0]
components = []
#r = LinearOrientation(arr)
pca = PCAOrientation(arr)
distances = list(distance_from_group(components,pca))
kwargs = dict(
levels=[1,2,3],
alpha=[0.8,0.5,0.2],
linewidth=2)
#ellipse=error_ellipse(pca)
kwargs = dict(n=500,levels=[1,2], ellipse=True)
stereonet_data = dict(
main=pca.error_coords(**kwargs),
components=[i.error_coords(**kwargs)
for i in components])
t = env.get_template("report.html")
return t.render(
name=name,
pca=pca,
stereonet_data=stereonet_data,
angular_errors=tuple(N.degrees(i)
for i in pca.angular_errors()[::-1]),
aligned=plot_aligned(pca),
distances=distances) |
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer) | Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str | Below is the instruction that describes the task:
### Input:
Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
### Response:
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer) |
def write_to_conll_eval_file(prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str]):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
"""
verb_only_sentence = ["-"] * len(sentence)
if verb_index:
verb_only_sentence[verb_index] = sentence[verb_index]
conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
for word, predicted, gold in zip(verb_only_sentence,
conll_format_predictions,
conll_format_gold_labels):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n") | Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels. | Below is the instruction that describes the task:
### Input:
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
### Response:
def write_to_conll_eval_file(prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str]):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
"""
verb_only_sentence = ["-"] * len(sentence)
if verb_index:
verb_only_sentence[verb_index] = sentence[verb_index]
conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
for word, predicted, gold in zip(verb_only_sentence,
conll_format_predictions,
conll_format_gold_labels):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n") |
def _extract_properties(config):
"""
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict
"""
general, options, sets = {}, {}, {}
for line in config.splitlines():
# skip empty & malformed lines
if not line or not line[-1:] == ';' and '; #' not in line:
continue
# strip the trailing ';' and remove any whitespaces on the left side
line = line[:-1].lstrip()
# separate the three cases
if line[:6] == 'option':
key, value = _extract_prop_option(line)
options[key] = value
elif line[:3] == 'set':
key, value = _extract_prop_set(line)
sets[key] = value
else:
# fall through to generic case
key, value = _extract_prop_general(line)
general[key] = value
return general, options, sets | Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict | Below is the instruction that describes the task:
### Input:
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict
### Response:
def _extract_properties(config):
"""
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict
"""
general, options, sets = {}, {}, {}
for line in config.splitlines():
# skip empty & malformed lines
if not line or not line[-1:] == ';' and '; #' not in line:
continue
# strip the trailing ';' and remove any whitespaces on the left side
line = line[:-1].lstrip()
# separate the three cases
if line[:6] == 'option':
key, value = _extract_prop_option(line)
options[key] = value
elif line[:3] == 'set':
key, value = _extract_prop_set(line)
sets[key] = value
else:
# fall through to generic case
key, value = _extract_prop_general(line)
general[key] = value
return general, options, sets |
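A tiny input sketch showing which branch each lease line takes (the actual key/value splitting happens in the _extract_prop_* helpers, which are not shown here):

config = (
    '  starts 4 2019/03/01 10:00:00;\n'       # neither option nor set -> general
    '  option domain-name "example.org";\n'   # starts with option -> options
    '  set ddns-rev-name = "example";\n'      # starts with set -> sets
)
general, options, sets = _extract_properties(config)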
def find(self, name):
"""
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
"""
result = None
for t in self.array:
if str(t) == name:
result = Tag(t.jobject)
break
return result | Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag | Below is the instruction that describes the task:
### Input:
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
### Response:
def find(self, name):
"""
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
"""
result = None
for t in self.array:
if str(t) == name:
result = Tag(t.jobject)
break
return result |
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
# let real installed kernels overwrite envs with the same name:
# this is the same order as the get_kernel_spec way, which also prefers
# kernels from the jupyter dir over env kernels.
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs | Returns a dict mapping kernel names to resource directories. | Below is the instruction that describes the task:
### Input:
Returns a dict mapping kernel names to resource directories.
### Response:
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
# let real installed kernels overwrite envs with the same name:
# this is the same order as the get_kernel_spec way, which also prefers
# kernels from the jupyter dir over env kernels.
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs |
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name] | Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import. | Below is the instruction that describes the task:
### Input:
Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
### Response:
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name] |
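A couple of illustrative calls; the absolute form is directly runnable, while the relative forms assume the unshown _resolve_name helper follows the usual importlib dot-stripping semantics:

json_mod = import_module('json')              # absolute import, returns the json module
# import_module('.tz', package='dateutil')    # one dot  -> 'dateutil.tz'
# import_module('..b', package='pkg.sub')     # two dots -> 'pkg.b'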
def url(self):
"""return the full request url as an Url() instance"""
scheme = self.scheme
host = self.host
path = self.path
query = self.query
port = self.port
# normalize the port
host_domain, host_port = Url.split_hostname_from_port(host)
if host_port:
port = host_port
controller_path = ""
if self.controller_info:
controller_path = self.controller_info.get("path", "")
u = Url(
scheme=scheme,
hostname=host,
path=path,
query=query,
port=port,
controller_path=controller_path,
)
return u | return the full request url as an Url() instance | Below is the instruction that describes the task:
### Input:
return the full request url as an Url() instance
### Response:
def url(self):
"""return the full request url as an Url() instance"""
scheme = self.scheme
host = self.host
path = self.path
query = self.query
port = self.port
# normalize the port
host_domain, host_port = Url.split_hostname_from_port(host)
if host_port:
port = host_port
controller_path = ""
if self.controller_info:
controller_path = self.controller_info.get("path", "")
u = Url(
scheme=scheme,
hostname=host,
path=path,
query=query,
port=port,
controller_path=controller_path,
)
return u |
def converter_pm_log10(data):
"""Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data
"""
# indices_zero = np.where(data == 0)
indices_gt_zero = np.where(data > 0)
indices_lt_zero = np.where(data < 0)
data_converted = np.zeros(data.shape)
data_converted[indices_gt_zero] = np.log10(data[indices_gt_zero])
data_converted[indices_lt_zero] = -np.log10(-data[indices_lt_zero])
return indices_gt_zero, indices_lt_zero, data_converted | Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data | Below is the instruction that describes the task:
### Input:
Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data
### Response:
def converter_pm_log10(data):
"""Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data
"""
# indices_zero = np.where(data == 0)
indices_gt_zero = np.where(data > 0)
indices_lt_zero = np.where(data < 0)
data_converted = np.zeros(data.shape)
data_converted[indices_gt_zero] = np.log10(data[indices_gt_zero])
data_converted[indices_lt_zero] = -np.log10(-data[indices_lt_zero])
return indices_gt_zero, indices_lt_zero, data_converted |
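A small worked example of the signed log10 mapping above; zeros stay zero because the output array is pre-filled with zeros (assumes the function above is in scope):

import numpy as np

data = np.array([100.0, 0.0, -1000.0])
idx_pos, idx_neg, converted = converter_pm_log10(data)
# converted -> array([ 2.,  0., -3.])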
def is_uniform_join_units(join_units):
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
"""
return (
# all blocks need to have the same type
all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units) and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units) and
# disregard Panels
all(ju.block.ndim <= 2 for ju in join_units) and
# only use this path when there is something to concatenate
len(join_units) > 1) | Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`). | Below is the the instruction that describes the task:
### Input:
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
### Response:
def is_uniform_join_units(join_units):
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
"""
return (
# all blocks need to have the same type
all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units) and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units) and
# disregard Panels
all(ju.block.ndim <= 2 for ju in join_units) and
# only use this path when there is something to concatenate
len(join_units) > 1) |
def get_mutation_rates(transcripts, mut_dict, ensembl):
""" determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
"""
rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
'splice_region': 0, 'synonymous': 0}
combined = None
for tx_id in transcripts:
try:
tx = construct_gene_object(ensembl, tx_id)
except ValueError:
continue
if len(tx.get_cds_sequence()) % 3 != 0:
raise ValueError("anomalous_coding_sequence")
# ignore mitochondrial genes
if tx.get_chrom() == "MT":
continue
sites = SiteRates(tx, mut_dict, masked_sites=combined)
combined = tx + combined
for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:
rates[cq] += sites[cq].get_summed_rate()
if combined is None:
raise ValueError('no tx found')
length = combined.get_coding_distance(combined.get_cds_end())['pos']
return rates, combined, length | determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length) | Below is the the instruction that describes the task:
### Input:
determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
### Response:
def get_mutation_rates(transcripts, mut_dict, ensembl):
""" determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
"""
rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
'splice_region': 0, 'synonymous': 0}
combined = None
for tx_id in transcripts:
try:
tx = construct_gene_object(ensembl, tx_id)
except ValueError:
continue
if len(tx.get_cds_sequence()) % 3 != 0:
raise ValueError("anomalous_coding_sequence")
# ignore mitochondrial genes
if tx.get_chrom() == "MT":
continue
sites = SiteRates(tx, mut_dict, masked_sites=combined)
combined = tx + combined
for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:
rates[cq] += sites[cq].get_summed_rate()
if combined is None:
raise ValueError('no tx found')
length = combined.get_coding_distance(combined.get_cds_end())['pos']
return rates, combined, length |
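
A hedged sketch of a call to the function above; ensembl_client and local_rates stand for objects built elsewhere in the package (their constructors are not shown here), and the transcript ID is made up.
try:
    rates, merged_tx, cds_length = get_mutation_rates(
        ["ENST00000302118"], local_rates, ensembl_client)
except ValueError as error:
    # raised when no usable transcript is found or the CDS length is not a
    # multiple of three
    print("skipped:", error)
else:
    print(rates["missense"], rates["nonsense"], cds_length)
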
def run(self):
"""
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
"""
try:
self.job_state = 'PENDING'
time.sleep(1)
image = 'alpine:latest'
cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
name = '{0}-{1}'.format(self.job_name, self.job_id)
self.job_state = 'RUNNABLE'
# TODO setup ecs container instance
time.sleep(1)
self.job_state = 'STARTING'
container = self.docker_client.containers.run(
image, cmd,
detach=True,
name=name
)
self.job_state = 'RUNNING'
self.job_started_at = datetime.datetime.now()
try:
# Log collection
logs_stdout = []
logs_stderr = []
container.reload()
                # Dodgy hack: we can only check docker logs once a second, but we want to loop more
                # often so we can stop promptly if asked to; this should all go away if we go async.
                # There may also be some dodginess when sending an integer to docker logs, as some
                # events seem to be duplicated.
now = datetime.datetime.now()
i = 1
while container.status == 'running' and not self.stop:
time.sleep(0.15)
if i % 10 == 0:
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
now = datetime.datetime.now()
container.reload()
i += 1
# Container should be stopped by this point... unless asked to stop
if container.status == 'running':
container.kill()
self.job_stopped_at = datetime.datetime.now()
# Get final logs
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'
# Process logs
logs_stdout = [x for x in logs_stdout if len(x) > 0]
logs_stderr = [x for x in logs_stderr if len(x) > 0]
logs = []
for line in logs_stdout + logs_stderr:
date, line = line.split(' ', 1)
date = dateutil.parser.parse(date)
date = int(date.timestamp())
logs.append({'timestamp': date, 'message': line.strip()})
# Send to cloudwatch
log_group = '/aws/batch/job'
stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
container.kill()
finally:
container.remove()
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now() | Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return: | Below is the the instruction that describes the task:
### Input:
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
### Response:
def run(self):
"""
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
"""
try:
self.job_state = 'PENDING'
time.sleep(1)
image = 'alpine:latest'
cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
name = '{0}-{1}'.format(self.job_name, self.job_id)
self.job_state = 'RUNNABLE'
# TODO setup ecs container instance
time.sleep(1)
self.job_state = 'STARTING'
container = self.docker_client.containers.run(
image, cmd,
detach=True,
name=name
)
self.job_state = 'RUNNING'
self.job_started_at = datetime.datetime.now()
try:
# Log collection
logs_stdout = []
logs_stderr = []
container.reload()
                # Dodgy hack: we can only check docker logs once a second, but we want to loop more
                # often so we can stop promptly if asked to; this should all go away if we go async.
                # There may also be some dodginess when sending an integer to docker logs, as some
                # events seem to be duplicated.
now = datetime.datetime.now()
i = 1
while container.status == 'running' and not self.stop:
time.sleep(0.15)
if i % 10 == 0:
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
now = datetime.datetime.now()
container.reload()
i += 1
# Container should be stopped by this point... unless asked to stop
if container.status == 'running':
container.kill()
self.job_stopped_at = datetime.datetime.now()
# Get final logs
logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'
# Process logs
logs_stdout = [x for x in logs_stdout if len(x) > 0]
logs_stderr = [x for x in logs_stderr if len(x) > 0]
logs = []
for line in logs_stdout + logs_stderr:
date, line = line.split(' ', 1)
date = dateutil.parser.parse(date)
date = int(date.timestamp())
logs.append({'timestamp': date, 'message': line.strip()})
# Send to cloudwatch
log_group = '/aws/batch/job'
stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
container.kill()
finally:
container.remove()
except Exception as err:
logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
self.job_state = 'FAILED'
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now() |
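
The timestamp-splitting step used in run() above can be shown on its own; the log line is made up but follows the format docker emits when timestamps=True.
import dateutil.parser

line = "2023-05-01T12:00:00.000000000Z Hello World"
date, message = line.split(' ', 1)
event = {'timestamp': int(dateutil.parser.parse(date).timestamp()),
         'message': message.strip()}
print(event)   # {'timestamp': 1682942400, 'message': 'Hello World'}
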
def _acquire_lock(self, identifier, atime=30, ltime=5):
'''Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise
'''
conn = redis.Redis(connection_pool=self.pool)
end = time.time() + atime
while end > time.time():
if conn.set(self._lock_name, identifier, ex=ltime, nx=True):
# logger.debug("won lock %s" % self._lock_name)
return identifier
sleep_time = random.uniform(0, 3)
time.sleep(sleep_time)
logger.warn('failed to acquire lock %s for %f seconds',
self._lock_name, atime)
return False | Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise | Below is the the instruction that describes the task:
### Input:
Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise
### Response:
def _acquire_lock(self, identifier, atime=30, ltime=5):
'''Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise
'''
conn = redis.Redis(connection_pool=self.pool)
end = time.time() + atime
while end > time.time():
if conn.set(self._lock_name, identifier, ex=ltime, nx=True):
# logger.debug("won lock %s" % self._lock_name)
return identifier
sleep_time = random.uniform(0, 3)
time.sleep(sleep_time)
logger.warn('failed to acquire lock %s for %f seconds',
self._lock_name, atime)
return False |
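
A hypothetical use of the lock helper above; store stands for whatever object carries the redis connection pool and _lock_name, and _release_lock is an assumed counterpart that does not appear in this excerpt.
import uuid

token = uuid.uuid4().hex
if store._acquire_lock(token, atime=10, ltime=5):
    try:
        print("lock held, doing work")
    finally:
        store._release_lock(token)   # assumed counterpart, not shown above
else:
    print("could not obtain the lock within 10 seconds")
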
def set_active_scalar(self, name, preference='cell'):
"""Finds the scalar by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveScalars(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveScalars(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_scalar_info = [field, name] | Finds the scalar by name and appropriately sets it as active | Below is the the instruction that describes the task:
### Input:
Finds the scalar by name and appropriately sets it as active
### Response:
def set_active_scalar(self, name, preference='cell'):
"""Finds the scalar by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveScalars(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveScalars(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_scalar_info = [field, name] |
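
A usage sketch, assuming mesh is a dataset object exposing the method above (vtki / older pyvista style); the point_arrays mapping and n_points attribute are assumed from that same API generation, and the array name is made up.
import numpy as np

mesh.point_arrays['elevation'] = np.arange(mesh.n_points)   # add a point array
mesh.set_active_scalar('elevation', preference='point')     # make it the active scalar
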
def create(cls, name, members):
"""Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type.
"""
NewEnum = type(name, (cls,), {})
if isinstance(members, dict):
members = members.items()
for member in members:
if isinstance(member, tuple):
name, value = member
setattr(NewEnum, name, value)
elif isinstance(member, EnumBase):
setattr(NewEnum, member.short_name, member.value)
else:
assert False, (
"members must be either a dict, "
+ "a list of (name, value) tuples, "
+ "or a list of EnumBase instances."
)
NewEnum.process()
# needed for pickling to work (hopefully); taken from the namedtuple implementation in the
# standard library
try:
NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
return NewEnum | Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type. | Below is the the instruction that describes the task:
### Input:
Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type.
### Response:
def create(cls, name, members):
"""Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type.
"""
NewEnum = type(name, (cls,), {})
if isinstance(members, dict):
members = members.items()
for member in members:
if isinstance(member, tuple):
name, value = member
setattr(NewEnum, name, value)
elif isinstance(member, EnumBase):
setattr(NewEnum, member.short_name, member.value)
else:
assert False, (
"members must be either a dict, "
+ "a list of (name, value) tuples, "
+ "or a list of EnumBase instances."
)
NewEnum.process()
# needed for pickling to work (hopefully); taken from the namedtuple implementation in the
# standard library
try:
NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
return NewEnum |
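
A hypothetical example of the factory above; Color stands for an existing EnumBase subclass (not shown here), and the member names and values are made up.
ExtendedColor = Color.create(
    "ExtendedColor",
    [("PURPLE", 7), ("ORANGE", 8)],   # plain (name, value) tuples
)
print(ExtendedColor.PURPLE, ExtendedColor.ORANGE)
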
def long_description():
""" Collates project README and latest changes. """
changes = latest_changes()
changes[0] = "`Changes for v{}".format(changes[0][1:])
changes[1] = '-' * len(changes[0])
return "\n\n\n".join([
read_file('README.rst'),
'\n'.join(changes),
"`Full changelog <{}/en/develop/changelog.html#changelog>`_.".format(
DOCUMENTATION_URL)]) | Collates project README and latest changes. | Below is the the instruction that describes the task:
### Input:
Collates project README and latest changes.
### Response:
def long_description():
""" Collates project README and latest changes. """
changes = latest_changes()
changes[0] = "`Changes for v{}".format(changes[0][1:])
changes[1] = '-' * len(changes[0])
return "\n\n\n".join([
read_file('README.rst'),
'\n'.join(changes),
"`Full changelog <{}/en/develop/changelog.html#changelog>`_.".format(
DOCUMENTATION_URL)]) |
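
The string built above is typically handed straight to setuptools; a minimal sketch with a made-up package name.
from setuptools import setup

setup(
    name="example-package",
    long_description=long_description(),
    long_description_content_type="text/x-rst",
)
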
def __set_basic_auth_string(self, username, password):
'''
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle.
'''
auth = b2handle.utilhandle.create_authentication_string(username, password)
self.__basic_authentication_string = auth | Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle. | Below is the the instruction that describes the task:
### Input:
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle.
### Response:
def __set_basic_auth_string(self, username, password):
'''
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle.
'''
auth = b2handle.utilhandle.create_authentication_string(username, password)
self.__basic_authentication_string = auth |
def magic_set(obj):
"""
Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1
"""
def decorator(func):
is_class = isinstance(obj, six.class_types)
args, varargs, varkw, defaults = inspect.getargspec(func)
if not args or args[0] not in ('self', 'cls', 'klass'):
# Static function/method
if is_class:
replacement = staticmethod(func)
else:
replacement = func
elif args[0] == 'self':
if is_class:
replacement = func
else:
def replacement(*args, **kw):
return func(obj, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
else:
if is_class:
replacement = classmethod(func)
else:
def replacement(*args, **kw):
return func(obj.__class__, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
setattr(obj, func.__name__, replacement)
return replacement
return decorator | Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1 | Below is the the instruction that describes the task:
### Input:
Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1
### Response:
def magic_set(obj):
"""
Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1
"""
def decorator(func):
is_class = isinstance(obj, six.class_types)
args, varargs, varkw, defaults = inspect.getargspec(func)
if not args or args[0] not in ('self', 'cls', 'klass'):
# Static function/method
if is_class:
replacement = staticmethod(func)
else:
replacement = func
elif args[0] == 'self':
if is_class:
replacement = func
else:
def replacement(*args, **kw):
return func(obj, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
else:
if is_class:
replacement = classmethod(func)
else:
def replacement(*args, **kw):
return func(obj.__class__, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
setattr(obj, func.__name__, replacement)
return replacement
return decorator |
def get_ntp_peers(self):
"""Implementation of get_ntp_peers for IOS."""
ntp_stats = self.get_ntp_stats()
return {
ntp_peer.get("remote"): {}
for ntp_peer in ntp_stats
if ntp_peer.get("remote")
} | Implementation of get_ntp_peers for IOS. | Below is the the instruction that describes the task:
### Input:
Implementation of get_ntp_peers for IOS.
### Response:
def get_ntp_peers(self):
"""Implementation of get_ntp_peers for IOS."""
ntp_stats = self.get_ntp_stats()
return {
ntp_peer.get("remote"): {}
for ntp_peer in ntp_stats
if ntp_peer.get("remote")
} |
def attendee(request, form, user_id=None):
''' Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest. '''
if user_id is None and form.cleaned_data["user"] is not None:
user_id = form.cleaned_data["user"]
if user_id is None:
return attendee_list(request)
attendee = people.Attendee.objects.get(user__id=user_id)
name = attendee.attendeeprofilebase.attendee_name()
reports = []
profile_data = []
try:
profile = people.AttendeeProfileBase.objects.get_subclass(
attendee=attendee
)
fields = profile._meta.get_fields()
except people.AttendeeProfileBase.DoesNotExist:
fields = []
exclude = set(["attendeeprofilebase_ptr", "id"])
for field in fields:
if field.name in exclude:
# Not actually important
continue
if not hasattr(field, "verbose_name"):
continue # Not a publicly visible field
value = getattr(profile, field.name)
if isinstance(field, models.ManyToManyField):
value = ", ".join(str(i) for i in value.all())
profile_data.append((field.verbose_name, value))
cart = CartController.for_user(attendee.user)
reservation = cart.cart.reservation_duration + cart.cart.time_last_updated
profile_data.append(("Current cart reserved until", reservation))
reports.append(ListReport("Profile", ["", ""], profile_data))
links = []
links.append((
reverse(views.badge, args=[user_id]),
"View badge",
))
links.append((
reverse(views.amend_registration, args=[user_id]),
"Amend current cart",
))
links.append((
reverse(views.extend_reservation, args=[user_id]),
"Extend reservation",
))
reports.append(Links("Actions for " + name, links))
# Paid and pending products
ic = ItemController(attendee.user)
reports.append(ListReport(
"Paid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_purchased()],
))
reports.append(ListReport(
"Unpaid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_pending()],
))
# Invoices
invoices = commerce.Invoice.objects.filter(
user=attendee.user,
)
reports.append(QuerysetReport(
"Invoices",
["id", "get_status_display", "value"],
invoices,
headings=["Invoice ID", "Status", "Value"],
link_view=views.invoice,
))
# Credit Notes
credit_notes = commerce.CreditNote.objects.filter(
invoice__user=attendee.user,
).select_related("invoice", "creditnoteapplication", "creditnoterefund")
reports.append(QuerysetReport(
"Credit Notes",
["id", "status", "value"],
credit_notes,
link_view=views.credit_note,
))
# All payments
payments = commerce.PaymentBase.objects.filter(
invoice__user=attendee.user,
).select_related("invoice")
reports.append(QuerysetReport(
"Payments",
["invoice__id", "id", "reference", "amount"],
payments,
link_view=views.invoice,
))
return reports | Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest. | Below is the the instruction that describes the task:
### Input:
Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest.
### Response:
def attendee(request, form, user_id=None):
''' Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest. '''
if user_id is None and form.cleaned_data["user"] is not None:
user_id = form.cleaned_data["user"]
if user_id is None:
return attendee_list(request)
attendee = people.Attendee.objects.get(user__id=user_id)
name = attendee.attendeeprofilebase.attendee_name()
reports = []
profile_data = []
try:
profile = people.AttendeeProfileBase.objects.get_subclass(
attendee=attendee
)
fields = profile._meta.get_fields()
except people.AttendeeProfileBase.DoesNotExist:
fields = []
exclude = set(["attendeeprofilebase_ptr", "id"])
for field in fields:
if field.name in exclude:
# Not actually important
continue
if not hasattr(field, "verbose_name"):
continue # Not a publicly visible field
value = getattr(profile, field.name)
if isinstance(field, models.ManyToManyField):
value = ", ".join(str(i) for i in value.all())
profile_data.append((field.verbose_name, value))
cart = CartController.for_user(attendee.user)
reservation = cart.cart.reservation_duration + cart.cart.time_last_updated
profile_data.append(("Current cart reserved until", reservation))
reports.append(ListReport("Profile", ["", ""], profile_data))
links = []
links.append((
reverse(views.badge, args=[user_id]),
"View badge",
))
links.append((
reverse(views.amend_registration, args=[user_id]),
"Amend current cart",
))
links.append((
reverse(views.extend_reservation, args=[user_id]),
"Extend reservation",
))
reports.append(Links("Actions for " + name, links))
# Paid and pending products
ic = ItemController(attendee.user)
reports.append(ListReport(
"Paid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_purchased()],
))
reports.append(ListReport(
"Unpaid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_pending()],
))
# Invoices
invoices = commerce.Invoice.objects.filter(
user=attendee.user,
)
reports.append(QuerysetReport(
"Invoices",
["id", "get_status_display", "value"],
invoices,
headings=["Invoice ID", "Status", "Value"],
link_view=views.invoice,
))
# Credit Notes
credit_notes = commerce.CreditNote.objects.filter(
invoice__user=attendee.user,
).select_related("invoice", "creditnoteapplication", "creditnoterefund")
reports.append(QuerysetReport(
"Credit Notes",
["id", "status", "value"],
credit_notes,
link_view=views.credit_note,
))
# All payments
payments = commerce.PaymentBase.objects.filter(
invoice__user=attendee.user,
).select_related("invoice")
reports.append(QuerysetReport(
"Payments",
["invoice__id", "id", "reference", "amount"],
payments,
link_view=views.invoice,
))
return reports |
def _QueryHash(self, digest):
"""Queries the Viper Server for a specfic hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
"""
if not self._url:
self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
self._protocol, self._host, self._port)
request_data = {self.lookup_hash: digest}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._url, 'POST', data=request_data)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query Viper with error: {0!s}.'.format(
exception))
    return json_response | Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error. | Below is the the instruction that describes the task:
### Input:
Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
### Response:
def _QueryHash(self, digest):
"""Queries the Viper Server for a specfic hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
"""
if not self._url:
self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
self._protocol, self._host, self._port)
request_data = {self.lookup_hash: digest}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._url, 'POST', data=request_data)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query Viper with error: {0!s}.'.format(
exception))
return json_response |
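
A hypothetical lookup with the method above; analyzer stands for the analysis-plugin instance that owns the Viper connection settings, and the digest is simply the SHA-256 of an empty file.
digest = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
response = analyzer._QueryHash(digest)
if response:
    print(response)   # decoded JSON from the /file/find endpoint
else:
    print("query failed")
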
def xAxisIsMajor(self):
'''
Returns True if the major axis is parallel to the X axis, boolean.
'''
return max(self.radius.x, self.radius.y) == self.radius.x | Returns True if the major axis is parallel to the X axis, boolean. | Below is the the instruction that describes the task:
### Input:
Returns True if the major axis is parallel to the X axis, boolean.
### Response:
def xAxisIsMajor(self):
'''
Returns True if the major axis is parallel to the X axis, boolean.
'''
return max(self.radius.x, self.radius.y) == self.radius.x |
def which(program, environ=None):
"""
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
"""
def is_exe(path):
"""
Helper method to check if a file exists and is executable
"""
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program) | Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception | Below is the the instruction that describes the task:
### Input:
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
### Response:
def which(program, environ=None):
"""
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
"""
def is_exe(path):
"""
Helper method to check if a file exists and is executable
"""
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program) |
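
A short usage sketch for the helper above; the custom PATH is made up.
import os

try:
    print("git lives at:", which("git"))
except CommandException:
    print("git is not on the PATH")

custom_env = {"PATH": "/usr/local/bin" + os.pathsep + "/usr/bin"}
try:
    print(which("python3", environ=custom_env))
except CommandException:
    print("python3 not found in the supplied PATH")
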
def get_newest_app_version() -> Version:
"""
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
"""
with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
releases = json.loads(pypi_json).get('releases', [])
online_version = Version('0.0.0')
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version | Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version | Below is the the instruction that describes the task:
### Input:
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
### Response:
def get_newest_app_version() -> Version:
"""
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
"""
with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
releases = json.loads(pypi_json).get('releases', [])
online_version = Version('0.0.0')
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version |
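
A sketch of the usual comparison against the locally installed version; the local version string is made up.
from packaging.version import Version

local_version = Version("1.2.3")
if get_newest_app_version() > local_version:
    print("a newer release is available on PyPI")
else:
    print("already up to date")
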