def build_options(payload, options, maxsize = 576, overload = OVERLOAD_FILE | OVERLOAD_SNAME, allowpartial = True):
'''
    Split a list of options.
    This is the reverse operation of `reassemble_options`: it splits each `dhcp_option` into
    `dhcp_option_partial` structures if necessary, and sets the overload option when field
    overloading is used.
    :param payload: the DHCP payload whose `options` (and possibly `file`/`sname`) fields are filled in
    :param options: a list of `dhcp_option`
    :param maxsize: Limit the maximum DHCP message size. If the options cannot fit into the DHCP
                    message, the specified fields are overloaded for options. If the options still
                    cannot fit after overloading, extra options are DROPPED when allowpartial = True.
                    It is therefore important to sort the DHCP options by priority.
    :param overload: fields that are allowed to be overloaded
    :param allowpartial: When options cannot fit into the DHCP message, allow the rest of the
                         options to be dropped.
    :return: Number of options that are dropped, i.e. `options[-return_value:]` are dropped
'''
if maxsize < 576:
maxsize = 576
max_options_size = maxsize - 240
# Ignore OPTION_PAD and OPTION_END
options = [o for o in options if o.tag not in (OPTION_PAD, OPTION_END)]
# Only preserve data
option_data = [(o.tag, o._tobytes()[2:]) for o in options]
def split_options(option_data, limits):
"""
Split options into multiple fields
        :param option_data: list of (tag, data) pairs
        :param limits: list of int limits for each field (excluding PAD and END)
        :return: a tuple (fields, dropped): fields is a list of `dhcp_option_partial` lists,
                 one per used field, and dropped is the number of options that are dropped
"""
# List of (dhcp_option_partial, option_not_finished)
partial_options = []
buffers = [0]
if not options:
return ([], 0)
def create_result():
# Remove any unfinished partial options
while partial_options and partial_options[-1][1]:
partial_options.pop()
buffers.append(len(partial_options))
r = [[po for po,_ in partial_options[buffers[i]:buffers[i+1]]] for i in range(0, len(buffers) - 1)]
# Remove empty fields
while r and not r[-1]:
r.pop()
return r
# Current field used size
current_size = 0
limit_iter = iter(limits)
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return ([], False)
for i, (tag, data) in enumerate(option_data):
# Current used data size
data_size = 0
            # Do not split very small options across a field boundary; this may keep some broken
            # DHCP clients/servers from running into problems
nosplit = (len(data) <= 32)
while True:
# next partial option size should be:
# 1. no more than the current field limit (minus 1-byte tag and 1-byte length)
# 2. no more than the single dhcp_option_partial data limit (255 due to single byte length)
# 3. no more than the rest data size
next_size = min(next_limit - current_size - 2, 255, len(data) - data_size)
if next_size < 0 or (next_size == 0 and data_size < len(data)) \
or (next_size < len(data) - data_size and nosplit):
# Cannot put this part of data on the current field, find the next field
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return (create_result(), len(option_data) - i)
# Record field boundary
buffers.append(len(partial_options))
current_size = 0
else:
# Put this partial option on current field
partial_options.append((dhcp_option_partial(tag = tag, data = data[data_size : data_size + next_size]),
(next_size < len(data) - data_size)))
data_size += next_size
current_size += next_size + 2
if data_size >= len(data):
# finished current option
break
return (create_result(), 0)
# First try to fit all options in options field
# preserve a byte for OPTION_END
result, not_finished = split_options(option_data, [max_options_size - 1])
if not_finished:
if overload & (OVERLOAD_FILE | OVERLOAD_SNAME):
# Try overload
            # minus an overload option (1-byte tag, 1-byte length, 1-byte dhcp_overload) and 1-byte OPTION_END
limits = [max_options_size - 4]
if overload & OVERLOAD_FILE:
# preserve a byte for OPTION_END
limits.append(127)
if overload & OVERLOAD_SNAME:
# preserve a byte for OPTION_END
limits.append(63)
result2, not_finished2 = split_options(option_data, limits)
# Only overload if we have a better result
if len(result2) > 1:
result = result2
not_finished = not_finished2
if not allowpartial and not_finished:
raise ValueError("%d options cannot fit into a DHCP message" % (not_finished,))
if not result:
return not_finished
elif len(result) <= 1:
# No overload
payload.options = result[0] + [dhcp_option_partial(tag = OPTION_END)]
else:
overload_option = 0
if len(result) >= 2 and result[1]:
overload_option |= OVERLOAD_FILE
# overload file field
payload.file = dhcp_option_partial[0].tobytes(result[1] + [dhcp_option_partial(tag = OPTION_END)])
if len(result) >= 3 and result[2]:
overload_option |= OVERLOAD_SNAME
# overload sname field
payload.sname = dhcp_option_partial[0].tobytes(result[2] + [dhcp_option_partial(tag = OPTION_END)])
# Put an overload option before any other options
payload.options = [dhcp_option_partial(tag = OPTION_OVERLOAD, data = dhcp_overload.tobytes(overload_option))] \
+ result[0] + [dhcp_option_partial(tag = OPTION_END)]
    return not_finished

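# Illustrative sketch (not part of the original module): the per-field byte
# budgets build_options works with, assuming the 240-byte fixed header /
# magic-cookie overhead subtracted above.
def _dhcp_field_budgets(maxsize=576, overload_file=True, overload_sname=True):
    """Return the field limits build_options would pass to split_options."""
    maxsize = max(maxsize, 576)
    max_options_size = maxsize - 240
    if not (overload_file or overload_sname):
        # no overloading: only reserve one byte for OPTION_END
        return [max_options_size - 1]
    # overloading: reserve 3 bytes for the overload option plus 1 for OPTION_END
    limits = [max_options_size - 4]
    if overload_file:
        limits.append(127)    # 128-byte 'file' field minus OPTION_END
    if overload_sname:
        limits.append(63)     # 64-byte 'sname' field minus OPTION_END
    return limits

print(_dhcp_field_budgets())  # [332, 127, 63] for a minimal 576-byte message
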
def ini_dump_hook(cfg, text: bool=False):
"""
    Dumps all the data into an INI file.
    This will automatically kill anything with a '_' in the key name, replacing it with a dot. You have been warned.
"""
data = cfg.config.dump()
# Load data back into the goddamned ini file.
ndict = {}
for key, item in data.items():
key = key.replace('_', '.')
ndict[key] = item
cfg.tmpini = configparser.ConfigParser()
    cfg.tmpini.read_dict(ndict)  # use the rewritten keys ('_' -> '.') built above
if not text:
cfg.tmpini.write(cfg.fd)
else:
return
    cfg.reload()

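# Minimal standalone sketch of the key rewrite and dump performed above, using
# only the standard library; the cfg object and its file descriptor are
# specific to the original hook and are replaced here by an in-memory buffer.
import configparser
import io

data = {'app_settings': {'log_level': 'info', 'cache_ttl': '60'}}
ndict = {key.replace('_', '.'): item for key, item in data.items()}  # 'app_settings' -> 'app.settings'

parser = configparser.ConfigParser()
parser.read_dict(ndict)

buf = io.StringIO()
parser.write(buf)
print(buf.getvalue())
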
def main():
""" Runs Godot.
"""
application = GodotApplication( id="godot",
plugins=[CorePlugin(),
PuddlePlugin(),
WorkbenchPlugin(),
ResourcePlugin(),
GodotPlugin()] )
    application.run()

def replace_cluster_role(self, name, body, **kwargs):
"""
replace the specified ClusterRole
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_cluster_role(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRole (required)
:param V1ClusterRole body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1ClusterRole
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_cluster_role_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_cluster_role_with_http_info(name, body, **kwargs)
        return data

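# Hedged usage sketch for the generated client method above, assuming the
# official `kubernetes` Python package; the role name and rules are made up.
from kubernetes import client, config

config.load_kube_config()          # or config.load_incluster_config()
rbac = client.RbacAuthorizationV1Api()

role = client.V1ClusterRole(
    metadata=client.V1ObjectMeta(name="pod-reader"),
    rules=[client.V1PolicyRule(api_groups=[""],
                               resources=["pods"],
                               verbs=["get", "list", "watch"])])

# Synchronous by default; pass async_req=True to get a thread and call .get().
updated = rbac.replace_cluster_role("pod-reader", role)
print(updated.metadata.name)
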
def supports_coordinate_type(self, coordinate_type=None):
"""Tests if the given coordinate type is supported.
arg: coordinate_type (osid.type.Type): a coordinate Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``COORDINATE``
raise: NullArgument - ``coordinate_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
from .osid_errors import IllegalState, NullArgument
if not coordinate_type:
raise NullArgument('no input Type provided')
if self._kwargs['syntax'] not in ['``COORDINATE``']:
        raise IllegalState('put more meaningful message here')
    return coordinate_type in self.get_coordinate_types

def reply(self):
"""
Reply to the selected item. This is a utility method and should not
be bound to a key directly.
Item type:
Submission - add a top level comment
Comment - add a comment reply
Message - reply to a private message
"""
data = self.get_selected_item()
if data['type'] == 'Submission':
body = data['text']
description = 'submission'
reply = data['object'].add_comment
elif data['type'] in ('Comment', 'InboxComment'):
body = data['body']
description = 'comment'
reply = data['object'].reply
elif data['type'] == 'Message':
body = data['body']
description = 'private message'
reply = data['object'].reply
else:
self.term.flash()
return
# Construct the text that will be displayed in the editor file.
# The post body will be commented out and added for reference
lines = [' |' + line for line in body.split('\n')]
content = '\n'.join(lines)
comment_info = docs.REPLY_FILE.format(
author=data['author'],
type=description,
content=content)
with self.term.open_editor(comment_info) as comment:
if not comment:
self.term.show_notification('Canceled')
return
with self.term.loader('Posting {}'.format(description), delay=0):
reply(comment)
# Give reddit time to process the submission
time.sleep(2.0)
if self.term.loader.exception is None:
self.reload_page()
else:
        raise TemporaryFileError()

def _node(self, tax_id):
"""
Returns parent_id, rank
FIXME: expand return rank to include custom 'below' ranks built when
        get_lineage is called
"""
s = select([self.nodes.c.parent_id, self.nodes.c.rank],
self.nodes.c.tax_id == tax_id)
res = s.execute()
output = res.fetchone()
if not output:
msg = 'value "{}" not found in nodes.tax_id'.format(tax_id)
raise ValueError(msg)
else:
        return output

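# Standalone sketch of the schema this method appears to assume (a `nodes`
# table keyed by tax_id with parent_id and rank columns), written against
# SQLAlchemy 2.0; the 1.x-style `select([cols], whereclause)` call above is
# the legacy spelling of the same query.
from sqlalchemy import Column, MetaData, String, Table, create_engine, select

metadata = MetaData()
nodes = Table('nodes', metadata,
              Column('tax_id', String, primary_key=True),
              Column('parent_id', String),
              Column('rank', String))

engine = create_engine('sqlite://')
metadata.create_all(engine)
with engine.connect() as conn:
    conn.execute(nodes.insert(), [{'tax_id': '9606', 'parent_id': '9605', 'rank': 'species'}])
    row = conn.execute(
        select(nodes.c.parent_id, nodes.c.rank).where(nodes.c.tax_id == '9606')
    ).fetchone()
    print(row)  # ('9605', 'species')
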
def close(self):
"""Force close all Channels and cancel all Operations
"""
if self._Q is not None:
for T in self._T:
self._Q.interrupt()
for n, T in enumerate(self._T):
_log.debug('Join Context worker %d', n)
T.join()
_log.debug('Joined Context workers')
self._Q, self._T = None, None
    super(Context, self).close()

def create_tomodir(self, directory):
"""Create a tomodir subdirectory structure in the given directory
"""
pwd = os.getcwd()
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(directory)
directories = (
'config',
'exe',
'grid',
'mod',
'mod/pot',
'mod/sens',
'rho',
)
for directory in directories:
if not os.path.isdir(directory):
os.makedirs(directory)
    os.chdir(pwd)

def delete(self, reason=''):
"""Deletes the event chatroom and if necessary the chatroom, too.
:param reason: reason for the deletion
:return: True if the associated chatroom was also
deleted, otherwise False
"""
db.session.delete(self)
db.session.flush()
if not self.chatroom.events:
db.session.delete(self.chatroom)
db.session.flush()
delete_room(self.chatroom, reason)
return True
    return False

def rsdl_rn(self, AX, Y):
"""Compute primal residual normalisation term.
Overriding this method is required if methods :meth:`cnst_A`,
:meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
overridden.
"""
# Avoid computing the norm of the value returned by cnst_c()
# more than once
if not hasattr(self, '_nrm_cnst_c'):
self._nrm_cnst_c = np.linalg.norm(self.cnst_c())
return max((np.linalg.norm(AX), np.linalg.norm(self.cnst_B(Y)),
                self._nrm_cnst_c))

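# Standalone sketch (NumPy only) of the same normalisation term for a generic
# ADMM constraint A x + B y = c; the class above obtains A x, B(y) and c
# through its cnst_A / cnst_B / cnst_c hooks instead.
import numpy as np

def primal_residual_norm(AX, BY, c):
    """max(||Ax||, ||By||, ||c||), used to normalise the primal residual."""
    return max(np.linalg.norm(AX), np.linalg.norm(BY), np.linalg.norm(c))

rng = np.random.default_rng(0)
AX, BY, c = rng.standard_normal(5), rng.standard_normal(5), np.zeros(5)
print(primal_residual_norm(AX, BY, c))
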
def set_display_columns(self, set_true=[], set_false=[]):
"""Add or remove columns from the output."""
for i in range(len(self.fields)):
if self.fields[i].name in set_true:
self.fields[i].display = True
elif self.fields[i].name in set_false:
            self.fields[i].display = False

def __get_oauth_url(self, url, method, **kwargs):
""" Generate oAuth1.0a URL """
oauth = OAuth(
url=url,
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
version=self.version,
method=method,
oauth_timestamp=kwargs.get("oauth_timestamp", int(time()))
)
    return oauth.get_oauth_url()

def _zip_from_file_patterns(root, includes, excludes, follow_symlinks):
"""Generates a ZIP file in-memory from file search patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
See Also:
:func:`_zip_files`, :func:`_find_files`.
Raises:
RuntimeError: when the generated archive would be empty.
"""
logger.info('lambda: base directory: %s', root)
files = list(_find_files(root, includes, excludes, follow_symlinks))
if not files:
raise RuntimeError('Empty list of files for Lambda payload. Check '
'your include/exclude options for errors.')
logger.info('lambda: adding %d files:', len(files))
for fname in files:
logger.debug('lambda: + %s', fname)
    return _zip_files(files, root)

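# Hedged usage sketch: the include/exclude patterns below are hypothetical --
# their exact syntax is whatever this module's _find_files helper accepts, and
# the return value is simply whatever _zip_files builds from the matched files.
payload = _zip_from_file_patterns(
    root='./lambda_src',
    includes=['**'],
    excludes=['*.pyc', 'tests/*'],
    follow_symlinks=False)
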
def get_principal_dictionary(graph_client, object_ids, raise_on_graph_call_error=False):
"""Retrieves Azure AD Objects for corresponding object ids passed.
:param graph_client: A client for Microsoft Graph.
:param object_ids: The object ids to retrieve Azure AD objects for.
    :param raise_on_graph_call_error: A boolean indicating whether an error should be
raised if the underlying Microsoft Graph call fails.
:return: A dictionary keyed by object id with the Azure AD object as the value.
Note: empty Azure AD objects could be returned if not found in the graph.
"""
if not object_ids:
return {}
object_params = GetObjectsParameters(
include_directory_object_references=True,
object_ids=object_ids)
principal_dics = {object_id: DirectoryObject() for object_id in object_ids}
aad_objects = graph_client.objects.get_objects_by_object_ids(object_params)
try:
for aad_object in aad_objects:
principal_dics[aad_object.object_id] = aad_object
except CloudError as e:
if e.status_code in [403, 401]:
GraphHelper.log.warning(
'Credentials not authorized for access to read from Microsoft Graph. \n '
'Can not query on principalName, displayName, or aadType. \n')
else:
GraphHelper.log.error(
'Exception in call to Microsoft Graph. \n '
'Can not query on principalName, displayName, or aadType. \n'
'Error: {0}'.format(e))
if raise_on_graph_call_error:
raise
    return principal_dics

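# Hedged usage sketch assuming the legacy azure-graphrbac client this helper
# appears to target; the ids below are placeholders, and the credentials must
# carry a token for the AAD Graph resource.
from azure.common.credentials import ServicePrincipalCredentials
from azure.graphrbac import GraphRbacManagementClient

creds = ServicePrincipalCredentials(client_id='<app-id>', secret='<secret>',
                                    tenant='<tenant-id>',
                                    resource='https://graph.windows.net')
graph_client = GraphRbacManagementClient(creds, '<tenant-id>')

principals = get_principal_dictionary(
    graph_client, ['00000000-0000-0000-0000-000000000000'])
for object_id, aad_object in principals.items():
    print(object_id, getattr(aad_object, 'display_name', None))
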
def next_frame_savp():
"""SAVP model hparams."""
hparams = sv2p_params.next_frame_sv2p()
hparams.add_hparam("z_dim", 8)
hparams.add_hparam("num_discriminator_filters", 32)
hparams.add_hparam("use_vae", True)
hparams.add_hparam("use_gan", False)
hparams.add_hparam("use_spectral_norm", True)
hparams.add_hparam("gan_loss", "cross_entropy")
hparams.add_hparam("gan_loss_multiplier", 0.01)
hparams.add_hparam("gan_vae_loss_multiplier", 0.01)
hparams.add_hparam("gan_optimization", "joint")
hparams.bottom = {
"inputs": modalities.video_raw_bottom,
"targets": modalities.video_raw_targets_bottom,
}
hparams.loss = {
"targets": modalities.video_l1_raw_loss,
}
hparams.top = {
"targets": modalities.video_raw_top,
}
hparams.latent_loss_multiplier_schedule = "linear"
hparams.upsample_method = "bilinear_upsample_conv"
hparams.internal_loss = False
hparams.reward_prediction = False
hparams.anneal_end = 100000
hparams.num_iterations_1st_stage = 0
hparams.num_iterations_2nd_stage = 50000
  return hparams

def remove_empty_keys(values, remove=({}, None, [], 'null')):
"""Recursively remove key/value pairs where the value is in ``remove``.
This is targeted at comparing json-e rebuilt task definitions, since
json-e drops key/value pairs with empty values.
Args:
values (dict/list): the dict or list to remove empty keys from.
Returns:
values (dict/list): a dict or list copy, with empty keys removed.
"""
if isinstance(values, dict):
return {key: remove_empty_keys(value, remove=remove)
for key, value in deepcopy(values).items() if value not in remove}
if isinstance(values, list):
return [remove_empty_keys(value, remove=remove)
for value in deepcopy(values) if value not in remove]
    return values

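# Simple usage sketch: empty dicts/lists, None and the string 'null' are
# stripped recursively, mirroring how json-e drops empty values.
task = {
    'provisionerId': 'aws-provisioner-v1',
    'routes': [],
    'extra': {},
    'payload': {'env': {'FOO': 'bar', 'EMPTY': None}},
}
print(remove_empty_keys(task))
# {'provisionerId': 'aws-provisioner-v1', 'payload': {'env': {'FOO': 'bar'}}}
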
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
    return result

about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping. | Below is the the instruction that describes the task:
### Input:
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
### Response:
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result |
def main(context, no_color):
"""
Ipa provides a Python API and command line utility for testing images.
It can be used to test images in the Public Cloud (AWS, Azure, GCE, etc.).
"""
if context.obj is None:
context.obj = {}
    context.obj['no_color'] = no_color

def connect_lv_generators(network, allow_multiple_genos_per_load=True):
"""Connect LV generators to existing grids.
This function searches for unconnected generators in all LV grids and
connects them.
It connects
* generators of voltage level 6
* to MV-LV station
* generators of voltage level 7
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available
(fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
allow_multiple_genos_per_load : :obj:`bool`
If True, more than one generator can be connected to one load
Notes
-----
For the allocation, loads are selected randomly (sector-wise) using a
predefined seed to ensure reproducibility.
"""
# get predefined random seed and initialize random generator
seed = int(network.config['grid_connection']['random_seed'])
#random.seed(a=seed)
random.seed(a=1234)
# ToDo: Switch back to 'seed' as soon as line ids are finished, #58
# get standard equipment
std_line_type = network.equipment_data['lv_cables'].loc[
network.config['grid_expansion_standard_equipment']['lv_line']]
std_line_kind = 'cable'
# # TEMP: DEBUG STUFF
# lv_grid_stats = pd.DataFrame(columns=('lv_grid',
# 'load_count',
# 'geno_count',
# 'more_genos_than_loads')
# )
# iterate over all LV grids
for lv_grid in network.mv_grid.lv_grids:
lv_loads = lv_grid.graph.nodes_by_attribute('load')
# counter for genos in v_level 7
log_geno_count_vlevel7 = 0
# generate random list (without replacement => unique elements)
# of loads (residential) to connect genos (P <= 30kW) to.
lv_loads_res = sorted([lv_load for lv_load in lv_loads
if 'residential' in list(lv_load.consumption.keys())],
key=lambda _: repr(_))
if len(lv_loads_res) > 0:
lv_loads_res_rnd = set(random.sample(lv_loads_res,
len(lv_loads_res)))
else:
lv_loads_res_rnd = None
# generate random list (without replacement => unique elements)
# of loads (retail, industrial, agricultural) to connect genos
# (30kW < P <= 100kW) to.
lv_loads_ria = sorted([lv_load for lv_load in lv_loads
if any([_ in list(lv_load.consumption.keys())
for _ in ['retail', 'industrial', 'agricultural']])],
key=lambda _: repr(_))
if len(lv_loads_ria) > 0:
lv_loads_ria_rnd = set(random.sample(lv_loads_ria,
len(lv_loads_ria)))
else:
lv_loads_ria_rnd = None
for geno in sorted(lv_grid.graph.nodes_by_attribute('generator'), key=lambda x: repr(x)):
if nx.is_isolate(lv_grid.graph, geno):
lv_station = lv_grid.station
# generator is of v_level 6 -> connect to LV station
if geno.v_level == 6:
line_length = calc_geo_dist_vincenty(network=network,
node_source=geno,
node_target=lv_station)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=lv_grid)
lv_grid.graph.add_edge(geno,
lv_station,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# generator is of v_level 7 -> assign geno to load
elif geno.v_level == 7:
# counter for genos in v_level 7
log_geno_count_vlevel7 += 1
# connect genos with P <= 30kW to residential loads, if available
if (geno.nominal_capacity <= 30) and (lv_loads_res_rnd is not None):
if len(lv_loads_res_rnd) > 0:
lv_load = lv_loads_res_rnd.pop()
# if random load list is empty, create new one
else:
lv_loads_res_rnd = set(random.sample(lv_loads_res,
len(lv_loads_res))
)
lv_load = lv_loads_res_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
if not allow_multiple_genos_per_load:
# check if there's an existing generator connected to the load
# if so, select next load. If no load is available, connect to station.
while any([isinstance(_, Generator)
for _ in lv_grid.graph.neighbors(
lv_grid.graph.neighbors(lv_load)[0])]):
if len(lv_loads_res_rnd) > 0:
lv_load = lv_loads_res_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
break
                    # connect genos with 30kW < P <= 100kW
                    # to retail, industrial or agricultural loads, if available
elif (geno.nominal_capacity > 30) and (lv_loads_ria_rnd is not None):
if len(lv_loads_ria_rnd) > 0:
lv_load = lv_loads_ria_rnd.pop()
# if random load list is empty, create new one
else:
lv_loads_ria_rnd = set(random.sample(lv_loads_ria,
len(lv_loads_ria))
)
lv_load = lv_loads_ria_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
if not allow_multiple_genos_per_load:
# check if there's an existing generator connected to the load
# if so, select next load. If no load is available, connect to station.
while any([isinstance(_, Generator)
for _ in lv_grid.graph.neighbors(
lv_grid.graph.neighbors(lv_load)[0])]):
if len(lv_loads_ria_rnd) > 0:
lv_load = lv_loads_ria_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
break
# fallback: connect to station
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=1e-3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=lv_grid)
                    lv_grid.graph.add_edge(geno,
                                           lv_conn_target,
                                           line=line,
                                           type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# warn if there're more genos than loads in LV grid
if log_geno_count_vlevel7 > len(lv_loads):
logger.debug('The count of newly connected generators in voltage level 7 ({}) '
'exceeds the count of loads ({}) in LV grid {}.'
.format(str(log_geno_count_vlevel7),
str(len(lv_loads)),
repr(lv_grid)
)
                         )

This function searches for unconnected generators in all LV grids and
connects them.
It connects
* generators of voltage level 6
* to MV-LV station
* generators of voltage level 7
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available
(fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
allow_multiple_genos_per_load : :obj:`bool`
If True, more than one generator can be connected to one load
Notes
-----
For the allocation, loads are selected randomly (sector-wise) using a
predefined seed to ensure reproducibility. | Below is the the instruction that describes the task:
### Input:
Connect LV generators to existing grids.
This function searches for unconnected generators in all LV grids and
connects them.
It connects
* generators of voltage level 6
* to MV-LV station
* generators of voltage level 7
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available
(fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
allow_multiple_genos_per_load : :obj:`bool`
If True, more than one generator can be connected to one load
Notes
-----
For the allocation, loads are selected randomly (sector-wise) using a
predefined seed to ensure reproducibility.
### Response:
def connect_lv_generators(network, allow_multiple_genos_per_load=True):
"""Connect LV generators to existing grids.
This function searches for unconnected generators in all LV grids and
connects them.
It connects
* generators of voltage level 6
* to MV-LV station
* generators of voltage level 7
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available
(fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
allow_multiple_genos_per_load : :obj:`bool`
If True, more than one generator can be connected to one load
Notes
-----
For the allocation, loads are selected randomly (sector-wise) using a
predefined seed to ensure reproducibility.
"""
# get predefined random seed and initialize random generator
seed = int(network.config['grid_connection']['random_seed'])
#random.seed(a=seed)
random.seed(a=1234)
# ToDo: Switch back to 'seed' as soon as line ids are finished, #58
# get standard equipment
std_line_type = network.equipment_data['lv_cables'].loc[
network.config['grid_expansion_standard_equipment']['lv_line']]
std_line_kind = 'cable'
# # TEMP: DEBUG STUFF
# lv_grid_stats = pd.DataFrame(columns=('lv_grid',
# 'load_count',
# 'geno_count',
# 'more_genos_than_loads')
# )
# iterate over all LV grids
for lv_grid in network.mv_grid.lv_grids:
lv_loads = lv_grid.graph.nodes_by_attribute('load')
# counter for genos in v_level 7
log_geno_count_vlevel7 = 0
# generate random list (without replacement => unique elements)
# of loads (residential) to connect genos (P <= 30kW) to.
lv_loads_res = sorted([lv_load for lv_load in lv_loads
if 'residential' in list(lv_load.consumption.keys())],
key=lambda _: repr(_))
if len(lv_loads_res) > 0:
lv_loads_res_rnd = set(random.sample(lv_loads_res,
len(lv_loads_res)))
else:
lv_loads_res_rnd = None
# generate random list (without replacement => unique elements)
# of loads (retail, industrial, agricultural) to connect genos
# (30kW < P <= 100kW) to.
lv_loads_ria = sorted([lv_load for lv_load in lv_loads
if any([_ in list(lv_load.consumption.keys())
for _ in ['retail', 'industrial', 'agricultural']])],
key=lambda _: repr(_))
if len(lv_loads_ria) > 0:
lv_loads_ria_rnd = set(random.sample(lv_loads_ria,
len(lv_loads_ria)))
else:
lv_loads_ria_rnd = None
for geno in sorted(lv_grid.graph.nodes_by_attribute('generator'), key=lambda x: repr(x)):
if nx.is_isolate(lv_grid.graph, geno):
lv_station = lv_grid.station
# generator is of v_level 6 -> connect to LV station
if geno.v_level == 6:
line_length = calc_geo_dist_vincenty(network=network,
node_source=geno,
node_target=lv_station)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=line_length / 1e3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=lv_grid)
lv_grid.graph.add_edge(geno,
lv_station,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# generator is of v_level 7 -> assign geno to load
elif geno.v_level == 7:
# counter for genos in v_level 7
log_geno_count_vlevel7 += 1
# connect genos with P <= 30kW to residential loads, if available
if (geno.nominal_capacity <= 30) and (lv_loads_res_rnd is not None):
if len(lv_loads_res_rnd) > 0:
lv_load = lv_loads_res_rnd.pop()
# if random load list is empty, create new one
else:
lv_loads_res_rnd = set(random.sample(lv_loads_res,
len(lv_loads_res))
)
lv_load = lv_loads_res_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
if not allow_multiple_genos_per_load:
# check if there's an existing generator connected to the load
# if so, select next load. If no load is available, connect to station.
while any([isinstance(_, Generator)
for _ in lv_grid.graph.neighbors(
lv_grid.graph.neighbors(lv_load)[0])]):
if len(lv_loads_res_rnd) > 0:
lv_load = lv_loads_res_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
break
                    # connect genos with 30kW < P <= 100kW to retail,
                    # industrial or agricultural loads, if available
elif (geno.nominal_capacity > 30) and (lv_loads_ria_rnd is not None):
if len(lv_loads_ria_rnd) > 0:
lv_load = lv_loads_ria_rnd.pop()
# if random load list is empty, create new one
else:
lv_loads_ria_rnd = set(random.sample(lv_loads_ria,
len(lv_loads_ria))
)
lv_load = lv_loads_ria_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
if not allow_multiple_genos_per_load:
# check if there's an existing generator connected to the load
# if so, select next load. If no load is available, connect to station.
while any([isinstance(_, Generator)
for _ in lv_grid.graph.neighbors(
lv_grid.graph.neighbors(lv_load)[0])]):
if len(lv_loads_ria_rnd) > 0:
lv_load = lv_loads_ria_rnd.pop()
# get cable distributor of building
lv_conn_target = lv_grid.graph.neighbors(lv_load)[0]
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
break
# fallback: connect to station
else:
lv_conn_target = lv_grid.station
logger.debug(
'No valid conn. target found for {}. '
'Connected to {}.'.format(
repr(geno),
repr(lv_conn_target)
)
)
line = Line(id=random.randint(10 ** 8, 10 ** 9),
length=1e-3,
quantity=1,
kind=std_line_kind,
type=std_line_type,
grid=lv_grid)
lv_grid.graph.add_edge(geno,
                                           lv_conn_target,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# warn if there're more genos than loads in LV grid
if log_geno_count_vlevel7 > len(lv_loads):
logger.debug('The count of newly connected generators in voltage level 7 ({}) '
'exceeds the count of loads ({}) in LV grid {}.'
.format(str(log_geno_count_vlevel7),
str(len(lv_loads)),
repr(lv_grid)
)
) |
def parse_table_properties(doc, table, prop):
"Parse table properties."
if not table:
return
style = prop.find(_name('{{{w}}}tblStyle'))
if style is not None:
table.style_id = style.attrib[_name('{{{w}}}val')]
        doc.add_style_as_used(table.style_id) | Parse table properties. | Below is the instruction that describes the task:
### Input:
Parse table properties.
### Response:
def parse_table_properties(doc, table, prop):
"Parse table properties."
if not table:
return
style = prop.find(_name('{{{w}}}tblStyle'))
if style is not None:
table.style_id = style.attrib[_name('{{{w}}}val')]
doc.add_style_as_used(table.style_id) |
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
msg = ('URL-encoded content contains a repeated value:'
'%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params | Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated. | Below is the instruction that describes the task:
### Input:
Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
### Response:
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
msg = ('URL-encoded content contains a repeated value:'
'%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params |
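As an illustrative aside on the row above: the function accepts a key only if exactly one value was supplied for it. A minimal standard-library sketch of the same contract, with an invented demo name and sample query string, might look like this:
from urllib.parse import parse_qs

def parse_unique_urlencoded_demo(content):
    # parse_qs maps each key to a list of values; a unique-key parser
    # rejects any key that appears more than once.
    parsed = parse_qs(content)
    params = {}
    for key, values in parsed.items():
        if len(values) != 1:
            raise ValueError('repeated value for %s: %s' % (key, ', '.join(values)))
        params[key] = values[0]
    return params

print(parse_unique_urlencoded_demo('grant_type=refresh_token&scope=email'))
# {'grant_type': 'refresh_token', 'scope': 'email'}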
def merge(self, keys):
"""
Merges the join on pseudo keys of two or more reference data sets.
:param list[tuple[str,str]] keys: For each data set the keys of the start and end date.
"""
deletes = []
for pseudo_key, rows in self._rows.items():
self._additional_rows_date2int(keys, rows)
rows = self._intersection(keys, rows)
if rows:
rows = self._rows_sort(rows)
self._rows[pseudo_key] = self._merge_adjacent_rows(rows)
else:
deletes.append(pseudo_key)
for pseudo_key in deletes:
del self._rows[pseudo_key] | Merges the join on pseudo keys of two or more reference data sets.
:param list[tuple[str,str]] keys: For each data set the keys of the start and end date. | Below is the instruction that describes the task:
### Input:
Merges the join on pseudo keys of two or more reference data sets.
:param list[tuple[str,str]] keys: For each data set the keys of the start and end date.
### Response:
def merge(self, keys):
"""
Merges the join on pseudo keys of two or more reference data sets.
:param list[tuple[str,str]] keys: For each data set the keys of the start and end date.
"""
deletes = []
for pseudo_key, rows in self._rows.items():
self._additional_rows_date2int(keys, rows)
rows = self._intersection(keys, rows)
if rows:
rows = self._rows_sort(rows)
self._rows[pseudo_key] = self._merge_adjacent_rows(rows)
else:
deletes.append(pseudo_key)
for pseudo_key in deletes:
del self._rows[pseudo_key] |
def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
"""
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data) | Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict | Below is the instruction that describes the task:
### Input:
Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
### Response:
def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
"""
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data) |
def dumps(self):
"""Return a dictionnary of current tables"""
return {table_name: getattr(self, table_name).dumps() for table_name in self.TABLES} | Return a dictionnary of current tables | Below is the the instruction that describes the task:
### Input:
Return a dictionnary of current tables
### Response:
def dumps(self):
"""Return a dictionnary of current tables"""
return {table_name: getattr(self, table_name).dumps() for table_name in self.TABLES} |
def _compute_unitary_oracle_matrix(bitstring_map: Dict[str, str]) -> Tuple[np.ndarray,
Dict[str, str]]:
"""
Computes the unitary matrix that encodes the oracle function used in the Bernstein-Vazirani
algorithm. It generates a dense matrix for a function :math:`f`
.. math::
f:\\{0,1\\}^n\\rightarrow \\{0,1\\}
\\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}
(\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})
where :math:`(\\cdot)` is the bitwise dot product, that represents the transition-matrix
elements of the corresponding qubit and ancilla subsystems.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format
:return: a dense matrix containing the permutation of the bit strings and a dictionary
containing the indices of the non-zero elements of the computed permutation matrix as
key-value-pairs
:rtype: Tuple[2darray, Dict[String, String]]
"""
n_bits = len(list(bitstring_map.keys())[0])
n_ancillas = 1
# We instantiate an empty matrix of size n_bits + 1 to encode the mapping from n qubits
    # to one ancilla, which explains the additional +1 overhead.
# To construct the matrix we go through all possible state transitions and pad the index
# according to all possible states the ancilla-subsystem could be in
ufunc = np.zeros(shape=(2 ** (n_bits + 1), 2 ** (n_bits + 1)))
index_mapping_dct = defaultdict(dict)
for b in range(2**n_ancillas):
# padding according to ancilla state
pad_str = np.binary_repr(b, width=1)
for k, v in bitstring_map.items():
# add mapping from initial state to the state in the ancilla system.
# pad_str corresponds to the initial state of the ancilla system.
index_mapping_dct[pad_str + k] = utils.bitwise_xor(pad_str, v) + k
# calculate matrix indices that correspond to the transition-matrix-element
# of the oracle unitary
i, j = int(pad_str+k, 2), int(utils.bitwise_xor(pad_str, v) + k, 2)
ufunc[i, j] = 1
return ufunc, index_mapping_dct | Computes the unitary matrix that encodes the oracle function used in the Bernstein-Vazirani
algorithm. It generates a dense matrix for a function :math:`f`
.. math::
f:\\{0,1\\}^n\\rightarrow \\{0,1\\}
\\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}
(\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})
where :math:`(\\cdot)` is the bitwise dot product, that represents the transition-matrix
elements of the corresponding qubit and ancilla subsystems.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format
:return: a dense matrix containing the permutation of the bit strings and a dictionary
containing the indices of the non-zero elements of the computed permutation matrix as
key-value-pairs
:rtype: Tuple[2darray, Dict[String, String]] | Below is the instruction that describes the task:
### Input:
Computes the unitary matrix that encodes the oracle function used in the Bernstein-Vazirani
algorithm. It generates a dense matrix for a function :math:`f`
.. math::
f:\\{0,1\\}^n\\rightarrow \\{0,1\\}
\\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}
(\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})
where :math:`(\\cdot)` is the bitwise dot product, that represents the transition-matrix
elements of the corresponding qubit and ancilla subsystems.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format
:return: a dense matrix containing the permutation of the bit strings and a dictionary
containing the indices of the non-zero elements of the computed permutation matrix as
key-value-pairs
:rtype: Tuple[2darray, Dict[String, String]]
### Response:
def _compute_unitary_oracle_matrix(bitstring_map: Dict[str, str]) -> Tuple[np.ndarray,
Dict[str, str]]:
"""
Computes the unitary matrix that encodes the oracle function used in the Bernstein-Vazirani
algorithm. It generates a dense matrix for a function :math:`f`
.. math::
f:\\{0,1\\}^n\\rightarrow \\{0,1\\}
\\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}
(\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})
where :math:`(\\cdot)` is the bitwise dot product, that represents the transition-matrix
elements of the corresponding qubit and ancilla subsystems.
:param Dict[String, String] bitstring_map: truth-table of the input bitstring map in
dictionary format
:return: a dense matrix containing the permutation of the bit strings and a dictionary
containing the indices of the non-zero elements of the computed permutation matrix as
key-value-pairs
:rtype: Tuple[2darray, Dict[String, String]]
"""
n_bits = len(list(bitstring_map.keys())[0])
n_ancillas = 1
# We instantiate an empty matrix of size n_bits + 1 to encode the mapping from n qubits
    # to one ancilla, which explains the additional +1 overhead.
# To construct the matrix we go through all possible state transitions and pad the index
# according to all possible states the ancilla-subsystem could be in
ufunc = np.zeros(shape=(2 ** (n_bits + 1), 2 ** (n_bits + 1)))
index_mapping_dct = defaultdict(dict)
for b in range(2**n_ancillas):
# padding according to ancilla state
pad_str = np.binary_repr(b, width=1)
for k, v in bitstring_map.items():
# add mapping from initial state to the state in the ancilla system.
# pad_str corresponds to the initial state of the ancilla system.
index_mapping_dct[pad_str + k] = utils.bitwise_xor(pad_str, v) + k
# calculate matrix indices that correspond to the transition-matrix-element
# of the oracle unitary
i, j = int(pad_str+k, 2), int(utils.bitwise_xor(pad_str, v) + k, 2)
ufunc[i, j] = 1
return ufunc, index_mapping_dct |
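A hedged, self-contained numeric check of the construction above for a single-qubit truth table f(x) = x; the XOR helper below stands in for utils.bitwise_xor and is an assumption of this sketch, not part of the source row.
import numpy as np

def _xor_bits(s1, s2):
    # bitwise XOR of two equal-length bit strings, e.g. '1', '1' -> '0'
    return ''.join('1' if c1 != c2 else '0' for c1, c2 in zip(s1, s2))

bitstring_map = {'0': '0', '1': '1'}   # f(x) = x, i.e. a = 1, b = 0
n_bits = 1
ufunc = np.zeros((2 ** (n_bits + 1), 2 ** (n_bits + 1)))
for b in range(2):
    pad_str = np.binary_repr(b, width=1)          # initial ancilla state
    for k, v in bitstring_map.items():
        i, j = int(pad_str + k, 2), int(_xor_bits(pad_str, v) + k, 2)
        ufunc[i, j] = 1

# The oracle is a permutation matrix, hence unitary: U U^T = I.
assert np.allclose(ufunc @ ufunc.T, np.eye(4))
print(ufunc)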
def hash(self):
'''
:rtype: int
:return: hash of the field
'''
hashed = super(String, self).hash()
return khash(hashed, self._max_size) | :rtype: int
:return: hash of the field | Below is the instruction that describes the task:
### Input:
:rtype: int
:return: hash of the field
### Response:
def hash(self):
'''
:rtype: int
:return: hash of the field
'''
hashed = super(String, self).hash()
return khash(hashed, self._max_size) |
def flatten_list(x: List[Any]) -> List[Any]:
"""
Converts a list of lists into a flat list.
Args:
x: list of lists
Returns:
flat list
As per
http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
""" # noqa
return [item for sublist in x for item in sublist] | Converts a list of lists into a flat list.
Args:
x: list of lists
Returns:
flat list
As per
http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python | Below is the instruction that describes the task:
### Input:
Converts a list of lists into a flat list.
Args:
x: list of lists
Returns:
flat list
As per
http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
### Response:
def flatten_list(x: List[Any]) -> List[Any]:
"""
Converts a list of lists into a flat list.
Args:
x: list of lists
Returns:
flat list
As per
http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
""" # noqa
return [item for sublist in x for item in sublist] |
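A quick usage sketch for the row above; the helper is copied so the snippet runs on its own, and the sample nested list is invented.
from typing import Any, List

def flatten_list(x: List[Any]) -> List[Any]:
    # same one-liner as above: walk the sublists, then their items
    return [item for sublist in x for item in sublist]

print(flatten_list([[1, 2], [3], [4, 5, 6]]))   # [1, 2, 3, 4, 5, 6]
print(flatten_list([]))                         # []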
def _add_layer_clicked(self):
"""Add layer clicked."""
layer = self.tree.selectedItems()[0]
origin = layer.data(0, LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
parent = layer.data(0, LAYER_PARENT_ANALYSIS_ROLE)
key = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item = QListWidgetItem('%s - %s' % (layer.text(0), parent))
item.setData(LAYER_PARENT_ANALYSIS_ROLE, parent)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, key)
else:
item = QListWidgetItem(layer.text(0))
layer_id = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id)
item.setData(LAYER_ORIGIN_ROLE, origin)
self.list_layers_in_map_report.addItem(item)
self.tree.invisibleRootItem().removeChild(layer)
    self.tree.clearSelection() | Add layer clicked. | Below is the instruction that describes the task:
### Input:
Add layer clicked.
### Response:
def _add_layer_clicked(self):
"""Add layer clicked."""
layer = self.tree.selectedItems()[0]
origin = layer.data(0, LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
parent = layer.data(0, LAYER_PARENT_ANALYSIS_ROLE)
key = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item = QListWidgetItem('%s - %s' % (layer.text(0), parent))
item.setData(LAYER_PARENT_ANALYSIS_ROLE, parent)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, key)
else:
item = QListWidgetItem(layer.text(0))
layer_id = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id)
item.setData(LAYER_ORIGIN_ROLE, origin)
self.list_layers_in_map_report.addItem(item)
self.tree.invisibleRootItem().removeChild(layer)
self.tree.clearSelection() |
def event_params(segments, params, band=None, n_fft=None, slopes=None,
prep=None, parent=None):
"""Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
"""
if parent is not None:
progress = QProgressDialog('Computing parameters', 'Abort',
0, len(segments) - 1, parent)
progress.setWindowModality(Qt.ApplicationModal)
param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf',
'energy', 'peakef']
if params == 'all':
params = {k: 1 for k in param_keys}
if prep is None:
prep = {k: 0 for k in param_keys}
if band is None:
band = (None, None)
params_out = []
evt_output = False
for i, seg in enumerate(segments):
out = dict(seg)
dat = seg['data']
if params['dur']:
out['dur'] = float(dat.number_of('time')) / dat.s_freq
evt_output = True
if params['minamp']:
dat1 = dat
if prep['minamp']:
dat1 = seg['trans_data']
out['minamp'] = math(dat1, operator=_amin, axis='time')
evt_output = True
if params['maxamp']:
dat1 = dat
if prep['maxamp']:
dat1 = seg['trans_data']
out['maxamp'] = math(dat1, operator=_amax, axis='time')
evt_output = True
if params['ptp']:
dat1 = dat
if prep['ptp']:
dat1 = seg['trans_data']
out['ptp'] = math(dat1, operator=_ptp, axis='time')
evt_output = True
if params['rms']:
dat1 = dat
if prep['rms']:
dat1 = seg['trans_data']
out['rms'] = math(dat1, operator=(square, _mean, sqrt),
axis='time')
evt_output = True
for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]:
if params[pw] or params[pk]:
evt_output = True
if prep[pw] or prep[pk]:
prep_pw, prep_pk = band_power(seg['trans_data'], band,
scaling=pw, n_fft=n_fft)
if not (prep[pw] and prep[pk]):
raw_pw, raw_pk = band_power(dat, band,
scaling=pw, n_fft=n_fft)
if prep[pw]:
out[pw] = prep_pw
else:
out[pw] = raw_pw
if prep[pk]:
out[pk] = prep_pk
else:
out[pk] = raw_pk
if slopes:
evt_output = True
out['slope'] = {}
dat1 = dat
if slopes['prep']:
dat1 = seg['trans_data']
if slopes['invert']:
dat1 = math(dat1, operator=negative, axis='time')
if slopes['avg_slope'] and slopes['max_slope']:
level = 'all'
elif slopes['avg_slope']:
level = 'average'
else:
level = 'maximum'
for chan in dat1.axis['chan'][0]:
d = dat1(chan=chan)[0]
out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
if evt_output:
timeline = dat.axis['time'][0]
out['start'] = timeline[0]
out['end'] = timeline[-1]
params_out.append(out)
if parent:
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
parent.statusBar().showMessage(msg)
return
if parent:
progress.close()
return params_out | Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters | Below is the instruction that describes the task:
### Input:
Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
### Response:
def event_params(segments, params, band=None, n_fft=None, slopes=None,
prep=None, parent=None):
"""Compute event parameters.
Parameters
----------
segments : instance of wonambi.trans.select.Segments
list of segments, with time series and metadata
params : dict of bool, or str
'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy',
'peakef'. If 'all', a dict will be created with these keys and all
values as True, so that all parameters are returned.
band : tuple of float
band of interest for power and energy
n_fft : int
length of FFT. if shorter than input signal, signal is truncated; if
longer, signal is zero-padded to length
slopes : dict of bool
'avg_slope', 'max_slope', 'prep', 'invert'
prep : dict of bool
same keys as params. if True, segment['trans_data'] will be used as dat
parent : QMainWindow
for use with GUI only
Returns
-------
list of dict
list of segments, with time series, metadata and parameters
"""
if parent is not None:
progress = QProgressDialog('Computing parameters', 'Abort',
0, len(segments) - 1, parent)
progress.setWindowModality(Qt.ApplicationModal)
param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakpf',
'energy', 'peakef']
if params == 'all':
params = {k: 1 for k in param_keys}
if prep is None:
prep = {k: 0 for k in param_keys}
if band is None:
band = (None, None)
params_out = []
evt_output = False
for i, seg in enumerate(segments):
out = dict(seg)
dat = seg['data']
if params['dur']:
out['dur'] = float(dat.number_of('time')) / dat.s_freq
evt_output = True
if params['minamp']:
dat1 = dat
if prep['minamp']:
dat1 = seg['trans_data']
out['minamp'] = math(dat1, operator=_amin, axis='time')
evt_output = True
if params['maxamp']:
dat1 = dat
if prep['maxamp']:
dat1 = seg['trans_data']
out['maxamp'] = math(dat1, operator=_amax, axis='time')
evt_output = True
if params['ptp']:
dat1 = dat
if prep['ptp']:
dat1 = seg['trans_data']
out['ptp'] = math(dat1, operator=_ptp, axis='time')
evt_output = True
if params['rms']:
dat1 = dat
if prep['rms']:
dat1 = seg['trans_data']
out['rms'] = math(dat1, operator=(square, _mean, sqrt),
axis='time')
evt_output = True
for pw, pk in [('power', 'peakpf'), ('energy', 'peakef')]:
if params[pw] or params[pk]:
evt_output = True
if prep[pw] or prep[pk]:
prep_pw, prep_pk = band_power(seg['trans_data'], band,
scaling=pw, n_fft=n_fft)
if not (prep[pw] and prep[pk]):
raw_pw, raw_pk = band_power(dat, band,
scaling=pw, n_fft=n_fft)
if prep[pw]:
out[pw] = prep_pw
else:
out[pw] = raw_pw
if prep[pk]:
out[pk] = prep_pk
else:
out[pk] = raw_pk
if slopes:
evt_output = True
out['slope'] = {}
dat1 = dat
if slopes['prep']:
dat1 = seg['trans_data']
if slopes['invert']:
dat1 = math(dat1, operator=negative, axis='time')
if slopes['avg_slope'] and slopes['max_slope']:
level = 'all'
elif slopes['avg_slope']:
level = 'average'
else:
level = 'maximum'
for chan in dat1.axis['chan'][0]:
d = dat1(chan=chan)[0]
out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
if evt_output:
timeline = dat.axis['time'][0]
out['start'] = timeline[0]
out['end'] = timeline[-1]
params_out.append(out)
if parent:
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
parent.statusBar().showMessage(msg)
return
if parent:
progress.close()
return params_out |
def verify(verified_entity, verification_key):
"""
        The method should raise errors
        :param verified_entity: entity
        :param verification_key: key
:return:
"""
verification = get_object_or_none(Verification, verified_entity=verified_entity)
if verification is None:
raise ServerError(VerificationHandler.STATUS_VERIFICATION_NOT_FOUND)
if not verification.verify(verification_key):
raise ServerError(VerificationHandler.STATUS_INVALID_VERIFICATION_KEY)
verification.verified = True
        verification.save() | The method should raise errors
:param verified_entity: entity
:param verification_key: key
:return: | Below is the instruction that describes the task:
### Input:
The method should raise errors
:param verified_entity: entity
:param verification_key: key
:return:
### Response:
def verify(verified_entity, verification_key):
"""
        The method should raise errors
        :param verified_entity: entity
        :param verification_key: key
:return:
"""
verification = get_object_or_none(Verification, verified_entity=verified_entity)
if verification is None:
raise ServerError(VerificationHandler.STATUS_VERIFICATION_NOT_FOUND)
if not verification.verify(verification_key):
raise ServerError(VerificationHandler.STATUS_INVALID_VERIFICATION_KEY)
verification.verified = True
verification.save() |
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
        return domains | Utility method to list all the domains in the jar. | Below is the instruction that describes the task:
### Input:
Utility method to list all the domains in the jar.
### Response:
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains |
def teletex_search_function(name):
"""
Search function for teletex codec that is passed to codecs.register()
"""
if name != 'teletex':
return None
return codecs.CodecInfo(
name='teletex',
encode=TeletexCodec().encode,
decode=TeletexCodec().decode,
incrementalencoder=TeletexIncrementalEncoder,
incrementaldecoder=TeletexIncrementalDecoder,
streamreader=TeletexStreamReader,
streamwriter=TeletexStreamWriter,
    ) | Search function for teletex codec that is passed to codecs.register() | Below is the instruction that describes the task:
### Input:
Search function for teletex codec that is passed to codecs.register()
### Response:
def teletex_search_function(name):
"""
Search function for teletex codec that is passed to codecs.register()
"""
if name != 'teletex':
return None
return codecs.CodecInfo(
name='teletex',
encode=TeletexCodec().encode,
decode=TeletexCodec().decode,
incrementalencoder=TeletexIncrementalEncoder,
incrementaldecoder=TeletexIncrementalDecoder,
streamreader=TeletexStreamReader,
streamwriter=TeletexStreamWriter,
) |
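The row above shows the codecs search-function pattern. A self-contained sketch of the same pattern, built on the existing latin-1 codec so it runs without the Teletex classes (the 'demo-teletex' name is invented for illustration):
import codecs

def demo_search_function(name):
    if name != 'demo-teletex':
        return None
    latin1 = codecs.lookup('latin-1')
    return codecs.CodecInfo(
        name='demo-teletex',
        encode=latin1.encode,
        decode=latin1.decode,
        incrementalencoder=latin1.incrementalencoder,
        incrementaldecoder=latin1.incrementaldecoder,
        streamreader=latin1.streamreader,
        streamwriter=latin1.streamwriter,
    )

codecs.register(demo_search_function)
print('codec demo'.encode('demo-teletex'))   # b'codec demo'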
def null_space(M, k, k_skip=1, eigen_solver='arpack',
random_state=None, solver_kwds=None):
"""
Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
null_space : estimated k vectors of the null space
error : estimated error (sum of eigenvalues)
Notes
-----
dense solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
for symmetric problems and
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
for non symmetric problems.
    arpack solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
for non symmetric problems.
lobpcg solver keywords: see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
amg solver keywords: see
http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
(Note amg solver uses lobpcg and also accepts lobpcg keywords)
"""
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=M.shape[0],
nvec=k + k_skip)
random_state = check_random_state(random_state)
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
v0=v0,**(solver_kwds or {}))
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(M, eigvals=(0, k+k_skip),overwrite_a=True,
**(solver_kwds or {}))
index = np.argsort(np.abs(eigen_values))
eigen_vectors = eigen_vectors[:, index]
eigen_values = eigen_values[index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
# eigen_values, eigen_vectors = eigh(
# M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
# index = np.argsort(np.abs(eigen_values))
# return eigen_vectors[:, index], np.sum(eigen_values)
elif (eigen_solver == 'amg' or eigen_solver == 'lobpcg'):
# M should be positive semi-definite. Add 1 to make it pos. def.
try:
M = sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values -1
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
except np.linalg.LinAlgError: # try again with bigger increase
warnings.warn("LOBPCG failed the first time. Increasing Pos Def adjustment.")
M = 2.0*sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values - 2
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) | Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
null_space : estimated k vectors of the null space
error : estimated error (sum of eigenvalues)
Notes
-----
dense solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
for symmetric problems and
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
for non symmetric problems.
arpack solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
for non symmetric problems.
lobpcg solver keywords: see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
amg solver keywords: see
http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
(Note amg solver uses lobpcg and also accepts lobpcg keywords) | Below is the instruction that describes the task:
### Input:
Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
null_space : estimated k vectors of the null space
error : estimated error (sum of eigenvalues)
Notes
-----
dense solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
for symmetric problems and
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
for non symmetric problems.
arpack solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
for non symmetric problems.
lobpcg solver keywords: see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
amg solver keywords: see
http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
(Note amg solver uses lobpcg and also accepts lobpcg keywords)
### Response:
def null_space(M, k, k_skip=1, eigen_solver='arpack',
random_state=None, solver_kwds=None):
"""
Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
null_space : estimated k vectors of the null space
error : estimated error (sum of eigenvalues)
Notes
-----
dense solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
for symmetric problems and
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
for non symmetric problems.
    arpack solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
for non symmetric problems.
lobpcg solver keywords: see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
amg solver keywords: see
http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
(Note amg solver uses lobpcg and also accepts lobpcg keywords)
"""
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=M.shape[0],
nvec=k + k_skip)
random_state = check_random_state(random_state)
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
v0=v0,**(solver_kwds or {}))
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(M, eigvals=(0, k+k_skip),overwrite_a=True,
**(solver_kwds or {}))
index = np.argsort(np.abs(eigen_values))
eigen_vectors = eigen_vectors[:, index]
eigen_values = eigen_values[index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
# eigen_values, eigen_vectors = eigh(
# M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
# index = np.argsort(np.abs(eigen_values))
# return eigen_vectors[:, index], np.sum(eigen_values)
elif (eigen_solver == 'amg' or eigen_solver == 'lobpcg'):
# M should be positive semi-definite. Add 1 to make it pos. def.
try:
M = sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values -1
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
except np.linalg.LinAlgError: # try again with bigger increase
warnings.warn("LOBPCG failed the first time. Increasing Pos Def adjustment.")
M = 2.0*sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values - 2
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) |
def register_job_definition(self, json_fpath):
"""Register a job definition with AWS Batch, using a JSON"""
with open(json_fpath) as f:
job_def = json.load(f)
response = self._client.register_job_definition(**job_def)
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Register job definition request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
    return response | Register a job definition with AWS Batch, using a JSON | Below is the instruction that describes the task:
### Input:
Register a job definition with AWS Batch, using a JSON
### Response:
def register_job_definition(self, json_fpath):
"""Register a job definition with AWS Batch, using a JSON"""
with open(json_fpath) as f:
job_def = json.load(f)
response = self._client.register_job_definition(**job_def)
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Register job definition request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return response |
def prox_min(X, step, thresh=0):
"""Projection onto numbers above `thresh`
"""
thresh_ = _step_gamma(step, thresh)
below = X - thresh_ < 0
X[below] = thresh_
    return X | Projection onto numbers above `thresh` | Below is the instruction that describes the task:
### Input:
Projection onto numbers above `thresh`
### Response:
def prox_min(X, step, thresh=0):
"""Projection onto numbers above `thresh`
"""
thresh_ = _step_gamma(step, thresh)
below = X - thresh_ < 0
X[below] = thresh_
return X |
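A quick numeric sketch of the projection above; _step_gamma is not shown in the row, so the common convention thresh_ = step * thresh is assumed and inlined.
import numpy as np

def prox_min_demo(X, step, thresh=0):
    thresh_ = step * thresh            # assumed stand-in for _step_gamma(step, thresh)
    below = X - thresh_ < 0
    X[below] = thresh_
    return X

x = np.array([-2.0, 0.5, 3.0])
print(prox_min_demo(x, step=1.0, thresh=1.0))   # [1. 1. 3.]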
def _se_all(self):
"""Standard errors (SE) for all parameters, including the intercept."""
err = np.expand_dims(self._ms_err, axis=1)
t1 = np.diagonal(
np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)),
axis1=1,
axis2=2,
)
    return np.squeeze(np.sqrt(t1 * err)) | Standard errors (SE) for all parameters, including the intercept. | Below is the instruction that describes the task:
### Input:
Standard errors (SE) for all parameters, including the intercept.
### Response:
def _se_all(self):
"""Standard errors (SE) for all parameters, including the intercept."""
err = np.expand_dims(self._ms_err, axis=1)
t1 = np.diagonal(
np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)),
axis1=1,
axis2=2,
)
return np.squeeze(np.sqrt(t1 * err)) |
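As a hedged, single-window illustration of the formula used above: the standard error of each OLS coefficient is sqrt(MS_err * diag((X'X)^-1)). The toy data below are invented, and the rolling-window bookkeeping of the original class is left out.
import numpy as np

rng = np.random.default_rng(0)
n = 50
X = np.column_stack([np.ones(n), rng.normal(size=(n, 2))])   # intercept + 2 regressors
beta_true = np.array([1.0, 2.0, -0.5])
y = X @ beta_true + rng.normal(scale=0.3, size=n)

beta_hat, *_ = np.linalg.lstsq(X, y, rcond=None)
resid = y - X @ beta_hat
ms_err = resid @ resid / (n - X.shape[1])                    # residual mean square
se = np.sqrt(ms_err * np.diag(np.linalg.inv(X.T @ X)))
print(beta_hat, se)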
def unit_key_from_name(name):
"""Return a legal python name for the given name for use as a unit key."""
result = name
for old, new in six.iteritems(UNIT_KEY_REPLACEMENTS):
result = result.replace(old, new)
# Collapse redundant underscores and convert to uppercase.
result = re.sub(r'_+', '_', result.upper())
    return result | Return a legal python name for the given name for use as a unit key. | Below is the instruction that describes the task:
### Input:
Return a legal python name for the given name for use as a unit key.
### Response:
def unit_key_from_name(name):
"""Return a legal python name for the given name for use as a unit key."""
result = name
for old, new in six.iteritems(UNIT_KEY_REPLACEMENTS):
result = result.replace(old, new)
# Collapse redundant underscores and convert to uppercase.
result = re.sub(r'_+', '_', result.upper())
return result |
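A hedged sketch of the row above; UNIT_KEY_REPLACEMENTS is not shown there, so the mapping used here is purely illustrative.
import re

UNIT_KEY_REPLACEMENTS = {' ': '_', '-': '_', '/': '_PER_'}   # assumed for the demo

def unit_key_from_name_demo(name):
    result = name
    for old, new in UNIT_KEY_REPLACEMENTS.items():
        result = result.replace(old, new)
    return re.sub(r'_+', '_', result.upper())

print(unit_key_from_name_demo('miles per gallon'))   # MILES_PER_GALLON
print(unit_key_from_name_demo('newton-meters'))      # NEWTON_METERS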
def recover(self, key, value):
"""Get the deserialized value for a given key, and the serialized version."""
if key not in self._dtypes:
self.read_types()
if key not in self._dtypes:
raise ValueError("Unknown datatype for {} and {}".format(key, value))
        return self._dtypes[key][2](value) | Get the deserialized value for a given key, and the serialized version. | Below is the instruction that describes the task:
### Input:
Get the deserialized value for a given key, and the serialized version.
### Response:
def recover(self, key, value):
"""Get the deserialized value for a given key, and the serialized version."""
if key not in self._dtypes:
self.read_types()
if key not in self._dtypes:
raise ValueError("Unknown datatype for {} and {}".format(key, value))
return self._dtypes[key][2](value) |
def step(step_name=None):
"""
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator | Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected. | Below is the instruction that describes the task:
### Input:
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
### Response:
def step(step_name=None):
"""
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator |
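A hedged sketch of how a decorator with an optional name, like the one above, is typically used; the small registry and add_step stub stand in for Orca's real machinery and are assumptions of this example.
_STEPS = {}

def add_step(name, func):
    _STEPS[name] = func

def step(step_name=None):
    def decorator(func):
        add_step(step_name or func.__name__, func)
        return func
    return decorator

@step()
def build_households():
    return 'households built'

@step('custom_name')
def run_model():
    return 'model run'

print(sorted(_STEPS))            # ['build_households', 'custom_name']
print(_STEPS['custom_name']())   # model run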
def cli(env, billing_id, datacenter):
"""Adds a load balancer given the id returned from create-options."""
mgr = SoftLayer.LoadBalancerManager(env.client)
if not formatting.confirm("This action will incur charges on your "
"account. Continue?"):
raise exceptions.CLIAbort('Aborted.')
mgr.add_local_lb(billing_id, datacenter=datacenter)
env.fout("Load balancer is being created!") | Adds a load balancer given the id returned from create-options. | Below is the the instruction that describes the task:
### Input:
Adds a load balancer given the id returned from create-options.
### Response:
def cli(env, billing_id, datacenter):
"""Adds a load balancer given the id returned from create-options."""
mgr = SoftLayer.LoadBalancerManager(env.client)
if not formatting.confirm("This action will incur charges on your "
"account. Continue?"):
raise exceptions.CLIAbort('Aborted.')
mgr.add_local_lb(billing_id, datacenter=datacenter)
env.fout("Load balancer is being created!") |
def get_assets_metadata(self):
"""Gets the metadata for the assets.
return: (osid.Metadata) - metadata for the assets
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['assets'])
metadata.update({'existing_assets_values': self._my_map['assetIds']})
return Metadata(**metadata) | Gets the metadata for the assets.
return: (osid.Metadata) - metadata for the assets
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the metadata for the assets.
return: (osid.Metadata) - metadata for the assets
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assets_metadata(self):
"""Gets the metadata for the assets.
return: (osid.Metadata) - metadata for the assets
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['assets'])
metadata.update({'existing_assets_values': self._my_map['assetIds']})
return Metadata(**metadata) |
def MakeRanges(codes):
"""Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]"""
ranges = []
last = -100
for c in codes:
if c == last+1:
ranges[-1][1] = c
else:
ranges.append([c, c])
last = c
    return ranges | Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]] | Below is the instruction that describes the task:
### Input:
Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]
### Response:
def MakeRanges(codes):
"""Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]"""
ranges = []
last = -100
for c in codes:
if c == last+1:
ranges[-1][1] = c
else:
ranges.append([c, c])
last = c
return ranges |
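A runnable copy of the helper above together with the docstring's own example, so the behaviour can be checked directly.
def MakeRanges(codes):
    ranges = []
    last = -100
    for c in codes:
        if c == last + 1:
            ranges[-1][1] = c        # extend the open range
        else:
            ranges.append([c, c])    # start a new range
        last = c
    return ranges

assert MakeRanges([1, 2, 3, 7, 8, 9]) == [[1, 3], [7, 9]]
assert MakeRanges([5]) == [[5, 5]]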
def include_file_cb(include_path, line_ranges, symbol):
"""
Banana banana
"""
lang = ''
if include_path.endswith((".md", ".markdown")):
lang = 'markdown'
else:
split = os.path.splitext(include_path)
if len(split) == 2:
ext = split[1].strip('.')
lang = LANG_MAPPING.get(ext) or ext
if line_ranges:
res = []
for line_range in line_ranges:
for lineno in range(line_range[0] + 1, line_range[1] + 1):
line = linecache.getline(include_path, lineno)
if not line:
return None
res.append(line)
return ''.join(res), lang
with io.open(include_path, 'r', encoding='utf-8') as _:
        return _.read(), lang | Banana banana | Below is the instruction that describes the task:
### Input:
Banana banana
### Response:
def include_file_cb(include_path, line_ranges, symbol):
"""
Banana banana
"""
lang = ''
if include_path.endswith((".md", ".markdown")):
lang = 'markdown'
else:
split = os.path.splitext(include_path)
if len(split) == 2:
ext = split[1].strip('.')
lang = LANG_MAPPING.get(ext) or ext
if line_ranges:
res = []
for line_range in line_ranges:
for lineno in range(line_range[0] + 1, line_range[1] + 1):
line = linecache.getline(include_path, lineno)
if not line:
return None
res.append(line)
return ''.join(res), lang
with io.open(include_path, 'r', encoding='utf-8') as _:
return _.read(), lang |
def _diff(self, cursor, tokenizer, output_fh):
"""Returns output_fh with diff results that have been reduced.
Uses a temporary file to store the results from `cursor`
before being reduced, in order to not have the results stored
in memory twice.
:param cursor: database cursor containing raw diff data
:type cursor: `sqlite3.Cursor`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:type output_fh: file-like object
:rtype: file-like object
"""
temp_path = self._csv_temp(cursor, constants.QUERY_FIELDNAMES)
output_fh = self._reduce_diff_results(temp_path, tokenizer, output_fh)
try:
os.remove(temp_path)
except OSError as e:
self._logger.error('Failed to remove temporary file containing '
'unreduced results: {}'.format(e))
return output_fh | Returns output_fh with diff results that have been reduced.
Uses a temporary file to store the results from `cursor`
before being reduced, in order to not have the results stored
in memory twice.
:param cursor: database cursor containing raw diff data
:type cursor: `sqlite3.Cursor`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:type output_fh: file-like object
:rtype: file-like object | Below is the instruction that describes the task:
### Input:
Returns output_fh with diff results that have been reduced.
Uses a temporary file to store the results from `cursor`
before being reduced, in order to not have the results stored
in memory twice.
:param cursor: database cursor containing raw diff data
:type cursor: `sqlite3.Cursor`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:type output_fh: file-like object
:rtype: file-like object
### Response:
def _diff(self, cursor, tokenizer, output_fh):
"""Returns output_fh with diff results that have been reduced.
Uses a temporary file to store the results from `cursor`
before being reduced, in order to not have the results stored
in memory twice.
:param cursor: database cursor containing raw diff data
:type cursor: `sqlite3.Cursor`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:type output_fh: file-like object
:rtype: file-like object
"""
temp_path = self._csv_temp(cursor, constants.QUERY_FIELDNAMES)
output_fh = self._reduce_diff_results(temp_path, tokenizer, output_fh)
try:
os.remove(temp_path)
except OSError as e:
self._logger.error('Failed to remove temporary file containing '
'unreduced results: {}'.format(e))
return output_fh |
def delete_loadbalancer(self, datacenter_id, loadbalancer_id):
"""
Removes the load balancer from the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/loadbalancers/%s' % (
datacenter_id, loadbalancer_id), method='DELETE')
return response | Removes the load balancer from the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str`` | Below is the instruction that describes the task:
### Input:
Removes the load balancer from the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str``
### Response:
def delete_loadbalancer(self, datacenter_id, loadbalancer_id):
"""
Removes the load balancer from the data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/loadbalancers/%s' % (
datacenter_id, loadbalancer_id), method='DELETE')
return response |
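A minimal usage sketch for the method above, assuming `client` is an authenticated instance of the API client class that defines it; both IDs are hypothetical placeholders.

# Hypothetical placeholder IDs; substitute real resource UUIDs.
response = client.delete_loadbalancer(datacenter_id='dc-uuid',
                                      loadbalancer_id='lb-uuid')
# Issues DELETE /datacenters/<dc>/loadbalancers/<lb> and returns the raw response.
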
def get_rml(self, rml_name):
""" returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve
"""
try:
return getattr(self, rml_name)
except AttributeError:
return self.load_rml(rml_name) | returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve | Below is the instruction that describes the task:
### Input:
returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve
### Response:
def get_rml(self, rml_name):
""" returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve
"""
try:
return getattr(self, rml_name)
except AttributeError:
return self.load_rml(rml_name) |
def classes_can_admin(self):
"""Return all the classes (sorted) that this user can admin."""
if self.is_admin:
return sorted(Session.query(Class).all())
else:
return sorted(self.admin_for) | Return all the classes (sorted) that this user can admin. | Below is the instruction that describes the task:
### Input:
Return all the classes (sorted) that this user can admin.
### Response:
def classes_can_admin(self):
"""Return all the classes (sorted) that this user can admin."""
if self.is_admin:
return sorted(Session.query(Class).all())
else:
return sorted(self.admin_for) |
def get(self, buffer_type, offset):
"""Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
"""
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] | Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get | Below is the instruction that describes the task:
### Input:
Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
### Response:
def get(self, buffer_type, offset):
"""Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
"""
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] |
def get(self, key, index=None):
"""Retrieves a value associated with a key from the database
Args:
key (str): The key to retrieve
"""
records = self.get_multi([key], index=index)
try:
return records[0][1] # return the value from the key/value tuple
except IndexError:
return None | Retrieves a value associated with a key from the database
Args:
key (str): The key to retrieve | Below is the instruction that describes the task:
### Input:
Retrieves a value associated with a key from the database
Args:
key (str): The key to retrieve
### Response:
def get(self, key, index=None):
"""Retrieves a value associated with a key from the database
Args:
key (str): The key to retrieve
"""
records = self.get_multi([key], index=index)
try:
return records[0][1] # return the value from the key/value tuple
except IndexError:
return None |
def t_SEMICOLON(self, t):
r';'
t.endlexpos = t.lexpos + len(t.value)
return t | r'; | Below is the the instruction that describes the task:
### Input:
r';
### Response:
def t_SEMICOLON(self, t):
r';'
t.endlexpos = t.lexpos + len(t.value)
return t |
def _parse_attribute_details_file(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
# Parse content from remote file URL, which may be stored in one of two places:
# Starting at: contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation
# ATTRIBUTE: href
# ELEMENT TEXT: CI_Citation/.../CI_Contact/onlineResource/CI_OnlineResource/linkage
self._attr_details_file_url = parse_property(
self._xml_tree, None, self._data_map, '_attributes_file'
)
if not self._attr_details_file_url:
return None
try:
tree_to_parse = get_remote_element(self._attr_details_file_url)
except Exception:
self._attr_details_file_url = None
return None
xpath_map = self._data_structures[ATTRIBUTES]
xpath_root = self._get_xroot_for(prop)
return parse_complex_list(tree_to_parse, xpath_root, xpath_map, prop) | Concatenates a list of Attribute Details data structures parsed from a remote file | Below is the instruction that describes the task:
### Input:
Concatenates a list of Attribute Details data structures parsed from a remote file
### Response:
def _parse_attribute_details_file(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
# Parse content from remote file URL, which may be stored in one of two places:
# Starting at: contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation
# ATTRIBUTE: href
# ELEMENT TEXT: CI_Citation/.../CI_Contact/onlineResource/CI_OnlineResource/linkage
self._attr_details_file_url = parse_property(
self._xml_tree, None, self._data_map, '_attributes_file'
)
if not self._attr_details_file_url:
return None
try:
tree_to_parse = get_remote_element(self._attr_details_file_url)
except Exception:
self._attr_details_file_url = None
return None
xpath_map = self._data_structures[ATTRIBUTES]
xpath_root = self._get_xroot_for(prop)
return parse_complex_list(tree_to_parse, xpath_root, xpath_map, prop) |
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes:
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret | Check that a block volume exists. | Below is the instruction that describes the task:
### Input:
Check that a block volume exists.
### Response:
def volume_present(name, provider=None, **kwargs):
'''
Check that a block volume exists.
'''
ret = _check_name(name)
if not ret['result']:
return ret
volumes = __salt__['cloud.volume_list'](provider=provider)
if name in volumes:
ret['comment'] = 'Volume exists: {0}'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be created.'.format(name)
ret['result'] = None
return ret
response = __salt__['cloud.volume_create'](
names=name,
provider=provider,
**kwargs
)
if response:
ret['result'] = True
ret['comment'] = 'Volume {0} was created'.format(name)
ret['changes'] = {'old': None, 'new': response}
else:
ret['result'] = False
ret['comment'] = 'Volume {0} failed to create.'.format(name)
return ret |
def instruction_BMI(self, opcode, ea):
"""
Tests the state of the N (negative) bit and causes a branch if set. That
is, branch if the sign of the twos complement result is negative.
When used after an operation on signed binary values, this instruction
will branch if the result is minus. It is generally preferred to use the
LBLT instruction after signed operations.
source code forms: BMI dd; LBMI DDDD
CC bits "HNZVC": -----
"""
if self.N == 1:
# log.info("$%x BMI branch to $%x, because N==1 \t| %s" % (
# self.program_counter, ea, self.cfg.mem_info.get_shortest(ea)
# ))
self.program_counter.set(ea) | Tests the state of the N (negative) bit and causes a branch if set. That
is, branch if the sign of the twos complement result is negative.
When used after an operation on signed binary values, this instruction
will branch if the result is minus. It is generally preferred to use the
LBLT instruction after signed operations.
source code forms: BMI dd; LBMI DDDD
CC bits "HNZVC": ----- | Below is the instruction that describes the task:
### Input:
Tests the state of the N (negative) bit and causes a branch if set. That
is, branch if the sign of the twos complement result is negative.
When used after an operation on signed binary values, this instruction
will branch if the result is minus. It is generally preferred to use the
LBLT instruction after signed operations.
source code forms: BMI dd; LBMI DDDD
CC bits "HNZVC": -----
### Response:
def instruction_BMI(self, opcode, ea):
"""
Tests the state of the N (negative) bit and causes a branch if set. That
is, branch if the sign of the twos complement result is negative.
When used after an operation on signed binary values, this instruction
will branch if the result is minus. It is generally preferred to use the
LBLT instruction after signed operations.
source code forms: BMI dd; LBMI DDDD
CC bits "HNZVC": -----
"""
if self.N == 1:
# log.info("$%x BMI branch to $%x, because N==1 \t| %s" % (
# self.program_counter, ea, self.cfg.mem_info.get_shortest(ea)
# ))
self.program_counter.set(ea) |
def get_priority_rules(db) -> Iterable[PriorityRule]:
"""Get file priority rules."""
cur = db.cursor()
cur.execute('SELECT id, regexp, priority FROM file_priority')
for row in cur:
yield PriorityRule(*row) | Get file priority rules. | Below is the instruction that describes the task:
### Input:
Get file priority rules.
### Response:
def get_priority_rules(db) -> Iterable[PriorityRule]:
"""Get file priority rules."""
cur = db.cursor()
cur.execute('SELECT id, regexp, priority FROM file_priority')
for row in cur:
yield PriorityRule(*row) |
def _serve_individual_image(self, request):
"""Serves an individual image."""
run = request.args.get('run')
tag = request.args.get('tag')
index = int(request.args.get('index'))
sample = int(request.args.get('sample', 0))
data = self._get_individual_image(run, tag, index, sample)
image_type = imghdr.what(None, data)
content_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
return http_util.Respond(request, data, content_type) | Serves an individual image. | Below is the instruction that describes the task:
### Input:
Serves an individual image.
### Response:
def _serve_individual_image(self, request):
"""Serves an individual image."""
run = request.args.get('run')
tag = request.args.get('tag')
index = int(request.args.get('index'))
sample = int(request.args.get('sample', 0))
data = self._get_individual_image(run, tag, index, sample)
image_type = imghdr.what(None, data)
content_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
return http_util.Respond(request, data, content_type) |
def format_survival_rate():
"""cr-rate
Usage: cr-rate <session-file>
Calculate the survival rate of a session.
"""
arguments = docopt.docopt(
format_survival_rate.__doc__, version='cr-rate 1.0')
with use_db(arguments['<session-file>'], WorkDB.Mode.open) as db:
rate = survival_rate(db)
print('{:.2f}'.format(rate)) | cr-rate
Usage: cr-rate <session-file>
Calculate the survival rate of a session. | Below is the instruction that describes the task:
### Input:
cr-rate
Usage: cr-rate <session-file>
Calculate the survival rate of a session.
### Response:
def format_survival_rate():
"""cr-rate
Usage: cr-rate <session-file>
Calculate the survival rate of a session.
"""
arguments = docopt.docopt(
format_survival_rate.__doc__, version='cr-rate 1.0')
with use_db(arguments['<session-file>'], WorkDB.Mode.open) as db:
rate = survival_rate(db)
print('{:.2f}'.format(rate)) |
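Because docopt parses the arguments from sys.argv, the CLI entry point above can also be driven in-process; a sketch, where 'my-session.sqlite' is a hypothetical session file path.

import sys

# docopt reads sys.argv, so emulate the command line `cr-rate my-session.sqlite`.
sys.argv = ['cr-rate', 'my-session.sqlite']
format_survival_rate()   # prints the survival rate with two decimal places
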
def _check_query(self, query, style_cols=None):
"""Checks if query from Layer or QueryLayer is valid"""
try:
self.sql_client.send(
utils.minify_sql((
'EXPLAIN',
'SELECT',
' {style_cols}{comma}',
' the_geom, the_geom_webmercator',
'FROM ({query}) _wrap;',
)).format(query=query,
comma=',' if style_cols else '',
style_cols=(','.join(style_cols)
if style_cols else '')),
do_post=False)
except Exception as err:
raise ValueError(('Layer query `{query}` and/or style column(s) '
'{cols} are not valid: {err}.'
'').format(query=query,
cols=', '.join(['`{}`'.format(c)
for c in style_cols]),
err=err)) | Checks if query from Layer or QueryLayer is valid | Below is the instruction that describes the task:
### Input:
Checks if query from Layer or QueryLayer is valid
### Response:
def _check_query(self, query, style_cols=None):
"""Checks if query from Layer or QueryLayer is valid"""
try:
self.sql_client.send(
utils.minify_sql((
'EXPLAIN',
'SELECT',
' {style_cols}{comma}',
' the_geom, the_geom_webmercator',
'FROM ({query}) _wrap;',
)).format(query=query,
comma=',' if style_cols else '',
style_cols=(','.join(style_cols)
if style_cols else '')),
do_post=False)
except Exception as err:
raise ValueError(('Layer query `{query}` and/or style column(s) '
'{cols} are not valid: {err}.'
'').format(query=query,
cols=', '.join(['`{}`'.format(c)
for c in style_cols]),
err=err)) |
def randomize(length=6, choices=None):
"""Returns a random string of the given length."""
if type(choices) == str:
choices = list(choices)
choices = choices or ascii_lowercase
return "".join(choice(choices) for _ in range(length)) | Returns a random string of the given length. | Below is the the instruction that describes the task:
### Input:
Returns a random string of the given length.
### Response:
def randomize(length=6, choices=None):
"""Returns a random string of the given length."""
if type(choices) == str:
choices = list(choices)
choices = choices or ascii_lowercase
return "".join(choice(choices) for _ in range(length)) |
def linkify_h_by_hd(self, hosts):
"""Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", "undefined")
setattr(hostdep, "dependent_host_name_string", "undefined")
# if the host dep conf is bad, pass this one
if getattr(hostdep, 'host_name', None) is None or\
getattr(hostdep, 'dependent_host_name', None) is None:
continue
if hostdep.host_name not in hosts or hostdep.dependent_host_name not in hosts:
continue
hosts.add_act_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.notification_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
hosts.add_chk_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.execution_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", hosts[hostdep.host_name].get_name())
setattr(hostdep, "dependent_host_name_string",
hosts[hostdep.dependent_host_name].get_name()) | Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None | Below is the instruction that describes the task:
### Input:
Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None
### Response:
def linkify_h_by_hd(self, hosts):
"""Add dependency in host objects
:param hosts: hosts list
:type hosts: alignak.objects.host.Hosts
:return: None
"""
for hostdep in self:
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", "undefined")
setattr(hostdep, "dependent_host_name_string", "undefined")
# if the host dep conf is bad, pass this one
if getattr(hostdep, 'host_name', None) is None or\
getattr(hostdep, 'dependent_host_name', None) is None:
continue
if hostdep.host_name not in hosts or hostdep.dependent_host_name not in hosts:
continue
hosts.add_act_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.notification_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
hosts.add_chk_dependency(hostdep.dependent_host_name, hostdep.host_name,
hostdep.execution_failure_criteria,
getattr(hostdep, 'dependency_period', ''),
hostdep.inherits_parent)
# Only used for debugging purpose when loops are detected
setattr(hostdep, "host_name_string", hosts[hostdep.host_name].get_name())
setattr(hostdep, "dependent_host_name_string",
hosts[hostdep.dependent_host_name].get_name()) |
def timezone_name(dt, version=LATEST_VER):
"""
Determine an appropriate timezone for the given date/time object
"""
tz_rmap = get_tz_rmap(version=version)
if dt.tzinfo is None:
raise ValueError('%r has no timezone' % dt)
# Easy case: pytz timezone.
try:
tz_name = dt.tzinfo.zone
return tz_rmap[tz_name]
except KeyError:
# Not in timezone map
pass
except AttributeError:
# Not a pytz-compatible tzinfo
pass
# Hard case, try to find one that's equivalent. Hopefully we don't get
# many of these. Start by getting the current timezone offset, and a
# timezone-naïve copy of the timestamp.
offset = dt.utcoffset()
dt_notz = dt.replace(tzinfo=None)
if offset == datetime.timedelta(0):
# UTC?
return 'UTC'
for olson_name, haystack_name in list(tz_rmap.items()):
if pytz.timezone(olson_name).utcoffset(dt_notz) == offset:
return haystack_name
raise ValueError('Unable to get timezone of %r' % dt) | Determine an appropriate timezone for the given date/time object | Below is the instruction that describes the task:
### Input:
Determine an appropriate timezone for the given date/time object
### Response:
def timezone_name(dt, version=LATEST_VER):
"""
Determine an appropriate timezone for the given date/time object
"""
tz_rmap = get_tz_rmap(version=version)
if dt.tzinfo is None:
raise ValueError('%r has no timezone' % dt)
# Easy case: pytz timezone.
try:
tz_name = dt.tzinfo.zone
return tz_rmap[tz_name]
except KeyError:
# Not in timezone map
pass
except AttributeError:
# Not a pytz-compatible tzinfo
pass
# Hard case, try to find one that's equivalent. Hopefully we don't get
# many of these. Start by getting the current timezone offset, and a
# timezone-naïve copy of the timestamp.
offset = dt.utcoffset()
dt_notz = dt.replace(tzinfo=None)
if offset == datetime.timedelta(0):
# UTC?
return 'UTC'
for olson_name, haystack_name in list(tz_rmap.items()):
if pytz.timezone(olson_name).utcoffset(dt_notz) == offset:
return haystack_name
raise ValueError('Unable to get timezone of %r' % dt) |
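A usage sketch, assuming timezone_name is importable and that a pytz-aware datetime is passed in.

import datetime
import pytz

aware = pytz.timezone('Europe/London').localize(datetime.datetime(2020, 6, 1, 12, 0))
print(timezone_name(aware))    # looked up via the Olson-to-Haystack reverse map

naive = datetime.datetime(2020, 6, 1, 12, 0)
# timezone_name(naive)         # would raise ValueError because the datetime has no tzinfo
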
def new(self, br, ino, sector_count, load_seg, media_name, system_type,
platform_id, bootable):
# type: (headervd.BootRecord, inode.Inode, int, int, str, int, int, bool) -> None
'''
A method to create a new El Torito Boot Catalog.
Parameters:
br - The boot record that this El Torito Boot Catalog is associated with.
ino - The Inode to associate with the initial entry.
sector_count - The number of sectors for the initial entry.
load_seg - The load segment address of the boot image.
media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
system_type - The partition type the entry should be.
platform_id - The platform id to set in the validation entry.
bootable - Whether this entry should be bootable.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')
# Create the El Torito validation entry
self.validation_entry.new(platform_id)
self.initial_entry.new(sector_count, load_seg, media_name, system_type,
bootable)
self.initial_entry.set_inode(ino)
ino.linked_records.append(self.initial_entry)
self.br = br
self._initialized = True | A method to create a new El Torito Boot Catalog.
Parameters:
br - The boot record that this El Torito Boot Catalog is associated with.
ino - The Inode to associate with the initial entry.
sector_count - The number of sectors for the initial entry.
load_seg - The load segment address of the boot image.
media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
system_type - The partition type the entry should be.
platform_id - The platform id to set in the validation entry.
bootable - Whether this entry should be bootable.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
A method to create a new El Torito Boot Catalog.
Parameters:
br - The boot record that this El Torito Boot Catalog is associated with.
ino - The Inode to associate with the initial entry.
sector_count - The number of sectors for the initial entry.
load_seg - The load segment address of the boot image.
media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
system_type - The partition type the entry should be.
platform_id - The platform id to set in the validation entry.
bootable - Whether this entry should be bootable.
Returns:
Nothing.
### Response:
def new(self, br, ino, sector_count, load_seg, media_name, system_type,
platform_id, bootable):
# type: (headervd.BootRecord, inode.Inode, int, int, str, int, int, bool) -> None
'''
A method to create a new El Torito Boot Catalog.
Parameters:
br - The boot record that this El Torito Boot Catalog is associated with.
ino - The Inode to associate with the initial entry.
sector_count - The number of sectors for the initial entry.
load_seg - The load segment address of the boot image.
media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
system_type - The partition type the entry should be.
platform_id - The platform id to set in the validation entry.
bootable - Whether this entry should be bootable.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')
# Create the El Torito validation entry
self.validation_entry.new(platform_id)
self.initial_entry.new(sector_count, load_seg, media_name, system_type,
bootable)
self.initial_entry.set_inode(ino)
ino.linked_records.append(self.initial_entry)
self.br = br
self._initialized = True |
def generateHeader(self, sObjectType):
'''
Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm
'''
try:
return self._sforce.factory.create(sObjectType)
except:
print 'There is not a SOAP header of type %s' % sObjectType | Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm | Below is the instruction that describes the task:
### Input:
Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm
### Response:
def generateHeader(self, sObjectType):
'''
Generate a SOAP header as defined in:
http://www.salesforce.com/us/developer/docs/api/Content/soap_headers.htm
'''
try:
return self._sforce.factory.create(sObjectType)
except:
print 'There is not a SOAP header of type %s' % sObjectType |
def assert_condition_md5(self):
"""If the ``Content-MD5`` request header is present in the request
it's verified against the MD5 hash of the request body. If they don't
match, a 400 HTTP response is returned.
:raises: :class:`webob.exceptions.ResponseException` of status 400 if
the MD5 hash does not match the body.
"""
if 'Content-MD5' in self.request.headers:
body_md5 = hashlib.md5(self.request.body).hexdigest()
if body_md5 != self.request.headers['Content-MD5']:
raise_400(self, msg='Invalid Content-MD5 request header.') | If the ``Content-MD5`` request header is present in the request
it's verified against the MD5 hash of the request body. If they don't
match, a 400 HTTP response is returned.
:raises: :class:`webob.exceptions.ResponseException` of status 400 if
the MD5 hash does not match the body. | Below is the instruction that describes the task:
### Input:
If the ``Content-MD5`` request header is present in the request
it's verified against the MD5 hash of the request body. If they don't
match, a 400 HTTP response is returned.
:raises: :class:`webob.exceptions.ResponseException` of status 400 if
the MD5 hash does not match the body.
### Response:
def assert_condition_md5(self):
"""If the ``Content-MD5`` request header is present in the request
it's verified against the MD5 hash of the request body. If they don't
match, a 400 HTTP response is returned.
:raises: :class:`webob.exceptions.ResponseException` of status 400 if
the MD5 hash does not match the body.
"""
if 'Content-MD5' in self.request.headers:
body_md5 = hashlib.md5(self.request.body).hexdigest()
if body_md5 != self.request.headers['Content-MD5']:
raise_400(self, msg='Invalid Content-MD5 request header.') |
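For reference, a sketch of how a client could compute a matching Content-MD5 header before sending the body; the URL is a placeholder and the use of requests is only one possible HTTP client.

import hashlib
import requests

body = b'{"name": "example"}'
# Note: the check above compares against a hex digest, not the base64 form
# prescribed by RFC 1864 for Content-MD5, so the client must send hex as well.
headers = {'Content-MD5': hashlib.md5(body).hexdigest()}
requests.post('https://api.example.com/things', data=body, headers=headers)
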
def tiles_exist(self, process_tile=None, output_tile=None):
"""
Check whether output tiles of a tile (either process or output) exists.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
exists : bool
"""
if process_tile and output_tile:
raise ValueError("just one of 'process_tile' and 'output_tile' allowed")
if process_tile:
return any(
path_exists(self.get_path(tile))
for tile in self.pyramid.intersecting(process_tile)
)
if output_tile:
return path_exists(self.get_path(output_tile)) | Check whether output tiles of a tile (either process or output) exists.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
exists : bool | Below is the instruction that describes the task:
### Input:
Check whether output tiles of a tile (either process or output) exists.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
exists : bool
### Response:
def tiles_exist(self, process_tile=None, output_tile=None):
"""
Check whether output tiles of a tile (either process or output) exists.
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
output_tile : ``BufferedTile``
must be member of output ``TilePyramid``
Returns
-------
exists : bool
"""
if process_tile and output_tile:
raise ValueError("just one of 'process_tile' and 'output_tile' allowed")
if process_tile:
return any(
path_exists(self.get_path(tile))
for tile in self.pyramid.intersecting(process_tile)
)
if output_tile:
return path_exists(self.get_path(output_tile)) |
def parameterized_send(self, request, parameter_list):
"""Send batched requests for a list of parameters
Args:
request (str): Request to send, like "%s.*?\n"
parameter_list (list): parameters to format with, like
["TTLIN", "TTLOUT"]
Returns:
dict: {parameter: response_queue}
"""
response_queues = OrderedDict()
for parameter in parameter_list:
response_queues[parameter] = self.send(request % parameter)
return response_queues | Send batched requests for a list of parameters
Args:
request (str): Request to send, like "%s.*?\n"
parameter_list (list): parameters to format with, like
["TTLIN", "TTLOUT"]
Returns:
dict: {parameter: response_queue} | Below is the instruction that describes the task:
### Input:
Send batched requests for a list of parameters
Args:
request (str): Request to send, like "%s.*?\n"
parameter_list (list): parameters to format with, like
["TTLIN", "TTLOUT"]
Returns:
dict: {parameter: response_queue}
### Response:
def parameterized_send(self, request, parameter_list):
"""Send batched requests for a list of parameters
Args:
request (str): Request to send, like "%s.*?\n"
parameter_list (list): parameters to format with, like
["TTLIN", "TTLOUT"]
Returns:
dict: {parameter: response_queue}
"""
response_queues = OrderedDict()
for parameter in parameter_list:
response_queues[parameter] = self.send(request % parameter)
return response_queues |
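A consumption sketch, assuming `client` is an instance of the class that defines parameterized_send and that each returned response queue exposes a blocking get(); adapt the call to whatever object send() actually returns.

queues = client.parameterized_send("%s.*?\n", ["TTLIN", "TTLOUT"])
for field, response_queue in queues.items():
    # Assumed queue API: a blocking get() that yields the matching response.
    print(field, response_queue.get())
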
def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
resource_dicts):
'''
Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DistributedVirtualSwitchProductSpec)
'''
for res_dict in resource_dicts:
filtered_traffic_resources = \
[r for r in infra_traffic_resources if r.key == res_dict['key']]
if filtered_traffic_resources:
traffic_res = filtered_traffic_resources[0]
else:
traffic_res = vim.DvsHostInfrastructureTrafficResource()
traffic_res.key = res_dict['key']
traffic_res.allocationInfo = \
vim.DvsHostInfrastructureTrafficResourceAllocation()
infra_traffic_resources.append(traffic_res)
if res_dict.get('limit'):
traffic_res.allocationInfo.limit = res_dict['limit']
if res_dict.get('reservation'):
traffic_res.allocationInfo.reservation = res_dict['reservation']
if res_dict.get('num_shares') or res_dict.get('share_level'):
if not traffic_res.allocationInfo.shares:
traffic_res.allocationInfo.shares = vim.SharesInfo()
if res_dict.get('share_level'):
traffic_res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
if res_dict.get('num_shares'):
#XXX Even though we always set the number of shares if provided,
#the vCenter will ignore it unless the share level is 'custom'.
traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] | Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DistributedVirtualSwitchProductSpec) | Below is the instruction that describes the task:
### Input:
Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DistributedVirtualSwitchProductSpec)
### Response:
def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
resource_dicts):
'''
Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DistributedVirtualSwitchProductSpec)
'''
for res_dict in resource_dicts:
filtered_traffic_resources = \
[r for r in infra_traffic_resources if r.key == res_dict['key']]
if filtered_traffic_resources:
traffic_res = filtered_traffic_resources[0]
else:
traffic_res = vim.DvsHostInfrastructureTrafficResource()
traffic_res.key = res_dict['key']
traffic_res.allocationInfo = \
vim.DvsHostInfrastructureTrafficResourceAllocation()
infra_traffic_resources.append(traffic_res)
if res_dict.get('limit'):
traffic_res.allocationInfo.limit = res_dict['limit']
if res_dict.get('reservation'):
traffic_res.allocationInfo.reservation = res_dict['reservation']
if res_dict.get('num_shares') or res_dict.get('share_level'):
if not traffic_res.allocationInfo.shares:
traffic_res.allocationInfo.shares = vim.SharesInfo()
if res_dict.get('share_level'):
traffic_res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
if res_dict.get('num_shares'):
#XXX Even though we always set the number of shares if provided,
#the vCenter will ignore it unless the share level is 'custom'.
traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] |
def _download_images(data, img_cols):
"""Download images given image columns."""
images = collections.defaultdict(list)
for d in data:
for img_col in img_cols:
if d.get(img_col, None):
if isinstance(d[img_col], Image.Image):
# If it is already an Image, just copy and continue.
images[img_col].append(d[img_col])
else:
# Otherwise it is image url. Load the image.
with file_io.FileIO(d[img_col], 'rb') as fi:
im = Image.open(fi)
images[img_col].append(im)
else:
images[img_col].append('')
return images | Download images given image columns. | Below is the instruction that describes the task:
### Input:
Download images given image columns.
### Response:
def _download_images(data, img_cols):
"""Download images given image columns."""
images = collections.defaultdict(list)
for d in data:
for img_col in img_cols:
if d.get(img_col, None):
if isinstance(d[img_col], Image.Image):
# If it is already an Image, just copy and continue.
images[img_col].append(d[img_col])
else:
# Otherwise it is image url. Load the image.
with file_io.FileIO(d[img_col], 'rb') as fi:
im = Image.open(fi)
images[img_col].append(im)
else:
images[img_col].append('')
return images |
def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None,
ndigits=5, unit='', stroke_width=1, fill_opacity=.9,
geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with overlapping filled contours."""
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) | Transform matplotlib.contourf to geojson with overlapping filled contours. | Below is the instruction that describes the task:
### Input:
Transform matplotlib.contourf to geojson with overlapping filled contours.
### Response:
def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None,
ndigits=5, unit='', stroke_width=1, fill_opacity=.9,
geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with overlapping filled contours."""
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) |
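A usage sketch, assuming matplotlib and numpy are available and the function is importable; the return value is whatever _render_feature_collection produces, presumably a serialized GeoJSON FeatureCollection given the default serialize=True.

import matplotlib.pyplot as plt
import numpy as np

x = y = np.linspace(-3.0, 3.0, 50)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X ** 2 + Y ** 2))

contourf = plt.contourf(X, Y, Z, levels=8)
result = contourf_to_geojson_overlap(contourf, unit='m', fill_opacity=0.7)
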
def main():
"""For testing purpose"""
tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
hdmi_network = HDMINetwork(tcp_adapter)
hdmi_network.start()
while True:
for d in hdmi_network.devices:
_LOGGER.info("Device: %s", d)
time.sleep(7) | For testing purpose | Below is the instruction that describes the task:
### Input:
For testing purpose
### Response:
def main():
"""For testing purpose"""
tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
hdmi_network = HDMINetwork(tcp_adapter)
hdmi_network.start()
while True:
for d in hdmi_network.devices:
_LOGGER.info("Device: %s", d)
time.sleep(7) |
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c() | starts the debug session | Below is the instruction that describes the task:
### Input:
starts the debug session
### Response:
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c() |
def CreateRunner(self, **kw):
"""Make a new runner."""
self.runner = HuntRunner(self, token=self.token, **kw)
return self.runner | Make a new runner. | Below is the instruction that describes the task:
### Input:
Make a new runner.
### Response:
def CreateRunner(self, **kw):
"""Make a new runner."""
self.runner = HuntRunner(self, token=self.token, **kw)
return self.runner |
def search_tree(self, name): # noqa: D302
r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
return self._search_tree(name) | r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf'] | Below is the instruction that describes the task:
### Input:
r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf']
### Response:
def search_tree(self, name): # noqa: D302
r"""
Search tree for all nodes with a specific name.
:param name: Node name to search for
:type name: :ref:`NodeName`
:raises: RuntimeError (Argument \`name\` is not valid)
For example:
>>> from __future__ import print_function
>>> import pprint, ptrie
>>> tobj = ptrie.Trie('/')
>>> tobj.add_nodes([
... {'name':'root', 'data':[]},
... {'name':'root/anode', 'data':7},
... {'name':'root/bnode', 'data':[]},
... {'name':'root/cnode', 'data':[]},
... {'name':'root/bnode/anode', 'data':['a', 'b', 'c']},
... {'name':'root/cnode/anode/leaf', 'data':True}
... ])
>>> print(tobj)
root
├anode (*)
├bnode
│└anode (*)
└cnode
└anode
└leaf (*)
>>> pprint.pprint(tobj.search_tree('anode'), width=40)
['root/anode',
'root/bnode/anode',
'root/cnode/anode',
'root/cnode/anode/leaf']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
return self._search_tree(name) |
def div_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]:
"""Filter the JSON ``value`` for alert divs.
Arguments
---------
key
Key of the structure
value
Values in the structure
format
Output format of the processing
meta
Meta information
"""
if key != "Div" or format != "latex":
return None
[[_, classes, _], contents] = value
try:
alert_type = [name.split("-")[1] for name in classes if "-" in name][0]
except IndexError:
return None
if alert_type not in ALLOWED_ALERT_TYPES.__members__:
return None
filtered = [RawBlock("latex", rf"\begin{{{alert_type}box}}")]
filtered.extend(contents)
filtered.append(RawBlock("latex", rf"\end{{{alert_type}box}}"))
return filtered | Filter the JSON ``value`` for alert divs.
Arguments
---------
key
Key of the structure
value
Values in the structure
format
Output format of the processing
meta
Meta information | Below is the instruction that describes the task:
### Input:
Filter the JSON ``value`` for alert divs.
Arguments
---------
key
Key of the structure
value
Values in the structure
format
Output format of the processing
meta
Meta information
### Response:
def div_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]:
"""Filter the JSON ``value`` for alert divs.
Arguments
---------
key
Key of the structure
value
Values in the structure
format
Output format of the processing
meta
Meta information
"""
if key != "Div" or format != "latex":
return None
[[_, classes, _], contents] = value
try:
alert_type = [name.split("-")[1] for name in classes if "-" in name][0]
except IndexError:
return None
if alert_type not in ALLOWED_ALERT_TYPES.__members__:
return None
filtered = [RawBlock("latex", rf"\begin{{{alert_type}box}}")]
filtered.extend(contents)
filtered.append(RawBlock("latex", rf"\end{{{alert_type}box}}"))
return filtered |
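The (key, value, format, meta) signature matches the pandocfilters action convention, so, assuming that is the library in use here, the filter could be wired up as a standalone script roughly like this (script name and pandoc invocation are hypothetical).

# alert_filter.py: hypothetical entry point for a pandoc JSON filter
from pandocfilters import toJSONFilter

if __name__ == '__main__':
    # pandoc --filter ./alert_filter.py --to latex input.md
    toJSONFilter(div_filter)
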
def compute_residuals(self):
"""Compute residuals and stopping thresholds."""
if self.opt['AutoRho', 'StdResiduals']:
r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y))
s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y))
epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] + \
self.rsdl_rn(self.AXnr, self.Y) * self.opt['RelStopTol']
edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] + \
self.rsdl_sn(self.U) * self.opt['RelStopTol']
else:
rn = self.rsdl_rn(self.AXnr, self.Y)
if rn == 0.0:
rn = 1.0
sn = self.rsdl_sn(self.U)
if sn == 0.0:
sn = 1.0
r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] / rn + \
self.opt['RelStopTol']
edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] / sn + \
self.opt['RelStopTol']
return r, s, epri, edua | Compute residuals and stopping thresholds. | Below is the instruction that describes the task:
### Input:
Compute residuals and stopping thresholds.
### Response:
def compute_residuals(self):
"""Compute residuals and stopping thresholds."""
if self.opt['AutoRho', 'StdResiduals']:
r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y))
s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y))
epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] + \
self.rsdl_rn(self.AXnr, self.Y) * self.opt['RelStopTol']
edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] + \
self.rsdl_sn(self.U) * self.opt['RelStopTol']
else:
rn = self.rsdl_rn(self.AXnr, self.Y)
if rn == 0.0:
rn = 1.0
sn = self.rsdl_sn(self.U)
if sn == 0.0:
sn = 1.0
r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] / rn + \
self.opt['RelStopTol']
edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] / sn + \
self.opt['RelStopTol']
return r, s, epri, edua |
def pad(lines, delim):
"""
Right Pads text split by their delim.
:param lines:
:param delim:
:return:
"""
"""
Populates text into chunks. If the delim was & then
['12 & 344', '344 & 8', '8 & 88'] would be stored in chunks as
[['12', '344', '8'], ['344', '8', '88']]
"""
chunks = []
for i in range(len(lines)):
line = lines[i]
sections = line.split(delim)
for j in range(len(sections)):
if len(chunks) <= j:
chunks.append([])
chunks[j].append(sections[j])
"""
Calculates & Stores the max length of chunks
"""
max_lengths = []
for i in range(len(chunks)):
_max = max([len(j) for j in chunks[i]])
max_lengths.append(_max)
"""
Pads the children of chunks according to the chunk's max length" \
"""
for i in range(len(chunks)):
for j in range(len(chunks[i])):
chunks[i][j] += (max_lengths[i] - len(chunks[i][j])) * ' '
new_lines = ['' for i in range(len(lines))]
for i in range(len(chunks)):
for j in range(len(chunks[i])):
new_lines[j] += chunks[i][j]
return new_lines | Right Pads text split by their delim.
:param lines:
:param delim:
:return: | Below is the instruction that describes the task:
### Input:
Right Pads text split by their delim.
:param lines:
:param delim:
:return:
### Response:
def pad(lines, delim):
"""
Right Pads text split by their delim.
:param lines:
:param delim:
:return:
"""
"""
Populates text into chunks. If the delim was & then
['12 & 344', '344 & 8', '8 & 88'] would be stored in chunks as
[['12', '344', '8'], ['344', '8', '88']]
"""
chunks = []
for i in range(len(lines)):
line = lines[i]
sections = line.split(delim)
for j in range(len(sections)):
if len(chunks) <= j:
chunks.append([])
chunks[j].append(sections[j])
"""
Calculates & Stores the max length of chunks
"""
max_lengths = []
for i in range(len(chunks)):
_max = max([len(j) for j in chunks[i]])
max_lengths.append(_max)
"""
Pads the children of chunks according to the chunk's max length" \
"""
for i in range(len(chunks)):
for j in range(len(chunks[i])):
chunks[i][j] += (max_lengths[i] - len(chunks[i][j])) * ' '
new_lines = ['' for i in range(len(lines))]
for i in range(len(chunks)):
for j in range(len(chunks[i])):
new_lines[j] += chunks[i][j]
return new_lines |
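A quick usage sketch; note that the delimiter is consumed while splitting, so the padded columns are joined back without it.

rows = ['12 & 344', '344 & 8', '8 & 88']
for line in pad(rows, ' & '):
    print(repr(line))
# Each ' & '-separated column is right-padded to the width of its longest
# entry; the delimiter itself is not re-inserted into the output lines.
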
def _prepare_for_training(self, job_name=None):
"""Set any values in the estimator that need to be set before training.
Args:
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
"""
if job_name is not None:
self._current_job_name = job_name
else:
# honor supplied base_job_name or generate it
if self.base_job_name:
base_name = self.base_job_name
elif isinstance(self, sagemaker.algorithm.AlgorithmEstimator):
base_name = self.algorithm_arn.split('/')[-1] # pylint: disable=no-member
else:
base_name = base_name_from_image(self.train_image())
self._current_job_name = name_from_base(base_name)
# if output_path was specified we use it otherwise initialize here.
# For Local Mode with local_code=True we don't need an explicit output_path
if self.output_path is None:
local_code = get_config_value('local.local_code', self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
self.output_path = ''
else:
self.output_path = 's3://{}/'.format(self.sagemaker_session.default_bucket()) | Set any values in the estimator that need to be set before training.
Args:
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable. | Below is the instruction that describes the task:
### Input:
Set any values in the estimator that need to be set before training.
Args:
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
### Response:
def _prepare_for_training(self, job_name=None):
"""Set any values in the estimator that need to be set before training.
Args:
* job_name (str): Name of the training job to be created. If not specified, one is generated,
using the base name given to the constructor if applicable.
"""
if job_name is not None:
self._current_job_name = job_name
else:
# honor supplied base_job_name or generate it
if self.base_job_name:
base_name = self.base_job_name
elif isinstance(self, sagemaker.algorithm.AlgorithmEstimator):
base_name = self.algorithm_arn.split('/')[-1] # pylint: disable=no-member
else:
base_name = base_name_from_image(self.train_image())
self._current_job_name = name_from_base(base_name)
# if output_path was specified we use it otherwise initialize here.
# For Local Mode with local_code=True we don't need an explicit output_path
if self.output_path is None:
local_code = get_config_value('local.local_code', self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
self.output_path = ''
else:
self.output_path = 's3://{}/'.format(self.sagemaker_session.default_bucket()) |
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)):
h5file[path + str(key)] = item
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
raise ValueError('Cannot save %s type' % type(item)) | Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format | Below is the instruction that describes the task:
### Input:
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
### Response:
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)):
h5file[path + str(key)] = item
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
raise ValueError('Cannot save %s type' % type(item)) |
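A usage sketch, assuming h5py and numpy are installed and the function is importable.

import h5py
import numpy as np

payload = {
    'weights': np.arange(4.0),
    # Plain Python ints are not in the accepted-type tuple above, hence np.int64.
    'meta': {'name': 'run-1', 'epochs': np.int64(10)},
}
with h5py.File('example.h5', 'w') as h5file:
    recursively_save_dict_contents_to_group(h5file, '/', payload)
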
def hidden(self):
"""return list of hidden members"""
members = [self.member_info(item["_id"]) for item in self.members()]
result = []
for member in members:
if member['rsInfo'].get('hidden'):
server_id = member['server_id']
result.append({
'_id': member['_id'],
'host': self._servers.hostname(server_id),
'server_id': server_id})
return result | return list of hidden members | Below is the instruction that describes the task:
### Input:
return list of hidden members
### Response:
def hidden(self):
"""return list of hidden members"""
members = [self.member_info(item["_id"]) for item in self.members()]
result = []
for member in members:
if member['rsInfo'].get('hidden'):
server_id = member['server_id']
result.append({
'_id': member['_id'],
'host': self._servers.hostname(server_id),
'server_id': server_id})
return result |
def render_to_string(self, template_file, context):
"""Render given template to string and add object to context"""
context = context if context else {}
if self.object:
context['object'] = self.object
context[self.object.__class__.__name__.lower()] = self.object
return render_to_string(template_file, context, self.request) | Render given template to string and add object to context | Below is the instruction that describes the task:
### Input:
Render given template to string and add object to context
### Response:
def render_to_string(self, template_file, context):
"""Render given template to string and add object to context"""
context = context if context else {}
if self.object:
context['object'] = self.object
context[self.object.__class__.__name__.lower()] = self.object
return render_to_string(template_file, context, self.request) |
def tag_secondary_structure(self, force=False):
"""Tags each `Residue` of the `Polypeptide` with secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged.
"""
tagged = ['secondary_structure' in x.tags.keys()
for x in self._monomers]
if (not all(tagged)) or force:
dssp_out = run_dssp(self.pdb, path=False)
if dssp_out is None:
return
dssp_ss_list = extract_all_ss_dssp(dssp_out, path=False)
for monomer, dssp_ss in zip(self._monomers, dssp_ss_list):
monomer.tags['secondary_structure'] = dssp_ss[1]
return | Tags each `Residue` of the `Polypeptide` with secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. | Below is the instruction that describes the task:
### Input:
Tags each `Residue` of the `Polypeptide` with secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged.
### Response:
def tag_secondary_structure(self, force=False):
"""Tags each `Residue` of the `Polypeptide` with secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged.
"""
tagged = ['secondary_structure' in x.tags.keys()
for x in self._monomers]
if (not all(tagged)) or force:
dssp_out = run_dssp(self.pdb, path=False)
if dssp_out is None:
return
dssp_ss_list = extract_all_ss_dssp(dssp_out, path=False)
for monomer, dssp_ss in zip(self._monomers, dssp_ss_list):
monomer.tags['secondary_structure'] = dssp_ss[1]
return |
def _init_qualifier(qualifier, qual_repo):
"""
Initialize the flavors of a qualifier from the qualifier repo and
initialize propagated.
"""
qual_dict_entry = qual_repo[qualifier.name]
qualifier.propagated = False
if qualifier.tosubclass is None:
if qual_dict_entry.tosubclass is None:
qualifier.tosubclass = True
else:
qualifier.tosubclass = qual_dict_entry.tosubclass
if qualifier.overridable is None:
if qual_dict_entry.overridable is None:
qualifier.overridable = True
else:
qualifier.overridable = qual_dict_entry.overridable
if qualifier.translatable is None:
qualifier.translatable = qual_dict_entry.translatable | Initialize the flavors of a qualifier from the qualifier repo and
initialize propagated. | Below is the the instruction that describes the task:
### Input:
Initialize the flavors of a qualifier from the qualifier repo and
initialize propagated.
### Response:
def _init_qualifier(qualifier, qual_repo):
"""
Initialize the flavors of a qualifier from the qualifier repo and
initialize propagated.
"""
qual_dict_entry = qual_repo[qualifier.name]
qualifier.propagated = False
if qualifier.tosubclass is None:
if qual_dict_entry.tosubclass is None:
qualifier.tosubclass = True
else:
qualifier.tosubclass = qual_dict_entry.tosubclass
if qualifier.overridable is None:
if qual_dict_entry.overridable is None:
qualifier.overridable = True
else:
qualifier.overridable = qual_dict_entry.overridable
if qualifier.translatable is None:
qualifier.translatable = qual_dict_entry.translatable |
def from_xuid(cls, xuid):
'''
Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
try:
return cls._fetch(url)
except (GamertagNotFound, InvalidRequest):
# this endpoint seems to return 400 when the resource
# does not exist
raise GamertagNotFound('No such user: %s' % xuid) | Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance | Below is the the instruction that describes the task:
### Input:
Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
### Response:
def from_xuid(cls, xuid):
'''
Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
try:
return cls._fetch(url)
except (GamertagNotFound, InvalidRequest):
# this endpoint seems to return 400 when the resource
# does not exist
raise GamertagNotFound('No such user: %s' % xuid) |
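A minimal usage sketch for the classmethod above, using only the names its docstring exposes (`xbox.GamerProfile` and `xbox.exceptions.GamertagNotFound`); the xuid value is made up, and the library may require Xbox Live authentication before profile lookups succeed.

import xbox
from xbox.exceptions import GamertagNotFound

try:
    # Hypothetical xuid, purely for illustration.
    profile = xbox.GamerProfile.from_xuid('2533274812345678')
    print(profile)
except GamertagNotFound as exc:
    print('lookup failed: %s' % exc)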
def segment(args):
"""
%prog segment loss.ids bedfile
Merge adjacent gene loss into segmental loss.
Then based on the segmental loss, estimate amount of DNA loss in base pairs.
Two estimates can be given:
- conservative: just within the start and end of a single gene
- aggressive: extend the deletion track to the next gene
The real deletion size is within these estimates.
"""
from jcvi.formats.base import SetFile
p = OptionParser(segment.__doc__)
p.add_option("--chain", default=1, type="int",
help="Allow next N genes to be chained [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
idsfile, bedfile = args
bed = Bed(bedfile)
order = bed.order
ids = SetFile(idsfile)
losses = Grouper()
skip = opts.chain
for i, a in enumerate(bed):
a = a.accn
for j in xrange(i + 1, i + 1 + skip):
if j >= len(bed):
break
b = bed[j].accn
if a in ids:
losses.join(a, a)
if a in ids and b in ids:
losses.join(a, b)
losses = list(losses)
singletons = [x for x in losses if len(x) == 1]
segments = [x for x in losses if len(x) > 1]
ns, nm, nt = len(singletons), len(segments), len(losses)
assert ns + nm == nt
# Summary for all segments
for x in sorted(singletons) + sorted(segments):
print("\t".join(str(x) for x in ("|".join(sorted(x)), len(x),
estimate_size(x, bed, order))))
# Find longest segment stretch
if segments:
mx, maxsegment = max([(len(x), x) for x in segments])
print("Longest stretch: run of {0} genes".format(mx), file=sys.stderr)
print(" {0}".format("|".join(sorted(maxsegment))), file=sys.stderr)
seg_asize = sum(estimate_size(x, bed, order) for x in segments)
seg_bsize = sum(estimate_size(x, bed, order, conservative=False) \
for x in segments)
else:
seg_asize = seg_bsize = 0
sing_asize = sum(estimate_size(x, bed, order) for x in singletons)
sing_bsize = sum(estimate_size(x, bed, order, conservative=False) \
for x in singletons)
total_asize = sing_asize + seg_asize
total_bsize = sing_bsize + seg_bsize
print("Singleton ({0}): {1} - {2} bp".\
format(ns, sing_asize, sing_bsize), file=sys.stderr)
print("Segment ({0}): {1} - {2} bp".\
format(nm, seg_asize, seg_bsize), file=sys.stderr)
print("Total ({0}): {1} - {2} bp".\
format(nt, total_asize, total_bsize), file=sys.stderr)
print("Average ({0}): {1} bp".\
format(nt, (total_asize + total_bsize) / 2), file=sys.stderr) | %prog segment loss.ids bedfile
Merge adjacent gene loss into segmental loss.
Then based on the segmental loss, estimate amount of DNA loss in base pairs.
Two estimates can be given:
- conservative: just within the start and end of a single gene
- aggressive: extend the deletion track to the next gene
The real deletion size is within these estimates. | Below is the the instruction that describes the task:
### Input:
%prog segment loss.ids bedfile
Merge adjacent gene loss into segmental loss.
Then based on the segmental loss, estimate amount of DNA loss in base pairs.
Two estimates can be given:
- conservative: just within the start and end of a single gene
- aggressive: extend the deletion track to the next gene
The real deletion size is within these estimates.
### Response:
def segment(args):
"""
%prog segment loss.ids bedfile
Merge adjacent gene loss into segmental loss.
Then based on the segmental loss, estimate amount of DNA loss in base pairs.
Two estimates can be given:
- conservative: just within the start and end of a single gene
- aggressive: extend the deletion track to the next gene
The real deletion size is within these estimates.
"""
from jcvi.formats.base import SetFile
p = OptionParser(segment.__doc__)
p.add_option("--chain", default=1, type="int",
help="Allow next N genes to be chained [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
idsfile, bedfile = args
bed = Bed(bedfile)
order = bed.order
ids = SetFile(idsfile)
losses = Grouper()
skip = opts.chain
for i, a in enumerate(bed):
a = a.accn
for j in xrange(i + 1, i + 1 + skip):
if j >= len(bed):
break
b = bed[j].accn
if a in ids:
losses.join(a, a)
if a in ids and b in ids:
losses.join(a, b)
losses = list(losses)
singletons = [x for x in losses if len(x) == 1]
segments = [x for x in losses if len(x) > 1]
ns, nm, nt = len(singletons), len(segments), len(losses)
assert ns + nm == nt
# Summary for all segments
for x in sorted(singletons) + sorted(segments):
print("\t".join(str(x) for x in ("|".join(sorted(x)), len(x),
estimate_size(x, bed, order))))
# Find longest segment stretch
if segments:
mx, maxsegment = max([(len(x), x) for x in segments])
print("Longest stretch: run of {0} genes".format(mx), file=sys.stderr)
print(" {0}".format("|".join(sorted(maxsegment))), file=sys.stderr)
seg_asize = sum(estimate_size(x, bed, order) for x in segments)
seg_bsize = sum(estimate_size(x, bed, order, conservative=False) \
for x in segments)
else:
seg_asize = seg_bsize = 0
sing_asize = sum(estimate_size(x, bed, order) for x in singletons)
sing_bsize = sum(estimate_size(x, bed, order, conservative=False) \
for x in singletons)
total_asize = sing_asize + seg_asize
total_bsize = sing_bsize + seg_bsize
print("Singleton ({0}): {1} - {2} bp".\
format(ns, sing_asize, sing_bsize), file=sys.stderr)
print("Segment ({0}): {1} - {2} bp".\
format(nm, seg_asize, seg_bsize), file=sys.stderr)
print("Total ({0}): {1} - {2} bp".\
format(nt, total_asize, total_bsize), file=sys.stderr)
print("Average ({0}): {1} bp".\
format(nt, (total_asize + total_bsize) / 2), file=sys.stderr) |
def _get_Berger_data(verbose=True):
'''Read in the Berger and Loutre orbital table as a pandas dataframe, convert to xarray
'''
# The first column of the data file is used as the row index, and represents kyr from present
orbit91_pd, path = load_data_source(local_path = local_path,
remote_source_list = [threddspath, NCDCpath],
open_method = pd.read_csv,
open_method_kwargs = {'delim_whitespace': True, 'skiprows':1},
verbose=verbose,)
# As xarray structure with the dimension named 'kyear'
orbit = xr.Dataset(orbit91_pd).rename({'dim_0': 'kyear'})
# Now change names
orbit = orbit.rename({'ECC': 'ecc', 'OMEGA': 'long_peri',
'OBL': 'obliquity', 'PREC': 'precession'})
# add 180 degrees to long_peri (see lambda definition, Berger 1978 Appendix)
orbit['long_peri'] += 180.
orbit['precession'] *= -1.
orbit.attrs['Description'] = 'The Berger and Loutre (1991) orbital data table'
orbit.attrs['Citation'] = 'https://doi.org/10.1016/0277-3791(91)90033-Q'
orbit.attrs['Source'] = path
orbit.attrs['Note'] = 'Longitude of perihelion is defined to be 0 degrees at Northern Vernal Equinox. This differs by 180 degrees from orbit91 source file.'
return orbit | Read in the Berger and Loutre orbital table as a pandas dataframe, convert to xarray | Below is the the instruction that describes the task:
### Input:
Read in the Berger and Loutre orbital table as a pandas dataframe, convert to xarray
### Response:
def _get_Berger_data(verbose=True):
'''Read in the Berger and Loutre orbital table as a pandas dataframe, convert to xarray
'''
# The first column of the data file is used as the row index, and represents kyr from present
orbit91_pd, path = load_data_source(local_path = local_path,
remote_source_list = [threddspath, NCDCpath],
open_method = pd.read_csv,
open_method_kwargs = {'delim_whitespace': True, 'skiprows':1},
verbose=verbose,)
# As xarray structure with the dimension named 'kyear'
orbit = xr.Dataset(orbit91_pd).rename({'dim_0': 'kyear'})
# Now change names
orbit = orbit.rename({'ECC': 'ecc', 'OMEGA': 'long_peri',
'OBL': 'obliquity', 'PREC': 'precession'})
# add 180 degrees to long_peri (see lambda definition, Berger 1978 Appendix)
orbit['long_peri'] += 180.
orbit['precession'] *= -1.
orbit.attrs['Description'] = 'The Berger and Loutre (1991) orbital data table'
orbit.attrs['Citation'] = 'https://doi.org/10.1016/0277-3791(91)90033-Q'
orbit.attrs['Source'] = path
orbit.attrs['Note'] = 'Longitude of perihelion is defined to be 0 degrees at Northern Vernal Equinox. This differs by 180 degrees from orbit91 source file.'
return orbit |
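A short sketch of inspecting the returned table; `_get_Berger_data` is a private helper, so calling it directly like this is for illustration only.

# The table comes back as an xarray.Dataset indexed by 'kyear', carrying the
# renamed variables and the metadata attributes set above.
orbit = _get_Berger_data(verbose=False)

print(list(orbit.data_vars))      # includes 'ecc', 'long_peri', 'obliquity', 'precession'
print(orbit.dims)                 # contains the 'kyear' dimension
print(orbit.attrs['Citation'])    # https://doi.org/10.1016/0277-3791(91)90033-Q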
def _unzip_file(self, src_path, dest_path, filename):
"""unzips file located at src_path into destination_path"""
self.logger.info("unzipping file...")
# construct full path (including file name) for unzipping
unzip_path = os.path.join(dest_path, filename)
utils.ensure_directory_exists(unzip_path)
# extract data
with zipfile.ZipFile(src_path, "r") as z:
z.extractall(unzip_path)
return True | unzips file located at src_path into destination_path | Below is the the instruction that describes the task:
### Input:
unzips file located at src_path into destination_path
### Response:
def _unzip_file(self, src_path, dest_path, filename):
"""unzips file located at src_path into destination_path"""
self.logger.info("unzipping file...")
# construct full path (including file name) for unzipping
unzip_path = os.path.join(dest_path, filename)
utils.ensure_directory_exists(unzip_path)
# extract data
with zipfile.ZipFile(src_path, "r") as z:
z.extractall(unzip_path)
return True |
def get_user(self, user_id):
"""
Return user object.
:param user_id: primary key of user object
:return: user object
"""
user_model = get_user_model()
try:
return user_model.objects.get(id=user_id)
except user_model.DoesNotExist:
return None | Return user object.
:param user_id: primary key of user object
:return: user object | Below is the the instruction that describes the task:
### Input:
Return user object.
:param user_id: primary key of user object
:return: user object
### Response:
def get_user(self, user_id):
"""
Return user object.
:param user_id: primary key of user object
:return: user object
"""
user_model = get_user_model()
try:
return user_model.objects.get(id=user_id)
except user_model.DoesNotExist:
return None |
def iterative_stratification(node_label_matrix, training_set_size, number_of_categories, random_seed=0):
"""
Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids.
"""
number_of_labelled_nodes = node_label_matrix.shape[0]
testing_set_size = number_of_labelled_nodes - training_set_size
training_set_proportion = training_set_size/number_of_labelled_nodes
testing_set_proportion = testing_set_size/number_of_labelled_nodes
# Calculate the desired number of examples of each label at each subset.
desired_label_number = np.zeros((2, number_of_categories), dtype=np.int64)
node_label_matrix = node_label_matrix.tocsc()
for j in range(number_of_categories):
category_label_number = node_label_matrix.getcol(j).indices.size
desired_label_number[0, j] = math.ceil(category_label_number*training_set_proportion)
desired_label_number[1, j] = category_label_number - desired_label_number[0, j]
train_ids = list()
test_ids = list()
append_train_id = train_ids.append
append_test_id = test_ids.append
# Randomize process
np.random.seed(random_seed)
while True:
if len(train_ids) + len(test_ids) >= number_of_labelled_nodes:
break
# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly
remaining_label_distribution = desired_label_number.sum(axis=0)
min_label = np.min(remaining_label_distribution[np.where(remaining_label_distribution > 0)[0]])
label_indices = np.where(remaining_label_distribution == min_label)[0]
chosen_label = int(np.random.choice(label_indices, 1)[0])
# Find the subset with the largest number of desired examples for this label,
# breaking ties by considering the largest number of desired examples, breaking further ties randomly.
fold_max_remaining_labels = np.max(desired_label_number[:, chosen_label])
fold_indices = np.where(desired_label_number[:, chosen_label] == fold_max_remaining_labels)[0]
chosen_fold = int(np.random.choice(fold_indices, 1)[0])
# Choose a random example for the selected label.
relevant_nodes = node_label_matrix.getcol(chosen_label).indices
chosen_node = int(np.random.choice(np.setdiff1d(relevant_nodes,
np.union1d(np.array(train_ids),
np.array(test_ids))),
1)[0])
if chosen_fold == 0:
append_train_id(chosen_node)
desired_label_number[0, node_label_matrix.getrow(chosen_node).indices] -= 1
elif chosen_fold == 1:
append_test_id(chosen_node)
desired_label_number[1, node_label_matrix.getrow(chosen_node).indices] -= 1
else:
raise RuntimeError
return np.array(train_ids), np.array(test_ids) | Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids. | Below is the the instruction that describes the task:
### Input:
Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids.
### Response:
def iterative_stratification(node_label_matrix, training_set_size, number_of_categories, random_seed=0):
"""
Iterative data fold stratification/balancing for two folds.
Based on: Sechidis, K., Tsoumakas, G., & Vlahavas, I. (2011).
On the stratification of multi-label data.
In Machine Learning and Knowledge Discovery in Databases (pp. 145-158).
Springer Berlin Heidelberg.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- training_set_size: The minimum required size for the training set.
- number_of_categories: The number of categories/classes in the learning.
- random_seed: A seed for numpy random.
Outputs: - train_set: A NumPy array containing the training set node ids.
- test_set: A NumPy array containing the testing set node ids.
"""
number_of_labelled_nodes = node_label_matrix.shape[0]
testing_set_size = number_of_labelled_nodes - training_set_size
training_set_proportion = training_set_size/number_of_labelled_nodes
testing_set_proportion = testing_set_size/number_of_labelled_nodes
# Calculate the desired number of examples of each label at each subset.
desired_label_number = np.zeros((2, number_of_categories), dtype=np.int64)
node_label_matrix = node_label_matrix.tocsc()
for j in range(number_of_categories):
category_label_number = node_label_matrix.getcol(j).indices.size
desired_label_number[0, j] = math.ceil(category_label_number*training_set_proportion)
desired_label_number[1, j] = category_label_number - desired_label_number[0, j]
train_ids = list()
test_ids = list()
append_train_id = train_ids.append
append_test_id = test_ids.append
# Randomize process
np.random.seed(random_seed)
while True:
if len(train_ids) + len(test_ids) >= number_of_labelled_nodes:
break
# Find the label with the fewest (but at least one) remaining examples, breaking the ties randomly
remaining_label_distribution = desired_label_number.sum(axis=0)
min_label = np.min(remaining_label_distribution[np.where(remaining_label_distribution > 0)[0]])
label_indices = np.where(remaining_label_distribution == min_label)[0]
chosen_label = int(np.random.choice(label_indices, 1)[0])
# Find the subset with the largest number of desired examples for this label,
# breaking ties by considering the largest number of desired examples, breaking further ties randomly.
fold_max_remaining_labels = np.max(desired_label_number[:, chosen_label])
fold_indices = np.where(desired_label_number[:, chosen_label] == fold_max_remaining_labels)[0]
chosen_fold = int(np.random.choice(fold_indices, 1)[0])
# Choose a random example for the selected label.
relevant_nodes = node_label_matrix.getcol(chosen_label).indices
chosen_node = int(np.random.choice(np.setdiff1d(relevant_nodes,
np.union1d(np.array(train_ids),
np.array(test_ids))),
1)[0])
if chosen_fold == 0:
append_train_id(chosen_node)
desired_label_number[0, node_label_matrix.getrow(chosen_node).indices] -= 1
elif chosen_fold == 1:
append_test_id(chosen_node)
desired_label_number[1, node_label_matrix.getrow(chosen_node).indices] -= 1
else:
raise RuntimeError
return np.array(train_ids), np.array(test_ids) |
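A self-contained sketch of calling the routine above on a toy node-label matrix (six nodes, two categories, one label per node):

import numpy as np
import scipy.sparse as sp

# Nodes 0, 1, 4 carry category 0; nodes 2, 3, 5 carry category 1.
rows = np.array([0, 1, 2, 3, 4, 5])
cols = np.array([0, 0, 1, 1, 0, 1])
node_label_matrix = sp.coo_matrix((np.ones(6), (rows, cols)), shape=(6, 2))

train_ids, test_ids = iterative_stratification(node_label_matrix,
                                               training_set_size=4,
                                               number_of_categories=2,
                                               random_seed=0)
# Both categories end up represented in both folds.
print(sorted(train_ids.tolist()), sorted(test_ids.tolist()))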
def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values | Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment. | Below is the the instruction that describes the task:
### Input:
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
### Response:
def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values |
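The helper leans on `Index.get_indexer`, which returns positional indices and `-1` for labels that are absent; a quick standalone illustration of that behaviour:

import pandas as pd

sessions = pd.DatetimeIndex(['2024-01-02', '2024-01-03', '2024-01-04'])
specials = pd.DatetimeIndex(['2024-01-03', '2024-01-06'])

# 2024-01-03 is found at position 1; 2024-01-06 is not a session, hence -1.
print(sessions.get_indexer(specials))   # [ 1 -1]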
def _handle_command(self, buffer):
" When text is accepted in the command line. "
text = buffer.text
# First leave command mode. We want to make sure that the working
# pane is focused again before executing the command handers.
self.pymux.leave_command_mode(append_to_history=True)
# Execute command.
self.pymux.handle_command(text) | When text is accepted in the command line. | Below is the the instruction that describes the task:
### Input:
When text is accepted in the command line.
### Response:
def _handle_command(self, buffer):
" When text is accepted in the command line. "
text = buffer.text
# First leave command mode. We want to make sure that the working
# pane is focused again before executing the command handers.
self.pymux.leave_command_mode(append_to_history=True)
# Execute command.
self.pymux.handle_command(text) |
def reject_entry(request, entry_id):
"""
Admins can reject an entry that has been verified or approved but not
invoiced to set its status to 'unverified' for the user to fix.
"""
return_url = request.GET.get('next', reverse('dashboard'))
try:
entry = Entry.no_join.get(pk=entry_id)
except:
message = 'No such log entry.'
messages.error(request, message)
return redirect(return_url)
if entry.status == Entry.UNVERIFIED or entry.status == Entry.INVOICED:
msg_text = 'This entry is unverified or is already invoiced.'
messages.error(request, msg_text)
return redirect(return_url)
if request.POST.get('Yes'):
entry.status = Entry.UNVERIFIED
entry.save()
msg_text = 'The entry\'s status was set to unverified.'
messages.info(request, msg_text)
return redirect(return_url)
return render(request, 'timepiece/entry/reject.html', {
'entry': entry,
'next': request.GET.get('next'),
}) | Admins can reject an entry that has been verified or approved but not
invoiced to set its status to 'unverified' for the user to fix. | Below is the the instruction that describes the task:
### Input:
Admins can reject an entry that has been verified or approved but not
invoiced to set its status to 'unverified' for the user to fix.
### Response:
def reject_entry(request, entry_id):
"""
Admins can reject an entry that has been verified or approved but not
invoiced to set its status to 'unverified' for the user to fix.
"""
return_url = request.GET.get('next', reverse('dashboard'))
try:
entry = Entry.no_join.get(pk=entry_id)
except:
message = 'No such log entry.'
messages.error(request, message)
return redirect(return_url)
if entry.status == Entry.UNVERIFIED or entry.status == Entry.INVOICED:
msg_text = 'This entry is unverified or is already invoiced.'
messages.error(request, msg_text)
return redirect(return_url)
if request.POST.get('Yes'):
entry.status = Entry.UNVERIFIED
entry.save()
msg_text = 'The entry\'s status was set to unverified.'
messages.info(request, msg_text)
return redirect(return_url)
return render(request, 'timepiece/entry/reject.html', {
'entry': entry,
'next': request.GET.get('next'),
}) |
def _write_cpr(self, f, cType, parameter) -> int:
'''
Write compression info to the end of the file in a CPR.
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.CPR_BASE_SIZE64 + 4
section_type = CDF.CPR_
rfuA = 0
pCount = 1
cpr = bytearray(block_size)
cpr[0:8] = struct.pack('>q', block_size)
cpr[8:12] = struct.pack('>i', section_type)
cpr[12:16] = struct.pack('>i', cType)
cpr[16:20] = struct.pack('>i', rfuA)
cpr[20:24] = struct.pack('>i', pCount)
cpr[24:28] = struct.pack('>i', parameter)
f.write(cpr)
return byte_loc | Write compression info to the end of the file in a CPR. | Below is the the instruction that describes the task:
### Input:
Write compression info to the end of the file in a CPR.
### Response:
def _write_cpr(self, f, cType, parameter) -> int:
'''
Write compression info to the end of the file in a CPR.
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.CPR_BASE_SIZE64 + 4
section_type = CDF.CPR_
rfuA = 0
pCount = 1
cpr = bytearray(block_size)
cpr[0:8] = struct.pack('>q', block_size)
cpr[8:12] = struct.pack('>i', section_type)
cpr[12:16] = struct.pack('>i', cType)
cpr[16:20] = struct.pack('>i', rfuA)
cpr[20:24] = struct.pack('>i', pCount)
cpr[24:28] = struct.pack('>i', parameter)
f.write(cpr)
return byte_loc |
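To make the 28-byte layout explicit, here is a small round-trip with `struct` in the same big-endian format the method writes; the numeric values are placeholders chosen for illustration, not constants taken from the CDF module.

import struct

# An 8-byte block size followed by five 4-byte ints:
# section type, cType, rfuA, pCount, parameter.
record = struct.pack('>q5i', 28, 11, 5, 0, 1, 6)
assert len(record) == 28

block_size, section_type, c_type, rfu_a, p_count, parameter = struct.unpack('>q5i', record)
print(block_size, c_type, parameter)   # 28 5 6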
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:] | Return a script that'll work in a relocatable environment. | Below is the the instruction that describes the task:
### Input:
Return a script that'll work in a relocatable environment.
### Response:
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:] |
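A quick check of where the activation line lands: after the last `from __future__ import` statement when one exists, otherwise directly after the shebang.

script = [
    '#!/usr/bin/env python',
    'from __future__ import print_function',
    'print("hello")',
]
for line in relative_script(script):
    # The injected activate line is long, so truncate it for display.
    print(line[:60])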
def getScreenBounds(self, screenId):
""" Returns the screen size of the specified monitor (0 being the main monitor). """
screen_details = self.getScreenDetails()
if not isinstance(screenId, int) or screenId < -1 or screenId >= len(screen_details):
raise ValueError("Invalid screen ID")
if screenId == -1:
# -1 represents the entire virtual screen
return self._getVirtualScreenRect()
return screen_details[screenId]["rect"] | Returns the screen size of the specified monitor (0 being the main monitor). | Below is the the instruction that describes the task:
### Input:
Returns the screen size of the specified monitor (0 being the main monitor).
### Response:
def getScreenBounds(self, screenId):
""" Returns the screen size of the specified monitor (0 being the main monitor). """
screen_details = self.getScreenDetails()
if not isinstance(screenId, int) or screenId < -1 or screenId >= len(screen_details):
raise ValueError("Invalid screen ID")
if screenId == -1:
# -1 represents the entire virtual screen
return self._getVirtualScreenRect()
return screen_details[screenId]["rect"] |
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
) | Draw bar chart in dataframe cells. | Below is the the instruction that describes the task:
### Input:
Draw bar chart in dataframe cells.
### Response:
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""
Generate CSS code to draw a bar from start to end.
"""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
) |
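`_bar` sits behind pandas' public `Styler.bar`; a minimal way to exercise the same code path is through that public entry point (the rendering method name varies across pandas versions).

import pandas as pd

df = pd.DataFrame({'a': [-3.0, 1.0, 4.0], 'b': [0.5, -2.0, 3.5]})
styled = df.style.bar(align='mid', color=['#d65f5f', '#5fba7d'])
html = styled.render()   # styled.to_html() on newer pandas releases
print(len(html) > 0)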
def refreshRecords( self ):
"""
Refreshes the records being loaded by this browser.
"""
table_type = self.tableType()
if ( not table_type ):
self._records = RecordSet()
return False
search = nativestring(self.uiSearchTXT.text())
query = self.query().copy()
terms, search_query = Q.fromSearch(search)
if ( search_query ):
query &= search_query
self._records = table_type.select(where = query).search(terms)
return True | Refreshes the records being loaded by this browser. | Below is the the instruction that describes the task:
### Input:
Refreshes the records being loaded by this browser.
### Response:
def refreshRecords( self ):
"""
Refreshes the records being loaded by this browser.
"""
table_type = self.tableType()
if ( not table_type ):
self._records = RecordSet()
return False
search = nativestring(self.uiSearchTXT.text())
query = self.query().copy()
terms, search_query = Q.fromSearch(search)
if ( search_query ):
query &= search_query
self._records = table_type.select(where = query).search(terms)
return True |
def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
df = self.interface.dframe(self, dimensions)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df | Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension | Below is the the instruction that describes the task:
### Input:
Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
### Response:
def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
df = self.interface.dframe(self, dimensions)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df |
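This reads like HoloViews' `Dataset.dframe`; assuming so, a small sketch of the two output shapes, flat versus key-dimension indexed:

import pandas as pd
import holoviews as hv

table = hv.Dataset(pd.DataFrame({'x': [1, 2, 3], 'y': [10.0, 20.0, 30.0]}),
                   kdims=['x'], vdims=['y'])

flat = table.dframe()                     # plain columns x and y
indexed = table.dframe(multi_index=True)  # x becomes the index
print(flat.columns.tolist(), indexed.index.name)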
def _add_junction(item):
'''
Adds a junction to the _current_statement.
'''
type_, channels = _expand_one_key_dictionary(item)
junction = UnnamedStatement(type='junction')
for item in channels:
type_, value = _expand_one_key_dictionary(item)
channel = UnnamedStatement(type='channel')
for val in value:
if _is_reference(val):
_add_reference(val, channel)
elif _is_inline_definition(val):
_add_inline_definition(val, channel)
junction.add_child(channel)
_current_statement.add_child(junction) | Adds a junction to the _current_statement. | Below is the the instruction that describes the task:
### Input:
Adds a junction to the _current_statement.
### Response:
def _add_junction(item):
'''
Adds a junction to the _current_statement.
'''
type_, channels = _expand_one_key_dictionary(item)
junction = UnnamedStatement(type='junction')
for item in channels:
type_, value = _expand_one_key_dictionary(item)
channel = UnnamedStatement(type='channel')
for val in value:
if _is_reference(val):
_add_reference(val, channel)
elif _is_inline_definition(val):
_add_inline_definition(val, channel)
junction.add_child(channel)
_current_statement.add_child(junction) |
def to_dict(self):
"""
Get a dictionary representation of :class:`InstanceResource`
"""
self_dict = {}
for key, value in six.iteritems(self.__dict__):
if self.allowed_params and key in self.allowed_params:
self_dict[key] = value
return self_dict | Get a dictionary representation of :class:`InstanceResource` | Below is the the instruction that describes the task:
### Input:
Get a dictionary representation of :class:`InstanceResource`
### Response:
def to_dict(self):
"""
Get a dictionary representation of :class:`InstanceResource`
"""
self_dict = {}
for key, value in six.iteritems(self.__dict__):
if self.allowed_params and key in self.allowed_params:
self_dict[key] = value
return self_dict |
def normalize_name(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_name is None:
decl.cache.normalized_name = normalize(decl.name)
return decl.cache.normalized_name | Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name | Below is the the instruction that describes the task:
### Input:
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
### Response:
def normalize_name(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_name is None:
decl.cache.normalized_name = normalize(decl.name)
return decl.cache.normalized_name |