code | docstring | text
---|---|---|
def calc_parent(self, i, j, h):
"""
Returns get_big_array and end of span of parent sequence that contains given child.
"""
N = self.repo.array_size
c_i = i
c_j = j
c_h = h
# Calculate the number of the sequence in its row (sequences
# with same height), from left to right, starting from 0.
c_n = c_i // (N ** c_h)
p_n = c_n // N
# Position of the child ID in the parent array.
p_p = c_n % N
# Parent height is child height plus one.
p_h = c_h + 1
# Span of sequences in parent row is max size N, to the power of the height.
span = N ** p_h
# Calculate parent i and j.
p_i = p_n * span
p_j = p_i + span
# Check the parent i,j bounds the child i,j, ie child span is contained by parent span.
assert p_i <= c_i, 'i greater on parent than child: {} {}'.format(p_i, p_j)
assert p_j >= c_j, 'j less on parent than child: {} {}'.format(p_i, p_j)
# Return parent i, j, h, p.
return p_i, p_j, p_h, p_p | Returns get_big_array and end of span of parent sequence that contains given child. | Below is the instruction that describes the task:
### Input:
Returns get_big_array and end of span of parent sequence that contains given child.
### Response:
def calc_parent(self, i, j, h):
"""
Returns get_big_array and end of span of parent sequence that contains given child.
"""
N = self.repo.array_size
c_i = i
c_j = j
c_h = h
# Calculate the number of the sequence in its row (sequences
# with same height), from left to right, starting from 0.
c_n = c_i // (N ** c_h)
p_n = c_n // N
# Position of the child ID in the parent array.
p_p = c_n % N
# Parent height is child height plus one.
p_h = c_h + 1
# Span of sequences in parent row is max size N, to the power of the height.
span = N ** p_h
# Calculate parent i and j.
p_i = p_n * span
p_j = p_i + span
# Check the parent i,j bounds the child i,j, ie child span is contained by parent span.
assert p_i <= c_i, 'i greater on parent than child: {} {}'.format(p_i, p_j)
assert p_j >= c_j, 'j less on parent than child: {} {}'.format(p_i, p_j)
# Return parent i, j, h, p.
return p_i, p_j, p_h, p_p |
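The index arithmetic above can be checked in isolation. A minimal standalone sketch, assuming an array size of N=4 (in the class, the real value comes from `self.repo.array_size`):

```python
# Standalone sketch of the parent-index arithmetic, with N=4 assumed for illustration.
def calc_parent_standalone(i, j, h, N=4):
    c_n = i // (N ** h)   # number of the child sequence within its row
    p_n = c_n // N        # number of the parent sequence within its row
    p_p = c_n % N         # position of the child inside the parent array
    p_h = h + 1           # parent height is child height plus one
    span = N ** p_h       # span covered by one parent sequence
    p_i = p_n * span
    p_j = p_i + span
    return p_i, p_j, p_h, p_p

# A child covering [4, 8) at height 1 sits in slot 1 of the parent covering [0, 16).
assert calc_parent_standalone(4, 8, 1) == (0, 16, 2, 1)
```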
def setns(fd, nstype):
"""
Reassociate thread with a namespace
:param fd int: The file descriptor referring to one of the namespace
entries in a :directory::`/proc/<pid>/ns/` directory.
:param nstype int: The type of namespace the calling thread should be
reassociated with.
"""
res = lib.setns(fd, nstype)
if res != 0:
_check_error(ffi.errno) | Reassociate thread with a namespace
:param fd int: The file descriptor referring to one of the namespace
entries in a :directory::`/proc/<pid>/ns/` directory.
:param nstype int: The type of namespace the calling thread should be
reassociated with. | Below is the instruction that describes the task:
### Input:
Reassociate thread with a namespace
:param fd int: The file descriptor referring to one of the namespace
entries in a :directory::`/proc/<pid>/ns/` directory.
:param nstype int: The type of namespace the calling thread should be
reassociated with.
### Response:
def setns(fd, nstype):
"""
Reassociate thread with a namespace
:param fd int: The file descriptor referring to one of the namespace
entries in a :directory::`/proc/<pid>/ns/` directory.
:param nstype int: The type of namespace the calling thread should be
reassociated with.
"""
res = lib.setns(fd, nstype)
if res != 0:
_check_error(ffi.errno) |
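A hypothetical usage sketch: joining the network namespace of another process. The `CLONE_NEWNET` value is the standard Linux constant; the target PID and the assumption that `setns` is exposed at module level are illustrative.

```python
import os

CLONE_NEWNET = 0x40000000      # Linux constant for network namespaces
pid = 1234                     # hypothetical target process

fd = os.open('/proc/{}/ns/net'.format(pid), os.O_RDONLY)
try:
    setns(fd, CLONE_NEWNET)    # reassociate the calling thread with that namespace
finally:
    os.close(fd)
```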
def autobuild_trub_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None,
app_info=None, use_safeupdate=False):
"""Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): If True, enables safemode before the firmware update records, then
disables it after the firmware update records.
"""
build_update_script(file_name, slot_assignments, os_info, sensor_graph, app_info, use_safeupdate) | Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): If True, enables safemode before the firmware update records, then
disables it after the firmware update records. | Below is the instruction that describes the task:
### Input:
Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): If True, enables safemode before the firmware update records, then
disables it after the firmware update records.
### Response:
def autobuild_trub_script(file_name, slot_assignments=None, os_info=None, sensor_graph=None,
app_info=None, use_safeupdate=False):
"""Build a trub script that loads given firmware into the given slots.
slot_assignments should be a list of tuples in the following form:
("slot X" or "controller", firmware_image_name)
The output of this autobuild action will be a trub script in
build/output/<file_name> that assigns the given firmware to each slot in
the order specified in the slot_assignments list.
Args:
file_name (str): The name of the output file that we should create.
This file name should end in .trub
slot_assignments (list of (str, str)): A list of tuples containing
the slot name and the firmware image that we should use to build
our update script. Optional
os_info (tuple(int, str)): A tuple of OS version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
sensor_graph (str): Name of sgf file. Optional.
app_info (tuple(int, str)): A tuple of App version tag and X.Y version
number that will be set as part of the OTA script if included. Optional.
use_safeupdate (bool): If True, enables safemode before the firmware update records, then
disables it after the firmware update records.
"""
build_update_script(file_name, slot_assignments, os_info, sensor_graph, app_info, use_safeupdate) |
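A hypothetical invocation following the documented signature; the file name, version tag, and firmware image names below are placeholders, not values from the original project:

```python
autobuild_trub_script(
    'upgrade.trub',
    slot_assignments=[('controller', 'controller_fw.elf'),
                      ('slot 1', 'sensor_fw.elf')],
    os_info=(1024, '1.0'),   # OS version tag and X.Y version string
    use_safeupdate=True,     # wrap the firmware records in safemode enable/disable
)
```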
def _handle_get(self, transaction):
"""
Handle GET requests
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction with the response to the request
"""
path = str("/" + transaction.request.uri_path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
if path == defines.DISCOVERY_URL:
transaction = self._server.resourceLayer.discover(transaction)
else:
try:
resource = self._server.root[path]
except KeyError:
resource = None
if resource is None or path == '/':
# Not Found
transaction.response.code = defines.Codes.NOT_FOUND.number
else:
transaction.resource = resource
transaction = self._server.resourceLayer.get_resource(transaction)
return transaction | Handle GET requests
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction with the response to the request | Below is the instruction that describes the task:
### Input:
Handle GET requests
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction with the response to the request
### Response:
def _handle_get(self, transaction):
"""
Handle GET requests
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the edited transaction with the response to the request
"""
path = str("/" + transaction.request.uri_path)
transaction.response = Response()
transaction.response.destination = transaction.request.source
transaction.response.token = transaction.request.token
if path == defines.DISCOVERY_URL:
transaction = self._server.resourceLayer.discover(transaction)
else:
try:
resource = self._server.root[path]
except KeyError:
resource = None
if resource is None or path == '/':
# Not Found
transaction.response.code = defines.Codes.NOT_FOUND.number
else:
transaction.resource = resource
transaction = self._server.resourceLayer.get_resource(transaction)
return transaction |
def zsh_complete(self, path, cmd, *cmds, sourceable=False):
"""Write zsh compdef script.
Args:
path (path-like): desired path of the compdef script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
sourceable (bool): if True, the generated file will contain an
explicit call to ``compdef``, which means it can be sourced
to activate CLI completion.
"""
grouping = internal.zsh_version() >= (5, 4)
path = pathlib.Path(path)
firstline = ['#compdef', cmd]
firstline.extend(cmds)
subcmds = list(self.subcmds.keys())
with path.open('w') as zcf:
print(*firstline, end='\n\n', file=zcf)
# main function
print('function _{} {{'.format(cmd), file=zcf)
print('local line', file=zcf)
print('_arguments -C', end=BLK, file=zcf)
if subcmds:
# list of subcommands and their description
substrs = ["{}\\:'{}'".format(sub, self.subcmds[sub].help)
for sub in subcmds]
print('"1:Commands:(({}))"'.format(' '.join(substrs)),
end=BLK, file=zcf)
self._zsh_comp_command(zcf, None, grouping)
if subcmds:
print("'*::arg:->args'", file=zcf)
print('case $line[1] in', file=zcf)
for sub in subcmds:
print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd),
file=zcf)
print('esac', file=zcf)
print('}', file=zcf)
# all subcommand completion handlers
for sub in subcmds:
print('\nfunction _{}_{} {{'.format(cmd, sub), file=zcf)
print('_arguments', end=BLK, file=zcf)
self._zsh_comp_command(zcf, sub, grouping)
print('}', file=zcf)
if sourceable:
print('\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf) | Write zsh compdef script.
Args:
path (path-like): desired path of the compdef script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
sourceable (bool): if True, the generated file will contain an
explicit call to ``compdef``, which means it can be sourced
to activate CLI completion. | Below is the instruction that describes the task:
### Input:
Write zsh compdef script.
Args:
path (path-like): desired path of the compdef script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
sourceable (bool): if True, the generated file will contain an
explicit call to ``compdef``, which means it can be sourced
to activate CLI completion.
### Response:
def zsh_complete(self, path, cmd, *cmds, sourceable=False):
"""Write zsh compdef script.
Args:
path (path-like): desired path of the compdef script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
sourceable (bool): if True, the generated file will contain an
explicit call to ``compdef``, which means it can be sourced
to activate CLI completion.
"""
grouping = internal.zsh_version() >= (5, 4)
path = pathlib.Path(path)
firstline = ['#compdef', cmd]
firstline.extend(cmds)
subcmds = list(self.subcmds.keys())
with path.open('w') as zcf:
print(*firstline, end='\n\n', file=zcf)
# main function
print('function _{} {{'.format(cmd), file=zcf)
print('local line', file=zcf)
print('_arguments -C', end=BLK, file=zcf)
if subcmds:
# list of subcommands and their description
substrs = ["{}\\:'{}'".format(sub, self.subcmds[sub].help)
for sub in subcmds]
print('"1:Commands:(({}))"'.format(' '.join(substrs)),
end=BLK, file=zcf)
self._zsh_comp_command(zcf, None, grouping)
if subcmds:
print("'*::arg:->args'", file=zcf)
print('case $line[1] in', file=zcf)
for sub in subcmds:
print('{sub}) _{cmd}_{sub} ;;'.format(sub=sub, cmd=cmd),
file=zcf)
print('esac', file=zcf)
print('}', file=zcf)
# all subcommand completion handlers
for sub in subcmds:
print('\nfunction _{}_{} {{'.format(cmd, sub), file=zcf)
print('_arguments', end=BLK, file=zcf)
self._zsh_comp_command(zcf, sub, grouping)
print('}', file=zcf)
if sourceable:
print('\ncompdef _{0} {0}'.format(cmd), *cmds, file=zcf) |
def add(self, text, checked=False, sort=None):
"""Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
"""
if self.parent is None:
raise exception.InvalidException('Item has no parent')
node = self.parent.add(text, checked, sort)
self.indent(node)
return node | Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting. | Below is the instruction that describes the task:
### Input:
Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
### Response:
def add(self, text, checked=False, sort=None):
"""Add a new sub item to the list. This item must already be attached to a list.
Args:
text (str): The text.
checked (bool): Whether this item is checked.
sort (int): Item id for sorting.
"""
if self.parent is None:
raise exception.InvalidException('Item has no parent')
node = self.parent.add(text, checked, sort)
self.indent(node)
return node |
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
"""
Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the giving
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just each other
if ``iv`` is not provided) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack'
"""
if alphabet is None:
alphabet = set(i for i in range(256) if i not in (0, 10, 13))
else:
alphabet = set(six.iterbytes(alphabet))
if iv is None:
iv = b'\0' * len(data)
if len(data) != len(iv):
raise ValueError('length of iv differs from data')
if not min_depth and data == iv:
return []
data = xor(data, iv)
# Pre-flight check to see if we have all the bits we need.
mask = 0
for ch in alphabet:
mask |= ch
mask = ~mask
# Map all bytes in data into a {byte: [pos...]} dictionary, check
# if we have enough bits along the way.
data_map_tmpl = {}
for i, ch in enumerate(six.iterbytes(data)):
if ch & mask:
raise ValueError('Alphabet does not contain enough bits.')
data_map_tmpl.setdefault(ch, []).append(i)
# Let's try to find a solution.
for depth in range(max(min_depth, 1), max_depth + 1):
# Prepare for round.
data_map = data_map_tmpl.copy()
results = [[None] * len(data) for _ in range(depth)]
for values in itertools.product(*([alphabet] * (depth - 1))):
# Prepare cumulative mask for this combination of alphabet.
mask = 0
for value in values:
mask ^= value
for ch in list(data_map):
r = ch ^ mask
if r in alphabet:
# Found a solution for this character, mark the result.
pos = data_map.pop(ch)
for p in pos:
results[0][p] = r
for i, value in enumerate(values):
results[i + 1][p] = value
if not data_map:
# Aaaand.. We're done!
return [
b''.join(six.int2byte(b) for b in r)
for r in results
]
# No solution found at this depth. Increase depth, try again.
raise ValueError('No solution found.') | Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the given
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just each other
if ``iv`` is not provided) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack' | Below is the instruction that describes the task:
### Input:
Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the given
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just each other
if ``iv`` is not provided) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack'
### Response:
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
"""
Produce a series of bytestrings that when XORed together end up being
equal to ``data`` and only contain characters from the given
``alphabet``. The initial state (or previous state) can be given as
``iv``.
Arguments:
data (bytes): The data to recreate as a series of XOR operations.
alphabet (bytes): The bytestring containing the allowed characters
for the XOR values. If ``None``, all characters except NUL bytes,
carriage returns and newlines will be allowed.
max_depth (int): The maximum depth to look for a solution.
min_depth (int): The minimum depth to look for a solution.
iv (bytes): Initialization vector. If ``None``, it will be assumed the
operation starts at an all zero string.
Returns:
A list of bytestrings that, when XOR'ed with ``iv`` (or just each other
if ``iv`` is not provided) will be the same as ``data``.
Examples:
Produce a series of strings that when XORed together will result in
the string 'pwnypack' using only ASCII characters in the range 65 to
96:
>>> from pwny import *
>>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
[b'````````', b'AAAAABAA', b'QVOXQCBJ']
>>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
'pwnypack'
"""
if alphabet is None:
alphabet = set(i for i in range(256) if i not in (0, 10, 13))
else:
alphabet = set(six.iterbytes(alphabet))
if iv is None:
iv = b'\0' * len(data)
if len(data) != len(iv):
raise ValueError('length of iv differs from data')
if not min_depth and data == iv:
return []
data = xor(data, iv)
# Pre-flight check to see if we have all the bits we need.
mask = 0
for ch in alphabet:
mask |= ch
mask = ~mask
# Map all bytes in data into a {byte: [pos...]} dictionary, check
# if we have enough bits along the way.
data_map_tmpl = {}
for i, ch in enumerate(six.iterbytes(data)):
if ch & mask:
raise ValueError('Alphabet does not contain enough bits.')
data_map_tmpl.setdefault(ch, []).append(i)
# Let's try to find a solution.
for depth in range(max(min_depth, 1), max_depth + 1):
# Prepare for round.
data_map = data_map_tmpl.copy()
results = [[None] * len(data) for _ in range(depth)]
for values in itertools.product(*([alphabet] * (depth - 1))):
# Prepare cumulative mask for this combination of alphabet.
mask = 0
for value in values:
mask ^= value
for ch in list(data_map):
r = ch ^ mask
if r in alphabet:
# Found a solution for this character, mark the result.
pos = data_map.pop(ch)
for p in pos:
results[0][p] = r
for i, value in enumerate(values):
results[i + 1][p] = value
if not data_map:
# Aaaand.. We're done!
return [
b''.join(six.int2byte(b) for b in r)
for r in results
]
# No solution found at this depth. Increase depth, try again.
raise ValueError('No solution found.') |
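The pre-flight check in the function rejects alphabets that cannot possibly cover the target bytes. A standalone sketch of that bit test, with an alphabet and data chosen for illustration:

```python
alphabet = set(range(65, 97))      # ASCII 'A'..'`', as in the docstring example
covered = 0
for ch in alphabet:
    covered |= ch                  # union of all bits any alphabet byte can contribute

data = b'pwnypack'
# Every bit set in the data must be settable by some alphabet byte,
# otherwise no XOR combination of alphabet bytes can produce it.
assert all((ch & ~covered & 0xff) == 0 for ch in data)
```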
def _get_dir(toml_config_setting, sawtooth_home_dir, windows_dir, default_dir):
"""Determines the directory path based on configuration.
Arguments:
toml_config_setting (str): The name of the config setting related
to the directory which will appear in path.toml.
sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME
environment variable. For example, for 'data' if the data
directory is $SAWTOOTH_HOME/data.
windows_dir (str): The windows path relative to the computed base
directory.
default_dir (str): The default path on Linux.
Returns:
directory (str): The path.
"""
conf_file = os.path.join(_get_config_dir(), 'path.toml')
if os.path.exists(conf_file):
with open(conf_file) as fd:
raw_config = fd.read()
toml_config = toml.loads(raw_config)
if toml_config_setting in toml_config:
return toml_config[toml_config_setting]
if 'SAWTOOTH_HOME' in os.environ:
return os.path.join(os.environ['SAWTOOTH_HOME'], sawtooth_home_dir)
if os.name == 'nt':
base_dir = \
os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
return os.path.join(base_dir, windows_dir)
return default_dir | Determines the directory path based on configuration.
Arguments:
toml_config_setting (str): The name of the config setting related
to the directory which will appear in path.toml.
sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME
environment variable. For example, for 'data' if the data
directory is $SAWTOOTH_HOME/data.
windows_dir (str): The windows path relative to the computed base
directory.
default_dir (str): The default path on Linux.
Returns:
directory (str): The path. | Below is the instruction that describes the task:
### Input:
Determines the directory path based on configuration.
Arguments:
toml_config_setting (str): The name of the config setting related
to the directory which will appear in path.toml.
sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME
environment variable. For example, for 'data' if the data
directory is $SAWTOOTH_HOME/data.
windows_dir (str): The windows path relative to the computed base
directory.
default_dir (str): The default path on Linux.
Returns:
directory (str): The path.
### Response:
def _get_dir(toml_config_setting, sawtooth_home_dir, windows_dir, default_dir):
"""Determines the directory path based on configuration.
Arguments:
toml_config_setting (str): The name of the config setting related
to the directory which will appear in path.toml.
sawtooth_home_dir (str): The directory under the SAWTOOTH_HOME
environment variable. For example, for 'data' if the data
directory is $SAWTOOTH_HOME/data.
windows_dir (str): The windows path relative to the computed base
directory.
default_dir (str): The default path on Linux.
Returns:
directory (str): The path.
"""
conf_file = os.path.join(_get_config_dir(), 'path.toml')
if os.path.exists(conf_file):
with open(conf_file) as fd:
raw_config = fd.read()
toml_config = toml.loads(raw_config)
if toml_config_setting in toml_config:
return toml_config[toml_config_setting]
if 'SAWTOOTH_HOME' in os.environ:
return os.path.join(os.environ['SAWTOOTH_HOME'], sawtooth_home_dir)
if os.name == 'nt':
base_dir = \
os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
return os.path.join(base_dir, windows_dir)
return default_dir |
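A hypothetical call showing the precedence the function implements: a `data_dir` key in path.toml wins, then `$SAWTOOTH_HOME/data`, then the Windows-relative directory, and finally the Linux default. The argument values here are illustrative only.

```python
data_dir = _get_dir(
    toml_config_setting='data_dir',
    sawtooth_home_dir='data',
    windows_dir='data',
    default_dir='/var/lib/sawtooth',
)
```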
def sort_idx(m, reverse=False):
"""Return the indices of m in sorted order (default: ascending order)"""
return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse) | Return the indices of m in sorted order (default: ascending order) | Below is the instruction that describes the task:
### Input:
Return the indices of m in sorted order (default: ascending order)
### Response:
def sort_idx(m, reverse=False):
"""Return the indices of m in sorted order (default: ascending order)"""
return sorted(range(len(m)), key=lambda k: m[k], reverse=reverse) |
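A quick usage example with illustrative values:

```python
m = [30, 10, 20]
sort_idx(m)                # [1, 2, 0]  -- m[1] <= m[2] <= m[0]
sort_idx(m, reverse=True)  # [0, 2, 1]
```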
def meta(self):
"""Data for loading later"""
mount_points = []
for overlay in self.overlays:
mount_points.append(overlay.mount_point)
return [self.end_dir, self.start_dir, mount_points] | Data for loading later | Below is the instruction that describes the task:
### Input:
Data for loading later
### Response:
def meta(self):
"""Data for loading later"""
mount_points = []
for overlay in self.overlays:
mount_points.append(overlay.mount_point)
return [self.end_dir, self.start_dir, mount_points] |
def _WriteFileChunk(self, chunk):
"""Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written
"""
if chunk.chunk_index == 0:
# Make sure size of the original file is passed. It's required
# when output_writer is StreamingTarWriter.
st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
yield self.archive_generator.WriteFileHeader(target_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk.data)
if chunk.chunk_index == chunk.total_chunks - 1:
yield self.archive_generator.WriteFileFooter()
self.archived_files.add(chunk.client_path) | Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written | Below is the instruction that describes the task:
### Input:
Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written
### Response:
def _WriteFileChunk(self, chunk):
"""Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written
"""
if chunk.chunk_index == 0:
# Make sure size of the original file is passed. It's required
# when output_writer is StreamingTarWriter.
st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
yield self.archive_generator.WriteFileHeader(target_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk.data)
if chunk.chunk_index == chunk.total_chunks - 1:
yield self.archive_generator.WriteFileFooter()
self.archived_files.add(chunk.client_path) |
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
Returns the right RadiusAttribute class for the given data.
"""
if _pkt:
attr_type = orb(_pkt[0])
return cls.registered_attributes.get(attr_type, cls)
return cls | Returns the right RadiusAttribute class for the given data. | Below is the instruction that describes the task:
### Input:
Returns the right RadiusAttribute class for the given data.
### Response:
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
Returns the right RadiusAttribute class for the given data.
"""
if _pkt:
attr_type = orb(_pkt[0])
return cls.registered_attributes.get(attr_type, cls)
return cls |
def count_cores_in_state(self, state, app_id):
"""Count the number of cores in a given state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). Users should treat this mechanism with caution. Future
versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState` or
iterable
Count the number of cores currently in this state. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string or
an iterable of these, in which case the total count will be
returned.
"""
if (isinstance(state, collections.Iterable) and
not isinstance(state, str)):
# If the state is iterable then call for each state and return the
# sum.
return sum(self.count_cores_in_state(s, app_id) for s in state)
if isinstance(state, str):
try:
state = getattr(consts.AppState, state)
except AttributeError:
# The state name is not present in consts.AppSignal! The next
# test will throw an appropriate exception since no string can
# be "in" an IntEnum.
pass
if state not in consts.AppState:
raise ValueError(
"count_cores_in_state: Unknown state {}".format(
repr(state)))
# TODO Determine a way to nicely express a way to use the region data
# stored in arg3.
region = 0x0000ffff # Largest possible machine, level 0
level = (region >> 16) & 0x3
mask = region & 0x0000ffff
# Construct the packet
arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count]
arg2 = ((level << 26) | (1 << 22) |
(consts.AppDiagnosticSignal.count << 20) | (state << 16) |
(0xff << 8) | app_id) # App mask for 1 app_id = 0xff
arg3 = mask
# Transmit and return the count
return self._send_scp(
255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1 | Count the number of cores in a given state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). Users should treat this mechanism with caution. Future
versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState` or
iterable
Count the number of cores currently in this state. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string or
an iterable of these, in which case the total count will be
returned. | Below is the instruction that describes the task:
### Input:
Count the number of cores in a given state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). Users should treat this mechanism with caution. Future
versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState` or
iterable
Count the number of cores currently in this state. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string or
an iterable of these, in which case the total count will be
returned.
### Response:
def count_cores_in_state(self, state, app_id):
"""Count the number of cores in a given state.
.. warning::
In current implementations of SARK, signals (which are used to
determine the state of cores) are highly likely to arrive but this
is not guaranteed (especially when the system's network is heavily
utilised). Users should treat this mechanism with caution. Future
versions of SARK may resolve this issue.
Parameters
----------
state : string or :py:class:`~rig.machine_control.consts.AppState` or
iterable
Count the number of cores currently in this state. This may be
either an entry of the
:py:class:`~rig.machine_control.consts.AppState` enum or, for
convenience, the name of a state (defined in
:py:class:`~rig.machine_control.consts.AppState`) as a string or
an iterable of these, in which case the total count will be
returned.
"""
if (isinstance(state, collections.Iterable) and
not isinstance(state, str)):
# If the state is iterable then call for each state and return the
# sum.
return sum(self.count_cores_in_state(s, app_id) for s in state)
if isinstance(state, str):
try:
state = getattr(consts.AppState, state)
except AttributeError:
# The state name is not present in consts.AppSignal! The next
# test will throw an appropriate exception since no string can
# be "in" an IntEnum.
pass
if state not in consts.AppState:
raise ValueError(
"count_cores_in_state: Unknown state {}".format(
repr(state)))
# TODO Determine a way to nicely express a way to use the region data
# stored in arg3.
region = 0x0000ffff # Largest possible machine, level 0
level = (region >> 16) & 0x3
mask = region & 0x0000ffff
# Construct the packet
arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count]
arg2 = ((level << 26) | (1 << 22) |
(consts.AppDiagnosticSignal.count << 20) | (state << 16) |
(0xff << 8) | app_id) # App mask for 1 app_id = 0xff
arg3 = mask
# Transmit and return the count
return self._send_scp(
255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1 |
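A standalone sketch of the `arg2` bit packing used above; the numeric enum values are placeholders for illustration, not the actual `consts` values:

```python
level = 0       # machine level extracted from the region word
signal = 1      # placeholder for consts.AppDiagnosticSignal.count
state = 7       # placeholder for a consts.AppState member
app_id = 16

arg2 = ((level << 26) | (1 << 22) | (signal << 20) |
        (state << 16) | (0xff << 8) | app_id)
assert hex(arg2) == '0x57ff10'
```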
def program(self, prog, offset = 0):
"""
.. _program:
Write the content of the iterable ``prog`` starting with the optional offset ``offset``
to the device.
Invokes program_word_.
"""
for addr, word in enumerate(prog):
self.program_word(offset + addr, word) | .. _program:
Write the content of the iterable ``prog`` starting with the optional offset ``offset``
to the device.
Invokes program_word_. | Below is the instruction that describes the task:
### Input:
.. _program:
Write the content of the iterable ``prog`` starting with the optional offset ``offset``
to the device.
Invokes program_word_.
### Response:
def program(self, prog, offset = 0):
"""
.. _program:
Write the content of the iterable ``prog`` starting with the optional offset ``offset``
to the device.
Invokes program_word_.
"""
for addr, word in enumerate(prog):
self.program_word(offset + addr, word) |
def configure_app(app):
"""Configure Flask/Celery application.
* Rio will find environment variable `RIO_SETTINGS` first::
$ export RIO_SETTINGS=/path/to/settings.cfg
$ rio worker
* If `RIO_SETTINGS` is missing, Rio will try to load configuration
module in `rio.settings` according to another environment
variable `RIO_ENV`. Default load `rio.settings.dev`.
$ export RIO_ENV=prod
$ rio worker
"""
app.config_from_object('rio.settings.default')
if environ.get('RIO_SETTINGS'):
app.config_from_envvar('RIO_SETTINGS')
return
config_map = {
'dev': 'rio.settings.dev',
'stag': 'rio.settings.stag',
'prod': 'rio.settings.prod',
'test': 'rio.settings.test',
}
rio_env = environ.get('RIO_ENV', 'dev')
config = config_map.get(rio_env, config_map['dev'])
app.config_from_object(config) | Configure Flask/Celery application.
* Rio will find environment variable `RIO_SETTINGS` first::
$ export RIO_SETTINGS=/path/to/settings.cfg
$ rio worker
* If `RIO_SETTINGS` is missing, Rio will try to load configuration
module in `rio.settings` according to another environment
variable `RIO_ENV`. Default load `rio.settings.dev`.
$ export RIO_ENV=prod
$ rio worker | Below is the instruction that describes the task:
### Input:
Configure Flask/Celery application.
* Rio will find environment variable `RIO_SETTINGS` first::
$ export RIO_SETTINGS=/path/to/settings.cfg
$ rio worker
* If `RIO_SETTINGS` is missing, Rio will try to load configuration
module in `rio.settings` according to another environment
variable `RIO_ENV`. Default load `rio.settings.dev`.
$ export RIO_ENV=prod
$ rio worker
### Response:
def configure_app(app):
"""Configure Flask/Celery application.
* Rio will find environment variable `RIO_SETTINGS` first::
$ export RIO_SETTINGS=/path/to/settings.cfg
$ rio worker
* If `RIO_SETTINGS` is missing, Rio will try to load configuration
module in `rio.settings` according to another environment
variable `RIO_ENV`. Default load `rio.settings.dev`.
$ export RIO_ENV=prod
$ rio worker
"""
app.config_from_object('rio.settings.default')
if environ.get('RIO_SETTINGS'):
app.config_from_envvar('RIO_SETTINGS')
return
config_map = {
'dev': 'rio.settings.dev',
'stag': 'rio.settings.stag',
'prod': 'rio.settings.prod',
'test': 'rio.settings.test',
}
rio_env = environ.get('RIO_ENV', 'dev')
config = config_map.get(rio_env, config_map['dev'])
app.config_from_object(config) |
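A usage sketch of the environment-driven selection: with `RIO_SETTINGS` unset and `RIO_ENV=prod`, the `rio.settings.prod` module is loaded. The `app` object is assumed to be the Celery/Flask application.

```python
from os import environ

environ.pop('RIO_SETTINGS', None)   # no file-based override
environ['RIO_ENV'] = 'prod'
configure_app(app)                  # loads 'rio.settings.prod' on top of the defaults
```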
def get_daemon_stats(self, details=False):
"""Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict
"""
logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
return self.con.get('stats%s' % ('?details=1' if details else '')) | Send an HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict | Below is the instruction that describes the task:
### Input:
Send an HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict
### Response:
def get_daemon_stats(self, details=False):
"""Send a HTTP request to the satellite (GET /get_daemon_stats)
:return: Daemon statistics
:rtype: dict
"""
logger.debug("Get daemon statistics for %s, %s %s", self.name, self.alive, self.reachable)
return self.con.get('stats%s' % ('?details=1' if details else '')) |
def get_logger(name):
"""Get a logger with the specified name."""
logger = logging.getLogger(name)
logger.setLevel(getenv('LOGLEVEL', 'INFO'))
return logger | Get a logger with the specified name. | Below is the instruction that describes the task:
### Input:
Get a logger with the specified name.
### Response:
def get_logger(name):
"""Get a logger with the specified name."""
logger = logging.getLogger(name)
logger.setLevel(getenv('LOGLEVEL', 'INFO'))
return logger |
def add_func(self, transmute_func, transmute_context):
""" add a transmute function's swagger definition to the spec """
swagger_path = transmute_func.get_swagger_path(transmute_context)
for p in transmute_func.paths:
self.add_path(p, swagger_path) | add a transmute function's swagger definition to the spec | Below is the instruction that describes the task:
### Input:
add a transmute function's swagger definition to the spec
### Response:
def add_func(self, transmute_func, transmute_context):
""" add a transmute function's swagger definition to the spec """
swagger_path = transmute_func.get_swagger_path(transmute_context)
for p in transmute_func.paths:
self.add_path(p, swagger_path) |
def create_container_instance_group(access_token, subscription_id, resource_group,
container_group_name, container_list, location,
ostype='Linux', port=80, iptype='public'):
'''Create a new container group with a list of containers specified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
container_group_body = {'location': location}
properties = {'osType': ostype}
properties['containers'] = container_list
ipport = {'protocol': 'TCP'}
ipport['port'] = port
ipaddress = {'ports': [ipport]}
ipaddress['type'] = iptype
properties['ipAddress'] = ipaddress
container_group_body['properties'] = properties
body = json.dumps(container_group_body)
return do_put(endpoint, body, access_token) | Create a new container group with a list of containers specified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group. | Below is the instruction that describes the task:
### Input:
Create a new container group with a list of containers specified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group.
### Response:
def create_container_instance_group(access_token, subscription_id, resource_group,
container_group_name, container_list, location,
ostype='Linux', port=80, iptype='public'):
'''Create a new container group with a list of containers specified by container_list.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_list (list): A list of container properties. Use create_container_definition to
create each container property set.
location (str): Azure data center location. E.g. westus.
ostype (str): Container operating system type. Linux or Windows.
port (int): TCP port number. E.g. 8080.
iptype (str): Type of IP address. E.g. public.
Returns:
HTTP response with JSON body of container group.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
container_group_body = {'location': location}
properties = {'osType': ostype}
properties['containers'] = container_list
ipport = {'protocol': 'TCP'}
ipport['port'] = port
ipaddress = {'ports': [ipport]}
ipaddress['type'] = iptype
properties['ipAddress'] = ipaddress
container_group_body['properties'] = properties
body = json.dumps(container_group_body)
return do_put(endpoint, body, access_token) |
def to_glyphs_family_user_data_from_ufo(self, ufo):
"""Set the GSFont userData from the UFO family-wide lib data."""
target_user_data = self.font.userData
try:
for key, value in ufo.lib[FONT_USER_DATA_KEY].items():
# Existing values taken from the designspace lib take precedence
if key not in target_user_data.keys():
target_user_data[key] = value
except KeyError:
# No FONT_USER_DATA in ufo.lib
pass | Set the GSFont userData from the UFO family-wide lib data. | Below is the instruction that describes the task:
### Input:
Set the GSFont userData from the UFO family-wide lib data.
### Response:
def to_glyphs_family_user_data_from_ufo(self, ufo):
"""Set the GSFont userData from the UFO family-wide lib data."""
target_user_data = self.font.userData
try:
for key, value in ufo.lib[FONT_USER_DATA_KEY].items():
# Existing values taken from the designspace lib take precedence
if key not in target_user_data.keys():
target_user_data[key] = value
except KeyError:
# No FONT_USER_DATA in ufo.lib
pass |
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key):
"""Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents.
"""
def _assert_proto_container_unique_keys(proto_list, get_key):
"""Asserts proto_list to only contains unique keys.
Args:
proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer`.
get_key: A function that takes an element of `proto_list` and returns a
hashable key.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
"""
keys = set()
for item in proto_list:
key = get_key(item)
if key in keys:
raise _ProtoListDuplicateKeyError(key)
keys.add(key)
_assert_proto_container_unique_keys(dst_proto_list, get_key)
_assert_proto_container_unique_keys(src_proto_list, get_key)
key_to_proto = {}
for proto in dst_proto_list:
key = get_key(proto)
key_to_proto[key] = proto
for proto in src_proto_list:
key = get_key(proto)
if key in key_to_proto:
if proto != key_to_proto.get(key):
raise _SameKeyDiffContentError(key)
else:
dst_proto_list.add().CopyFrom(proto) | Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents. | Below is the instruction that describes the task:
### Input:
Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents.
### Response:
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key):
"""Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents.
"""
def _assert_proto_container_unique_keys(proto_list, get_key):
"""Asserts proto_list to only contains unique keys.
Args:
proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer`.
get_key: A function that takes an element of `proto_list` and returns a
hashable key.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
"""
keys = set()
for item in proto_list:
key = get_key(item)
if key in keys:
raise _ProtoListDuplicateKeyError(key)
keys.add(key)
_assert_proto_container_unique_keys(dst_proto_list, get_key)
_assert_proto_container_unique_keys(src_proto_list, get_key)
key_to_proto = {}
for proto in dst_proto_list:
key = get_key(proto)
key_to_proto[key] = proto
for proto in src_proto_list:
key = get_key(proto)
if key in key_to_proto:
if proto != key_to_proto.get(key):
raise _SameKeyDiffContentError(key)
else:
dst_proto_list.add().CopyFrom(proto) |
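The same merge rule can be illustrated without protobuf containers. A plain-Python sketch; the function name and error messages here are illustrative, not the module's own:

```python
def merge_by_key(dst, src, get_key):
    index = {get_key(item): item for item in dst}
    if len(index) != len(dst):
        raise ValueError('duplicate keys in destination list')
    for item in src:
        key = get_key(item)
        if key in index:
            if index[key] != item:
                raise ValueError('same key, different content: {!r}'.format(key))
        else:
            dst.append(item)
            index[key] = item

dst = [{'name': 'a', 'value': 1}]
merge_by_key(dst, [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}],
             get_key=lambda item: item['name'])
# dst is now [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
```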
def modify_attached_policies(self, role_name, new_policies):
"""Make sure this role has just the new policies"""
parts = role_name.split('/', 1)
if len(parts) == 2:
prefix, name = parts
prefix = "/{0}/".format(prefix)
else:
prefix = "/"
name = parts[0]
current_attached_policies = []
with self.ignore_missing():
current_attached_policies = self.client.list_attached_role_policies(RoleName=name)
current_attached_policies = [p['PolicyArn'] for p in current_attached_policies["AttachedPolicies"]]
new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies]
changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies))
if changes:
with self.catch_boto_400("Couldn't modify attached policies", role=role_name):
for policy in new_attached_policies:
if policy not in current_attached_policies:
for _ in self.change("+", "attached_policy", role=role_name, policy=policy):
self.client.attach_role_policy(RoleName=name, PolicyArn=policy)
for policy in current_attached_policies:
if policy not in new_attached_policies:
for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy):
self.client.detach_role_policy(RoleName=name, PolicyArn=policy) | Make sure this role has just the new policies | Below is the instruction that describes the task:
### Input:
Make sure this role has just the new policies
### Response:
def modify_attached_policies(self, role_name, new_policies):
"""Make sure this role has just the new policies"""
parts = role_name.split('/', 1)
if len(parts) == 2:
prefix, name = parts
prefix = "/{0}/".format(prefix)
else:
prefix = "/"
name = parts[0]
current_attached_policies = []
with self.ignore_missing():
current_attached_policies = self.client.list_attached_role_policies(RoleName=name)
current_attached_policies = [p['PolicyArn'] for p in current_attached_policies["AttachedPolicies"]]
new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies]
changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies))
if changes:
with self.catch_boto_400("Couldn't modify attached policies", role=role_name):
for policy in new_attached_policies:
if policy not in current_attached_policies:
for _ in self.change("+", "attached_policy", role=role_name, policy=policy):
self.client.attach_role_policy(RoleName=name, PolicyArn=policy)
for policy in current_attached_policies:
if policy not in new_attached_policies:
for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy):
self.client.detach_role_policy(RoleName=name, PolicyArn=policy) |
def on_touch_down(self, touch):
"""Tell my parent if I've been touched"""
if self.parent is None:
return
if self.collide_point(*touch.pos):
self.parent.bar_touched(self, touch) | Tell my parent if I've been touched | Below is the the instruction that describes the task:
### Input:
Tell my parent if I've been touched
### Response:
def on_touch_down(self, touch):
"""Tell my parent if I've been touched"""
if self.parent is None:
return
if self.collide_point(*touch.pos):
self.parent.bar_touched(self, touch) |
def calc_av_uv_v1(self):
"""Calculate the flown through area and the wetted perimeter of both
forelands.
Note that each foreland lies between the main channel and one
outer embankment and that water flowing exactly above a foreland
is contributing to |AV|. The theoretical surface separating water
above the main channel from water above the foreland is not
contributing to |UV|, but the surface separating water above the
foreland from water above its outer embankment is contributing to |UV|.
Required control parameters:
|HM|
|BV|
|BNV|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AV|
|UV|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
    have a length of 2 meters, their non-vertical legs show an inclination
    of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
    forelands lie 1 meter above the main channel's bottom.
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bv(2.0)
>>> bnv(4.0)
>>> derived.hv(1.0)
The first example deals with normal flow conditions, where water flows
within the main channel completely (|H| < |HM|):
>>> fluxes.h = 0.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(0.0, 0.0)
>>> fluxes.uv
uv(0.0, 0.0)
The second example deals with moderate high flow conditions, where
water flows over both forelands, but not over their embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(1.5, 1.5)
>>> fluxes.uv
uv(4.061553, 4.061553)
The third example deals with extreme high flow conditions, where
    water flows over both forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(7.0, 7.0)
>>> fluxes.uv
uv(6.623106, 6.623106)
    The fourth example assures that zero widths or heights of the forelands
are handled properly:
>>> bv.left = 0.0
>>> derived.hv.right = 0.0
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(4.0, 3.0)
>>> fluxes.uv
uv(4.623106, 3.5)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= con.hm:
flu.av[i] = 0.
flu.uv[i] = 0.
elif flu.h <= (con.hm+der.hv[i]):
flu.av[i] = (flu.h-con.hm)*(con.bv[i]+(flu.h-con.hm)*con.bnv[i]/2.)
flu.uv[i] = con.bv[i]+(flu.h-con.hm)*(1.+con.bnv[i]**2)**.5
else:
flu.av[i] = (der.hv[i]*(con.bv[i]+der.hv[i]*con.bnv[i]/2.) +
((flu.h-(con.hm+der.hv[i])) *
(con.bv[i]+der.hv[i]*con.bnv[i])))
flu.uv[i] = ((con.bv[i])+(der.hv[i]*(1.+con.bnv[i]**2)**.5) +
(flu.h-(con.hm+der.hv[i]))) | Calculate the flown through area and the wetted perimeter of both
forelands.
    Note that each foreland lies between the main channel and one
    outer embankment and that water flowing exactly above a foreland
    is contributing to |AV|. The theoretical surface separating water
    above the main channel from water above the foreland is not
    contributing to |UV|, but the surface separating water above the
    foreland from water above its outer embankment is contributing to |UV|.
Required control parameters:
|HM|
|BV|
|BNV|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AV|
|UV|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
    have a length of 2 meters, their non-vertical legs show an inclination
    of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
    forelands lie 1 meter above the main channel's bottom.
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bv(2.0)
>>> bnv(4.0)
>>> derived.hv(1.0)
The first example deals with normal flow conditions, where water flows
within the main channel completely (|H| < |HM|):
>>> fluxes.h = 0.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(0.0, 0.0)
>>> fluxes.uv
uv(0.0, 0.0)
The second example deals with moderate high flow conditions, where
water flows over both forelands, but not over their embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(1.5, 1.5)
>>> fluxes.uv
uv(4.061553, 4.061553)
The third example deals with extreme high flow conditions, where
    water flows over both forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(7.0, 7.0)
>>> fluxes.uv
uv(6.623106, 6.623106)
    The fourth example assures that zero widths or heights of the forelands
are handled properly:
>>> bv.left = 0.0
>>> derived.hv.right = 0.0
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(4.0, 3.0)
>>> fluxes.uv
uv(4.623106, 3.5) | Below is the the instruction that describes the task:
### Input:
Calculate the flown through area and the wetted perimeter of both
forelands.
    Note that each foreland lies between the main channel and one
    outer embankment and that water flowing exactly above a foreland
    is contributing to |AV|. The theoretical surface separating water
    above the main channel from water above the foreland is not
    contributing to |UV|, but the surface separating water above the
    foreland from water above its outer embankment is contributing to |UV|.
Required control parameters:
|HM|
|BV|
|BNV|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AV|
|UV|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
    have a length of 2 meters, their non-vertical legs show an inclination
    of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
    forelands lie 1 meter above the main channel's bottom.
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bv(2.0)
>>> bnv(4.0)
>>> derived.hv(1.0)
The first example deals with normal flow conditions, where water flows
within the main channel completely (|H| < |HM|):
>>> fluxes.h = 0.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(0.0, 0.0)
>>> fluxes.uv
uv(0.0, 0.0)
The second example deals with moderate high flow conditions, where
water flows over both forelands, but not over their embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(1.5, 1.5)
>>> fluxes.uv
uv(4.061553, 4.061553)
The third example deals with extreme high flow conditions, where
    water flows over both forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(7.0, 7.0)
>>> fluxes.uv
uv(6.623106, 6.623106)
    The fourth example assures that zero widths or heights of the forelands
are handled properly:
>>> bv.left = 0.0
>>> derived.hv.right = 0.0
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(4.0, 3.0)
>>> fluxes.uv
uv(4.623106, 3.5)
### Response:
def calc_av_uv_v1(self):
"""Calculate the flown through area and the wetted perimeter of both
forelands.
    Note that each foreland lies between the main channel and one
    outer embankment and that water flowing exactly above a foreland
    is contributing to |AV|. The theoretical surface separating water
    above the main channel from water above the foreland is not
    contributing to |UV|, but the surface separating water above the
    foreland from water above its outer embankment is contributing to |UV|.
Required control parameters:
|HM|
|BV|
|BNV|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AV|
|UV|
Examples:
Generally, right trapezoids are assumed. Here, for simplicity, both
forelands are assumed to be symmetrical. Their smaller bases (bottoms)
    have a length of 2 meters, their non-vertical legs show an inclination
    of 1 meter per 4 meters, and their height (depth) is 1 meter. Both
    forelands lie 1 meter above the main channel's bottom.
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bv(2.0)
>>> bnv(4.0)
>>> derived.hv(1.0)
The first example deals with normal flow conditions, where water flows
within the main channel completely (|H| < |HM|):
>>> fluxes.h = 0.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(0.0, 0.0)
>>> fluxes.uv
uv(0.0, 0.0)
The second example deals with moderate high flow conditions, where
water flows over both forelands, but not over their embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(1.5, 1.5)
>>> fluxes.uv
uv(4.061553, 4.061553)
The third example deals with extreme high flow conditions, where
    water flows over both forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(7.0, 7.0)
>>> fluxes.uv
uv(6.623106, 6.623106)
    The fourth example assures that zero widths or heights of the forelands
are handled properly:
>>> bv.left = 0.0
>>> derived.hv.right = 0.0
>>> model.calc_av_uv_v1()
>>> fluxes.av
av(4.0, 3.0)
>>> fluxes.uv
uv(4.623106, 3.5)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= con.hm:
flu.av[i] = 0.
flu.uv[i] = 0.
elif flu.h <= (con.hm+der.hv[i]):
flu.av[i] = (flu.h-con.hm)*(con.bv[i]+(flu.h-con.hm)*con.bnv[i]/2.)
flu.uv[i] = con.bv[i]+(flu.h-con.hm)*(1.+con.bnv[i]**2)**.5
else:
flu.av[i] = (der.hv[i]*(con.bv[i]+der.hv[i]*con.bnv[i]/2.) +
((flu.h-(con.hm+der.hv[i])) *
(con.bv[i]+der.hv[i]*con.bnv[i])))
flu.uv[i] = ((con.bv[i])+(der.hv[i]*(1.+con.bnv[i]**2)**.5) +
(flu.h-(con.hm+der.hv[i]))) |
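To check the three flow regimes of `calc_av_uv_v1` without setting up HydPy, the same piecewise trapezoid geometry can be written as a plain function; the name and defaults below are ours, taken from the doctest values (hm=1, bv=2, bnv=4, hv=1):

```python
def foreland_area_perimeter(h, hm=1.0, bv=2.0, bnv=4.0, hv=1.0):
    """Flown-through area and wetted perimeter of one symmetric foreland."""
    if h <= hm:                      # water stays within the main channel
        return 0.0, 0.0
    if h <= hm + hv:                 # water on the foreland, below its embankment
        d = h - hm
        return d * (bv + d * bnv / 2.0), bv + d * (1.0 + bnv ** 2) ** 0.5
    # water above the outer embankment: full trapezoid plus a rectangle on top
    av = hv * (bv + hv * bnv / 2.0) + (h - (hm + hv)) * (bv + hv * bnv)
    uv = bv + hv * (1.0 + bnv ** 2) ** 0.5 + (h - (hm + hv))
    return av, uv

print(foreland_area_perimeter(1.5))   # (1.5, 4.061552...), matches the doctest
print(foreland_area_perimeter(2.5))   # (7.0, 6.623105...), matches the doctest
```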
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
'''
Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t)
'''
return self.send(self.gopro_set_response_encode(cmd_id, status), force_mavlink1=force_mavlink1) | Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t) | Below is the the instruction that describes the task:
### Input:
Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t)
### Response:
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
'''
Response from a GOPRO_COMMAND set request
cmd_id : Command ID (uint8_t)
status : Status (uint8_t)
'''
return self.send(self.gopro_set_response_encode(cmd_id, status), force_mavlink1=force_mavlink1) |
def avg_gate_fidelity(self, reference_unitary):
"""
Compute the average gate fidelity of the estimated process with respect to a unitary
process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix.
:return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the
Hilbert space dimension.
:rtype: float
"""
process_fidelity = self.process_fidelity(reference_unitary)
dimension = self.pauli_basis.ops[0].shape[0]
return (dimension * process_fidelity + 1.0) / (dimension + 1.0) | Compute the average gate fidelity of the estimated process with respect to a unitary
process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix.
:return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the
Hilbert space dimension.
:rtype: float | Below is the the instruction that describes the task:
### Input:
Compute the average gate fidelity of the estimated process with respect to a unitary
process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix.
:return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the
Hilbert space dimension.
:rtype: float
### Response:
def avg_gate_fidelity(self, reference_unitary):
"""
Compute the average gate fidelity of the estimated process with respect to a unitary
process. See `Chow et al., 2012, <https://doi.org/10.1103/PhysRevLett.109.060501>`_
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as `rho -> other*rho*other.dag()`, alternatively a superoperator or Pauli-transfer matrix.
:return: The average gate fidelity, a real number between 1/(d+1) and 1, where d is the
Hilbert space dimension.
:rtype: float
"""
process_fidelity = self.process_fidelity(reference_unitary)
dimension = self.pauli_basis.ops[0].shape[0]
return (dimension * process_fidelity + 1.0) / (dimension + 1.0) |
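The conversion in `avg_gate_fidelity` is the scalar identity F_avg = (d * F_process + 1) / (d + 1) for Hilbert-space dimension d. A standalone check with illustrative numbers (the helper name is ours):

```python
def avg_from_process(process_fidelity, dimension):
    # F_avg = (d * F_pro + 1) / (d + 1)
    return (dimension * process_fidelity + 1.0) / (dimension + 1.0)

print(avg_from_process(1.0, 2))    # 1.0 -- a perfect single-qubit gate
print(avg_from_process(0.25, 2))   # 0.5 -- e.g. a completely depolarizing qubit channel
```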
def simple_prot(x, start):
"""Find the first peak to the right of start"""
    # start must be >= 1
for i in range(start,len(x)-1):
a,b,c = x[i-1], x[i], x[i+1]
if b - a > 0 and b -c >= 0:
return i
else:
return None | Find the first peak to the right of start | Below is the the instruction that describes the task:
### Input:
Find the first peak to the right of start
### Response:
def simple_prot(x, start):
"""Find the first peak to the right of start"""
    # start must be >= 1
for i in range(start,len(x)-1):
a,b,c = x[i-1], x[i], x[i+1]
if b - a > 0 and b -c >= 0:
return i
else:
return None |
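A quick sanity check of `simple_prot` on a made-up list (values chosen for illustration), assuming the function above is in scope:

```python
x = [0, 1, 0, 2, 5, 3, 3, 1]
print(simple_prot(x, 1))   # 1 -- x[1] is already a local maximum
print(simple_prot(x, 2))   # 4 -- x[4] == 5 is the first peak right of index 2
```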
def _qnwcheb1(n, a, b):
"""
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
        An n element array of weights
    Notes
    -----
    Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
    return nodes, weights | Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
        An n element array of weights
    Notes
    -----
    Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002. | Below is the the instruction that describes the task:
### Input:
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
        An n element array of weights
    Notes
    -----
    Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
### Response:
def _qnwcheb1(n, a, b):
"""
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
        An n element array of weights
    Notes
    -----
    Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate((np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))))
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)) @ t3
return nodes, weights |
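A rough way to sanity-check `_qnwcheb1`, assuming it and NumPy are in scope as above: integrate x**2 over [0, 2], whose exact value is 8/3.

```python
import numpy as np

nodes, weights = _qnwcheb1(20, 0.0, 2.0)   # 20-point rule on [0, 2]
print(weights @ nodes ** 2)                # should be very close to 8/3 ~ 2.6667
```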
def check_field_multiplicity(tag, previous_tags):
"""
Check the multiplicity of a 'field' for an object.
"""
fail = False
#If the field is single
if not tag.field.multiple:
#If the tag is being created...
if not tag.id:
#... and the new field was already included in the previous tags,
#fail
fail = previous_tags.filter(field=tag.field)
        #If the tag is being modified...
else:
#... but there is only one previous tag (the one that is being
#modifying), do not fail
fail = previous_tags.filter(field=tag.field).count() > 1
return fail | Check the multiplicity of a 'field' for an object. | Below is the the instruction that describes the task:
### Input:
Check the multiplicity of a 'field' for an object.
### Response:
def check_field_multiplicity(tag, previous_tags):
"""
Check the multiplicity of a 'field' for an object.
"""
fail = False
#If the field is single
if not tag.field.multiple:
#If the tag is being created...
if not tag.id:
#... and the new field was already included in the previous tags,
#fail
fail = previous_tags.filter(field=tag.field)
        #If the tag is being modified...
else:
#... but there is only one previous tag (the one that is being
#modifying), do not fail
fail = previous_tags.filter(field=tag.field).count() > 1
return fail |
def records(self):
""" Returns a list of records in SORT_KEY order """
return [self._records[i] for i in range(len(self._records))] | Returns a list of records in SORT_KEY order | Below is the the instruction that describes the task:
### Input:
Returns a list of records in SORT_KEY order
### Response:
def records(self):
""" Returns a list of records in SORT_KEY order """
return [self._records[i] for i in range(len(self._records))] |
def _get_options_group(group=None):
"""Get a specific group of options which are allowed."""
    #: These expect a hexadecimal keyid as their argument, and can be parsed
#: with :func:`_is_hex`.
hex_options = frozenset(['--check-sigs',
'--default-key',
'--default-recipient',
'--delete-keys',
'--delete-secret-keys',
'--delete-secret-and-public-keys',
'--desig-revoke',
'--export',
'--export-secret-keys',
'--export-secret-subkeys',
'--fingerprint',
'--gen-revoke',
'--hidden-encrypt-to',
'--hidden-recipient',
'--list-key',
'--list-keys',
'--list-public-keys',
'--list-secret-keys',
'--list-sigs',
'--recipient',
'--recv-keys',
'--send-keys',
'--edit-key',
'--sign-key',
])
    #: These options expect values which are left unchecked, though still run
#: through :func:`_fix_unsafe`.
unchecked_options = frozenset(['--list-options',
'--passphrase-fd',
'--status-fd',
'--verify-options',
'--command-fd',
])
#: These have their own parsers and don't really fit into a group
other_options = frozenset(['--debug-level',
'--keyserver',
])
#: These should have a directory for an argument
dir_options = frozenset(['--homedir',
])
#: These expect a keyring or keyfile as their argument
keyring_options = frozenset(['--keyring',
'--primary-keyring',
'--secret-keyring',
'--trustdb-name',
])
#: These expect a filename (or the contents of a file as a string) or None
#: (meaning that they read from stdin)
file_or_none_options = frozenset(['--decrypt',
'--decrypt-files',
'--encrypt',
'--encrypt-files',
'--import',
'--verify',
'--verify-files',
'--output',
])
#: These options expect a string. see :func:`_check_preferences`.
pref_options = frozenset(['--digest-algo',
'--cipher-algo',
'--compress-algo',
'--compression-algo',
'--cert-digest-algo',
'--personal-digest-prefs',
'--personal-digest-preferences',
'--personal-cipher-prefs',
'--personal-cipher-preferences',
'--personal-compress-prefs',
'--personal-compress-preferences',
'--pinentry-mode',
'--print-md',
'--trust-model',
])
#: These options expect no arguments
none_options = frozenset(['--allow-loopback-pinentry',
'--always-trust',
'--armor',
'--armour',
'--batch',
'--check-sigs',
'--check-trustdb',
'--clearsign',
'--debug-all',
'--default-recipient-self',
'--detach-sign',
'--export',
'--export-ownertrust',
'--export-secret-keys',
'--export-secret-subkeys',
'--fingerprint',
'--fixed-list-mode',
'--gen-key',
'--import-ownertrust',
'--list-config',
'--list-key',
'--list-keys',
'--list-packets',
'--list-public-keys',
'--list-secret-keys',
'--list-sigs',
'--lock-multiple',
'--lock-never',
'--lock-once',
'--no-default-keyring',
'--no-default-recipient',
'--no-emit-version',
'--no-options',
'--no-tty',
'--no-use-agent',
'--no-verbose',
'--print-mds',
'--quiet',
'--sign',
'--symmetric',
'--throw-keyids',
'--use-agent',
'--verbose',
'--version',
'--with-colons',
'--yes',
])
#: These options expect either None or a hex string
hex_or_none_options = hex_options.intersection(none_options)
allowed = hex_options.union(unchecked_options, other_options, dir_options,
keyring_options, file_or_none_options,
pref_options, none_options)
if group and group in locals().keys():
return locals()[group] | Get a specific group of options which are allowed. | Below is the the instruction that describes the task:
### Input:
Get a specific group of options which are allowed.
### Response:
def _get_options_group(group=None):
"""Get a specific group of options which are allowed."""
    #: These expect a hexadecimal keyid as their argument, and can be parsed
#: with :func:`_is_hex`.
hex_options = frozenset(['--check-sigs',
'--default-key',
'--default-recipient',
'--delete-keys',
'--delete-secret-keys',
'--delete-secret-and-public-keys',
'--desig-revoke',
'--export',
'--export-secret-keys',
'--export-secret-subkeys',
'--fingerprint',
'--gen-revoke',
'--hidden-encrypt-to',
'--hidden-recipient',
'--list-key',
'--list-keys',
'--list-public-keys',
'--list-secret-keys',
'--list-sigs',
'--recipient',
'--recv-keys',
'--send-keys',
'--edit-key',
'--sign-key',
])
    #: These options expect values which are left unchecked, though still run
#: through :func:`_fix_unsafe`.
unchecked_options = frozenset(['--list-options',
'--passphrase-fd',
'--status-fd',
'--verify-options',
'--command-fd',
])
#: These have their own parsers and don't really fit into a group
other_options = frozenset(['--debug-level',
'--keyserver',
])
#: These should have a directory for an argument
dir_options = frozenset(['--homedir',
])
#: These expect a keyring or keyfile as their argument
keyring_options = frozenset(['--keyring',
'--primary-keyring',
'--secret-keyring',
'--trustdb-name',
])
#: These expect a filename (or the contents of a file as a string) or None
#: (meaning that they read from stdin)
file_or_none_options = frozenset(['--decrypt',
'--decrypt-files',
'--encrypt',
'--encrypt-files',
'--import',
'--verify',
'--verify-files',
'--output',
])
#: These options expect a string. see :func:`_check_preferences`.
pref_options = frozenset(['--digest-algo',
'--cipher-algo',
'--compress-algo',
'--compression-algo',
'--cert-digest-algo',
'--personal-digest-prefs',
'--personal-digest-preferences',
'--personal-cipher-prefs',
'--personal-cipher-preferences',
'--personal-compress-prefs',
'--personal-compress-preferences',
'--pinentry-mode',
'--print-md',
'--trust-model',
])
#: These options expect no arguments
none_options = frozenset(['--allow-loopback-pinentry',
'--always-trust',
'--armor',
'--armour',
'--batch',
'--check-sigs',
'--check-trustdb',
'--clearsign',
'--debug-all',
'--default-recipient-self',
'--detach-sign',
'--export',
'--export-ownertrust',
'--export-secret-keys',
'--export-secret-subkeys',
'--fingerprint',
'--fixed-list-mode',
'--gen-key',
'--import-ownertrust',
'--list-config',
'--list-key',
'--list-keys',
'--list-packets',
'--list-public-keys',
'--list-secret-keys',
'--list-sigs',
'--lock-multiple',
'--lock-never',
'--lock-once',
'--no-default-keyring',
'--no-default-recipient',
'--no-emit-version',
'--no-options',
'--no-tty',
'--no-use-agent',
'--no-verbose',
'--print-mds',
'--quiet',
'--sign',
'--symmetric',
'--throw-keyids',
'--use-agent',
'--verbose',
'--version',
'--with-colons',
'--yes',
])
#: These options expect either None or a hex string
hex_or_none_options = hex_options.intersection(none_options)
allowed = hex_options.union(unchecked_options, other_options, dir_options,
keyring_options, file_or_none_options,
pref_options, none_options)
if group and group in locals().keys():
return locals()[group] |
def to_json(self):
"""
Returns the JSON representation of the webhook.
"""
result = super(Webhook, self).to_json()
result.update({
'name': self.name,
'url': self.url,
'topics': self.topics,
'httpBasicUsername': self.http_basic_username,
'headers': self.headers
})
if self.filters:
result.update({'filters': self.filters})
if self.transformation:
result.update({'transformation': self.transformation})
return result | Returns the JSON representation of the webhook. | Below is the the instruction that describes the task:
### Input:
Returns the JSON representation of the webhook.
### Response:
def to_json(self):
"""
Returns the JSON representation of the webhook.
"""
result = super(Webhook, self).to_json()
result.update({
'name': self.name,
'url': self.url,
'topics': self.topics,
'httpBasicUsername': self.http_basic_username,
'headers': self.headers
})
if self.filters:
result.update({'filters': self.filters})
if self.transformation:
result.update({'transformation': self.transformation})
return result |
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
self.data = np.array(data, dtype=np.float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=np.float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self | Set the data to be modeled.
Returns *self*. | Below is the the instruction that describes the task:
### Input:
Set the data to be modeled.
Returns *self*.
### Response:
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
self.data = np.array(data, dtype=np.float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=np.float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self |
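The only non-obvious step in `set_data` is the scalar-invsigma branch, which leans on NumPy broadcasting; a minimal illustration of what `np.broadcast_arrays` returns there:

```python
import numpy as np

data = np.array([1.0, 2.0, 3.0])
inv = np.array(0.5, dtype=float)             # scalar inverse sigma
inv = np.broadcast_arrays(data, inv)[1]      # -> array([0.5, 0.5, 0.5])
print(inv, inv.shape == data.shape)          # passes the shape check above
```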
def get(interface, method, version=1,
apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'],
caller=None, session=None, params=None):
"""Send GET request to an API endpoint
.. versionadded:: 0.8.3
:param interface: interface name
:type interface: str
:param method: method name
:type method: str
:param version: method version
:type version: int
:param apihost: API hostname
:type apihost: str
:param https: whether to use HTTPS
:type https: bool
:param params: parameters for endpoint
:type params: dict
:return: endpoint response
:rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
"""
url = u"%s://%s/%s/%s/v%s/" % (
'https' if https else 'http', apihost, interface, method, version)
return webapi_request(url, 'GET', caller=caller, session=session, params=params) | Send GET request to an API endpoint
.. versionadded:: 0.8.3
:param interface: interface name
:type interface: str
:param method: method name
:type method: str
:param version: method version
:type version: int
:param apihost: API hostname
:type apihost: str
:param https: whether to use HTTPS
:type https: bool
:param params: parameters for endpoint
:type params: dict
:return: endpoint response
:rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str` | Below is the the instruction that describes the task:
### Input:
Send GET request to an API endpoint
.. versionadded:: 0.8.3
:param interface: interface name
:type interface: str
:param method: method name
:type method: str
:param version: method version
:type version: int
:param apihost: API hostname
:type apihost: str
:param https: whether to use HTTPS
:type https: bool
:param params: parameters for endpoint
:type params: dict
:return: endpoint response
:rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
### Response:
def get(interface, method, version=1,
apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'],
caller=None, session=None, params=None):
"""Send GET request to an API endpoint
.. versionadded:: 0.8.3
:param interface: interface name
:type interface: str
:param method: method name
:type method: str
:param version: method version
:type version: int
:param apihost: API hostname
:type apihost: str
:param https: whether to use HTTPS
:type https: bool
:param params: parameters for endpoint
:type params: dict
:return: endpoint response
:rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
"""
url = u"%s://%s/%s/%s/v%s/" % (
'https' if https else 'http', apihost, interface, method, version)
return webapi_request(url, 'GET', caller=caller, session=session, params=params) |
def move_user_data(primary, secondary):
'''
Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys.
'''
# Update all submission authorships of the secondary to the primary
submissions = Submission.objects.filter(authors__id=secondary.pk)
for subm in submissions:
if subm.submitter == secondary:
subm.submitter = primary
subm.authors.remove(secondary)
subm.authors.add(primary)
subm.save()
# Transfer course registrations
try:
for course in secondary.profile.courses.all():
primary.profile.courses.add(course)
primary.profile.save()
except UserProfile.DoesNotExist:
        # That's a database consistency problem, but this user will go away anyway
pass | Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys. | Below is the the instruction that describes the task:
### Input:
Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys.
### Response:
def move_user_data(primary, secondary):
'''
Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys.
'''
# Update all submission authorships of the secondary to the primary
submissions = Submission.objects.filter(authors__id=secondary.pk)
for subm in submissions:
if subm.submitter == secondary:
subm.submitter = primary
subm.authors.remove(secondary)
subm.authors.add(primary)
subm.save()
# Transfer course registrations
try:
for course in secondary.profile.courses.all():
primary.profile.courses.add(course)
primary.profile.save()
except UserProfile.DoesNotExist:
        # That's a database consistency problem, but this user will go away anyway
pass |
def build_chvatal_graph():
"""Makes a new Chvatal graph.
Ref: http://mathworld.wolfram.com/ChvatalGraph.html"""
# The easiest way to build the Chvatal graph is to start
# with C12 and add the additional 12 edges
graph = build_cycle_graph(12)
edge_tpls = [
(1,7), (1,9), (2,5), (2,11),
(3,7), (3,9), (4,10), (4,12),
(5,8), (6,10), (6,12), (8,11),
]
for i, j in edge_tpls:
graph.new_edge(i, j)
return graph | Makes a new Chvatal graph.
Ref: http://mathworld.wolfram.com/ChvatalGraph.html | Below is the the instruction that describes the task:
### Input:
Makes a new Chvatal graph.
Ref: http://mathworld.wolfram.com/ChvatalGraph.html
### Response:
def build_chvatal_graph():
"""Makes a new Chvatal graph.
Ref: http://mathworld.wolfram.com/ChvatalGraph.html"""
# The easiest way to build the Chvatal graph is to start
# with C12 and add the additional 12 edges
graph = build_cycle_graph(12)
edge_tpls = [
(1,7), (1,9), (2,5), (2,11),
(3,7), (3,9), (4,10), (4,12),
(5,8), (6,10), (6,12), (8,11),
]
for i, j in edge_tpls:
graph.new_edge(i, j)
return graph |
def get_container_list(self) -> list:
"""Get list of containers.
Returns:
list, all the ids of containers
"""
# Initialising empty list
containers = []
containers_list = self._client.containers.list()
for c_list in containers_list:
containers.append(c_list.short_id)
return containers | Get list of containers.
Returns:
list, all the ids of containers | Below is the the instruction that describes the task:
### Input:
Get list of containers.
Returns:
list, all the ids of containers
### Response:
def get_container_list(self) -> list:
"""Get list of containers.
Returns:
list, all the ids of containers
"""
# Initialising empty list
containers = []
containers_list = self._client.containers.list()
for c_list in containers_list:
containers.append(c_list.short_id)
return containers |
def gen_signature(self, privkey, pubkey, sig_path):
'''
Generate master public-key-signature
'''
return salt.crypt.gen_signature(privkey,
pubkey,
sig_path,
self.passphrase) | Generate master public-key-signature | Below is the the instruction that describes the task:
### Input:
Generate master public-key-signature
### Response:
def gen_signature(self, privkey, pubkey, sig_path):
'''
Generate master public-key-signature
'''
return salt.crypt.gen_signature(privkey,
pubkey,
sig_path,
self.passphrase) |
def _compile(self, parselet_node, level=0):
"""
Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug)
"""
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
if self.DEBUG:
print(debug_offset, "%s::compile(%s)" % (
self.__class__.__name__, parselet_node))
if isinstance(parselet_node, dict):
parselet_tree = ParsleyNode()
for k, v in list(parselet_node.items()):
# we parse the key raw elements but without much
# interpretation (which is done by the SelectorHandler)
try:
m = self.REGEX_PARSELET_KEY.match(k)
if not m:
if self.DEBUG:
print(debug_offset, "could not parse key", k)
raise InvalidKeySyntax(k)
except:
raise InvalidKeySyntax("Key %s is not valid" % k)
key = m.group('key')
# by default, fields are required
key_required = True
operator = m.group('operator')
if operator == '?':
key_required = False
# FIXME: "!" operator not supported (complete array)
scope = m.group('scope')
# example: get list of H3 tags
# { "titles": ["h3"] }
# FIXME: should we support multiple selectors in list?
# e.g. { "titles": ["h1", "h2", "h3", "h4"] }
if isinstance(v, (list, tuple)):
v = v[0]
iterate = True
else:
iterate = False
# keys in the abstract Parsley trees are of type `ParsleyContext`
try:
parsley_context = ParsleyContext(
key,
operator=operator,
required=key_required,
scope=self.selector_handler.make(scope) if scope else None,
iterate=iterate)
except SyntaxError:
if self.DEBUG:
print("Invalid scope:", k, scope)
raise
if self.DEBUG:
print(debug_offset, "current context:", parsley_context)
# go deeper in the Parsley tree...
try:
child_tree = self._compile(v, level=level+1)
except SyntaxError:
if self.DEBUG:
print("Invalid value: ", v)
raise
except:
raise
if self.DEBUG:
print(debug_offset, "child tree:", child_tree)
parselet_tree[parsley_context] = child_tree
return parselet_tree
# a string leaf should match some kind of selector,
# let the selector handler deal with it
elif isstr(parselet_node):
return self.selector_handler.make(parselet_node)
else:
raise ValueError(
"Unsupported type(%s) for Parselet node <%s>" % (
type(parselet_node), parselet_node)) | Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug) | Below is the the instruction that describes the task:
### Input:
Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug)
### Response:
def _compile(self, parselet_node, level=0):
"""
Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug)
"""
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
if self.DEBUG:
print(debug_offset, "%s::compile(%s)" % (
self.__class__.__name__, parselet_node))
if isinstance(parselet_node, dict):
parselet_tree = ParsleyNode()
for k, v in list(parselet_node.items()):
# we parse the key raw elements but without much
# interpretation (which is done by the SelectorHandler)
try:
m = self.REGEX_PARSELET_KEY.match(k)
if not m:
if self.DEBUG:
print(debug_offset, "could not parse key", k)
raise InvalidKeySyntax(k)
except:
raise InvalidKeySyntax("Key %s is not valid" % k)
key = m.group('key')
# by default, fields are required
key_required = True
operator = m.group('operator')
if operator == '?':
key_required = False
# FIXME: "!" operator not supported (complete array)
scope = m.group('scope')
# example: get list of H3 tags
# { "titles": ["h3"] }
# FIXME: should we support multiple selectors in list?
# e.g. { "titles": ["h1", "h2", "h3", "h4"] }
if isinstance(v, (list, tuple)):
v = v[0]
iterate = True
else:
iterate = False
# keys in the abstract Parsley trees are of type `ParsleyContext`
try:
parsley_context = ParsleyContext(
key,
operator=operator,
required=key_required,
scope=self.selector_handler.make(scope) if scope else None,
iterate=iterate)
except SyntaxError:
if self.DEBUG:
print("Invalid scope:", k, scope)
raise
if self.DEBUG:
print(debug_offset, "current context:", parsley_context)
# go deeper in the Parsley tree...
try:
child_tree = self._compile(v, level=level+1)
except SyntaxError:
if self.DEBUG:
print("Invalid value: ", v)
raise
except:
raise
if self.DEBUG:
print(debug_offset, "child tree:", child_tree)
parselet_tree[parsley_context] = child_tree
return parselet_tree
# a string leaf should match some kind of selector,
# let the selector handler deal with it
elif isstr(parselet_node):
return self.selector_handler.make(parselet_node)
else:
raise ValueError(
"Unsupported type(%s) for Parselet node <%s>" % (
type(parselet_node), parselet_node)) |
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
"""
Overlap-add algorithm using Numpy arrays.
Parameters
----------
blk_sig :
An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
size :
Block size for each ``blk_sig`` element, in samples.
hop :
Number of samples for two adjacent blocks (defaults to the size).
wnd :
Windowing function to be applied to each block or any iterable with
exactly ``size`` elements. If ``None`` (default), applies a rectangular
window.
normalize :
Flag whether the window should be normalized so that the process could
happen in the [-1; 1] range, dividing the window by its hop gain.
Default is ``True``.
Returns
-------
A Stream instance with the blocks overlapped and added.
See Also
--------
Stream.blocks :
Splits the Stream instance into blocks with given size and hop.
blocks :
    Same as Stream.blocks but without using the Stream class.
  chain :
    Lazily joins all iterables given as parameters.
  chain.from_iterable :
    Same as ``chain(*data)``, but the ``data`` evaluation is lazy.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
Note
----
Each block has the window function applied to it and the result is the
sum of the blocks without any edge-case special treatment for the first
and last few blocks.
"""
import numpy as np
# Finds the size from data, if needed
if size is None:
blk_sig = Stream(blk_sig)
size = len(blk_sig.peek())
if hop is None:
hop = size
# Find the right windowing function to be applied
if wnd is None:
wnd = np.ones(size)
elif callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Sequence):
wnd = np.array(wnd)
elif isinstance(wnd, Iterable):
wnd = np.hstack(wnd)
else:
raise TypeError("Window should be an iterable or a callable")
# Normalization to the [-1; 1] range
if normalize:
steps = Stream(wnd).blocks(hop).map(np.array)
gain = np.sum(np.abs(np.vstack(steps)), 0).max()
if gain: # If gain is zero, normalization couldn't have any effect
wnd = wnd / gain # Can't use "/=" nor "*=" as Numpy would keep datatype
# Overlap-add algorithm
old = np.zeros(size)
for blk in (wnd * blk for blk in blk_sig):
blk[:-hop] += old[hop:]
for el in blk[:hop]:
yield el
old = blk
for el in old[hop:]: # No more blocks, finish yielding the last one
yield el | Overlap-add algorithm using Numpy arrays.
Parameters
----------
blk_sig :
An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
size :
Block size for each ``blk_sig`` element, in samples.
hop :
Number of samples for two adjacent blocks (defaults to the size).
wnd :
Windowing function to be applied to each block or any iterable with
exactly ``size`` elements. If ``None`` (default), applies a rectangular
window.
normalize :
Flag whether the window should be normalized so that the process could
happen in the [-1; 1] range, dividing the window by its hop gain.
Default is ``True``.
Returns
-------
A Stream instance with the blocks overlapped and added.
See Also
--------
Stream.blocks :
Splits the Stream instance into blocks with given size and hop.
blocks :
    Same as Stream.blocks but without using the Stream class.
  chain :
    Lazily joins all iterables given as parameters.
  chain.from_iterable :
    Same as ``chain(*data)``, but the ``data`` evaluation is lazy.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
Note
----
Each block has the window function applied to it and the result is the
sum of the blocks without any edge-case special treatment for the first
and last few blocks. | Below is the the instruction that describes the task:
### Input:
Overlap-add algorithm using Numpy arrays.
Parameters
----------
blk_sig :
An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
size :
Block size for each ``blk_sig`` element, in samples.
hop :
Number of samples for two adjacent blocks (defaults to the size).
wnd :
Windowing function to be applied to each block or any iterable with
exactly ``size`` elements. If ``None`` (default), applies a rectangular
window.
normalize :
Flag whether the window should be normalized so that the process could
happen in the [-1; 1] range, dividing the window by its hop gain.
Default is ``True``.
Returns
-------
A Stream instance with the blocks overlapped and added.
See Also
--------
Stream.blocks :
Splits the Stream instance into blocks with given size and hop.
blocks :
    Same as Stream.blocks but without using the Stream class.
  chain :
    Lazily joins all iterables given as parameters.
  chain.from_iterable :
    Same as ``chain(*data)``, but the ``data`` evaluation is lazy.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
Note
----
Each block has the window function applied to it and the result is the
sum of the blocks without any edge-case special treatment for the first
and last few blocks.
### Response:
def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):
"""
Overlap-add algorithm using Numpy arrays.
Parameters
----------
blk_sig :
An iterable of blocks (sequences), such as the ``Stream.blocks`` result.
size :
Block size for each ``blk_sig`` element, in samples.
hop :
Number of samples for two adjacent blocks (defaults to the size).
wnd :
Windowing function to be applied to each block or any iterable with
exactly ``size`` elements. If ``None`` (default), applies a rectangular
window.
normalize :
Flag whether the window should be normalized so that the process could
happen in the [-1; 1] range, dividing the window by its hop gain.
Default is ``True``.
Returns
-------
A Stream instance with the blocks overlapped and added.
See Also
--------
Stream.blocks :
Splits the Stream instance into blocks with given size and hop.
blocks :
    Same as Stream.blocks but without using the Stream class.
  chain :
    Lazily joins all iterables given as parameters.
  chain.from_iterable :
    Same as ``chain(*data)``, but the ``data`` evaluation is lazy.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
Note
----
Each block has the window function applied to it and the result is the
sum of the blocks without any edge-case special treatment for the first
and last few blocks.
"""
import numpy as np
# Finds the size from data, if needed
if size is None:
blk_sig = Stream(blk_sig)
size = len(blk_sig.peek())
if hop is None:
hop = size
# Find the right windowing function to be applied
if wnd is None:
wnd = np.ones(size)
elif callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Sequence):
wnd = np.array(wnd)
elif isinstance(wnd, Iterable):
wnd = np.hstack(wnd)
else:
raise TypeError("Window should be an iterable or a callable")
# Normalization to the [-1; 1] range
if normalize:
steps = Stream(wnd).blocks(hop).map(np.array)
gain = np.sum(np.abs(np.vstack(steps)), 0).max()
if gain: # If gain is zero, normalization couldn't have any effect
wnd = wnd / gain # Can't use "/=" nor "*=" as Numpy would keep datatype
# Overlap-add algorithm
old = np.zeros(size)
for blk in (wnd * blk for blk in blk_sig):
blk[:-hop] += old[hop:]
for el in blk[:hop]:
yield el
old = blk
for el in old[hop:]: # No more blocks, finish yielding the last one
yield el |
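Independent of the Stream class used above, the core overlap-add step can be shown with plain NumPy; with a rectangular window and hop equal to the block size the blocks simply tile back into the original signal:

```python
import numpy as np

sig = np.arange(12, dtype=float)
size = hop = 4
blocks = [sig[i:i + size] for i in range(0, len(sig), hop)]

out = np.zeros(len(sig))
for k, blk in enumerate(blocks):
    out[k * hop:k * hop + size] += blk   # add each (windowed) block in place
print(np.allclose(out, sig))             # True
```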
def cancellation_fee(self, percentage):
''' Generates an invoice with a cancellation fee, and applies
credit to the invoice.
percentage (Decimal): The percentage of the credit note to turn into
a cancellation fee. Must be 0 <= percentage <= 100.
'''
# Local import to fix import cycles. Can we do better?
from .invoice import InvoiceController
assert(percentage >= 0 and percentage <= 100)
cancellation_fee = self.credit_note.value * percentage / 100
due = datetime.timedelta(days=1)
item = [("Cancellation fee", cancellation_fee)]
invoice = InvoiceController.manual_invoice(
self.credit_note.invoice.user, due, item
)
if not invoice.is_paid:
self.apply_to_invoice(invoice)
return InvoiceController(invoice) | Generates an invoice with a cancellation fee, and applies
credit to the invoice.
percentage (Decimal): The percentage of the credit note to turn into
a cancellation fee. Must be 0 <= percentage <= 100. | Below is the the instruction that describes the task:
### Input:
Generates an invoice with a cancellation fee, and applies
credit to the invoice.
percentage (Decimal): The percentage of the credit note to turn into
a cancellation fee. Must be 0 <= percentage <= 100.
### Response:
def cancellation_fee(self, percentage):
''' Generates an invoice with a cancellation fee, and applies
credit to the invoice.
percentage (Decimal): The percentage of the credit note to turn into
a cancellation fee. Must be 0 <= percentage <= 100.
'''
# Local import to fix import cycles. Can we do better?
from .invoice import InvoiceController
assert(percentage >= 0 and percentage <= 100)
cancellation_fee = self.credit_note.value * percentage / 100
due = datetime.timedelta(days=1)
item = [("Cancellation fee", cancellation_fee)]
invoice = InvoiceController.manual_invoice(
self.credit_note.invoice.user, due, item
)
if not invoice.is_paid:
self.apply_to_invoice(invoice)
return InvoiceController(invoice) |
def delete_container_service(access_token, subscription_id, resource_group, service_name):
'''Delete a named container.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
return do_delete(endpoint, access_token) | Delete a named container.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response. | Below is the the instruction that describes the task:
### Input:
Delete a named container.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response.
### Response:
def delete_container_service(access_token, subscription_id, resource_group, service_name):
'''Delete a named container.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
return do_delete(endpoint, access_token) |
def tag_image(self, repository=None, tag=None):
"""
Apply additional tags to the image or even add a new name
:param repository: str, see constructor
:param tag: str, see constructor
:return: instance of DockerImage
"""
if not (repository or tag):
raise ValueError("You need to specify either repository or tag.")
r = repository or self.name
t = "latest" if not tag else tag
self.d.tag(image=self.get_full_name(), repository=r, tag=t)
return DockerImage(r, tag=t) | Apply additional tags to the image or even add a new name
:param repository: str, see constructor
:param tag: str, see constructor
:return: instance of DockerImage | Below is the the instruction that describes the task:
### Input:
Apply additional tags to the image or even add a new name
:param repository: str, see constructor
:param tag: str, see constructor
:return: instance of DockerImage
### Response:
def tag_image(self, repository=None, tag=None):
"""
Apply additional tags to the image or even add a new name
:param repository: str, see constructor
:param tag: str, see constructor
:return: instance of DockerImage
"""
if not (repository or tag):
raise ValueError("You need to specify either repository or tag.")
r = repository or self.name
t = "latest" if not tag else tag
self.d.tag(image=self.get_full_name(), repository=r, tag=t)
return DockerImage(r, tag=t) |
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
parameters = ['-f', input_files['genome.fa'],
'-F', 'vcf',
'-G',
'-L',
'-q', '1',
'-Q', '15',
input_files['tumor.bam'],
input_files['normal.bam'],
docker_path(output_file)]
docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient'])
return outfile | Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID | Below is the the instruction that describes the task:
### Input:
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
### Response:
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
Run SomaticSniper on the DNA bams.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the genome-level vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
parameters = ['-f', input_files['genome.fa'],
'-F', 'vcf',
'-G',
'-L',
'-q', '1',
'-Q', '15',
input_files['tumor.bam'],
input_files['normal.bam'],
docker_path(output_file)]
docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version'])
outfile = job.fileStore.writeGlobalFile(output_file)
job.fileStore.logToMaster('Ran SomaticSniper on %s successfully' % univ_options['patient'])
return outfile |
def _xml_namespace_strip(root):
# type: (ET.Element) -> None
"""Strip the XML namespace prefix from all element tags under the given root Element."""
if '}' not in root.tag:
return # Nothing to do, no namespace present
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else: # pragma: no cover
# We should never get here. If there is a namespace, then the namespace should be
# included in all elements.
pass | Strip the XML namespace prefix from all element tags under the given root Element. | Below is the the instruction that describes the task:
### Input:
Strip the XML namespace prefix from all element tags under the given root Element.
### Response:
def _xml_namespace_strip(root):
# type: (ET.Element) -> None
"""Strip the XML namespace prefix from all element tags under the given root Element."""
if '}' not in root.tag:
return # Nothing to do, no namespace present
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else: # pragma: no cover
# We should never get here. If there is a namespace, then the namespace should be
# included in all elements.
pass |
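A quick demonstration of `_xml_namespace_strip` with the standard-library parser, assuming the helper above is in scope:

```python
import xml.etree.ElementTree as ET

root = ET.fromstring('<a xmlns="urn:example"><b/></a>')
print([el.tag for el in root.iter()])   # ['{urn:example}a', '{urn:example}b']
_xml_namespace_strip(root)
print([el.tag for el in root.iter()])   # ['a', 'b']
```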
def parse(self, rrstr):
# type: (bytes) -> None
'''
Parse a Rock Ridge Extensions Reference record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('ER record already initialized!')
(su_len, su_entry_version_unused, len_id, len_des, len_src,
self.ext_ver) = struct.unpack_from('=BBBBBB', rrstr[:8], 2)
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
# Ensure that the length isn't crazy
if su_len > len(rrstr):
raise pycdlibexception.PyCdlibInvalidISO('Length of ER record much too long')
# Also ensure that the combination of len_id, len_des, and len_src
# doesn't overrun su_len; because of the check above, this means it
# can't overrun len(rrstr) either
total_length = len_id + len_des + len_src
if total_length > su_len:
raise pycdlibexception.PyCdlibInvalidISO('Combined length of ER ID, des, and src longer than record')
fmtstr = '=%ds%ds%ds' % (len_id, len_des, len_src)
(self.ext_id, self.ext_des, self.ext_src) = struct.unpack_from(fmtstr, rrstr, 8)
self._initialized = True | Parse a Rock Ridge Extensions Reference record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
Parse a Rock Ridge Extensions Reference record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
### Response:
def parse(self, rrstr):
# type: (bytes) -> None
'''
Parse a Rock Ridge Extensions Reference record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('ER record already initialized!')
(su_len, su_entry_version_unused, len_id, len_des, len_src,
self.ext_ver) = struct.unpack_from('=BBBBBB', rrstr[:8], 2)
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
# Ensure that the length isn't crazy
if su_len > len(rrstr):
raise pycdlibexception.PyCdlibInvalidISO('Length of ER record much too long')
# Also ensure that the combination of len_id, len_des, and len_src
# doesn't overrun su_len; because of the check above, this means it
# can't overrun len(rrstr) either
total_length = len_id + len_des + len_src
if total_length > su_len:
raise pycdlibexception.PyCdlibInvalidISO('Combined length of ER ID, des, and src longer than record')
fmtstr = '=%ds%ds%ds' % (len_id, len_des, len_src)
(self.ext_id, self.ext_des, self.ext_src) = struct.unpack_from(fmtstr, rrstr, 8)
self._initialized = True |
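For intuition, a standalone sketch (standard-library struct only, example values invented) of the byte layout the parser above expects: two signature bytes, six single-byte header fields, then the ID, description and source strings back to back. The leading b'ER' signature is an assumption based on the offsets used in parse().
import struct

ext_id, ext_des, ext_src = b'RRIP_1991A', b'EXAMPLE DESCRIPTION', b'EXAMPLE SOURCE'
header = struct.pack('=BBBBBB', 8 + len(ext_id) + len(ext_des) + len(ext_src),  # su_len
                     1, len(ext_id), len(ext_des), len(ext_src), 1)             # version, lengths, ext_ver
record = b'ER' + header + ext_id + ext_des + ext_src
# Same unpacking steps as parse(): header fields from offset 2, strings from offset 8.
su_len, _, len_id, len_des, len_src, ext_ver = struct.unpack_from('=BBBBBB', record[:8], 2)
print(struct.unpack_from('=%ds%ds%ds' % (len_id, len_des, len_src), record, 8))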
def get_exp_dir_num(parent_dir: str) -> int:
""" Gets the number of the current experiment directory."""
return max([int(fn.split(".")[0])
for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
+ [-1]) | Gets the number of the current experiment directory. | Below is the the instruction that describes the task:
### Input:
Gets the number of the current experiment directory.
### Response:
def get_exp_dir_num(parent_dir: str) -> int:
""" Gets the number of the current experiment directory."""
return max([int(fn.split(".")[0])
for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
+ [-1]) |
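A small usage sketch (assuming get_exp_dir_num is importable); entries whose name before the first dot is numeric are counted, everything else is ignored:
import os, tempfile

parent = tempfile.mkdtemp()
for name in ('0', '1.cfg', '7', 'notes.txt'):
    open(os.path.join(parent, name), 'w').close()
print(get_exp_dir_num(parent))   # 7; an empty directory would give -1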
def double_click(self, on_element=None):
"""
Double-clicks an element.
:Args:
- on_element: The element to double-click.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.double_click()
for _ in range(4):
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.DOUBLE_CLICK, {}))
return self | Double-clicks an element.
:Args:
- on_element: The element to double-click.
If None, clicks on current mouse position. | Below is the the instruction that describes the task:
### Input:
Double-clicks an element.
:Args:
- on_element: The element to double-click.
If None, clicks on current mouse position.
### Response:
def double_click(self, on_element=None):
"""
Double-clicks an element.
:Args:
- on_element: The element to double-click.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.double_click()
for _ in range(4):
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.DOUBLE_CLICK, {}))
return self |
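This appears to be ActionChains.double_click from Selenium; assuming so, a typical usage sketch chains it with perform(), since the queued action only runs then. The driver choice and locator below are illustrative only:
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

driver = webdriver.Firefox()
element = driver.find_element_by_css_selector('#item')   # illustrative locator
ActionChains(driver).double_click(element).perform()     # nothing happens until perform()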
def listener(self, event, *args, **kwargs):
"""Create a listener from a decorated function.
:param event: Event to listen to.
:type event: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The function to use as the listener
:rtype: fn
"""
if len(args) == 1 and callable(args[0]): # pragma: no cover
raise RuntimeError("Cannot use the @listener decorator without "
"arguments")
def wrapper(listener_f):
if len(kwargs) > 0:
listener_f = (listener_f, kwargs)
self._listeners[event].append(listener_f)
return listener_f
return wrapper | Create a listener from a decorated function.
:param event: Event to listen to.
:type event: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The function to use as the listener
:rtype: fn | Below is the the instruction that describes the task:
### Input:
Create a listener from a decorated function.
:param event: Event to listen to.
:type event: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The function to use as the listener
:rtype: fn
### Response:
def listener(self, event, *args, **kwargs):
"""Create a listener from a decorated function.
:param event: Event to listen to.
:type event: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The function to use as the listener
:rtype: fn
"""
if len(args) == 1 and callable(args[0]): # pragma: no cover
raise RuntimeError("Cannot use the @listener decorator without "
"arguments")
def wrapper(listener_f):
if len(kwargs) > 0:
listener_f = (listener_f, kwargs)
self._listeners[event].append(listener_f)
return listener_f
return wrapper |
def FUNCTION_DECL(self, cursor):
"""Handles function declaration"""
# FIXME to UT
name = self.get_unique_name(cursor)
if self.is_registered(name):
return self.get_registered(name)
returns = self.parse_cursor_type(cursor.type.get_result())
attributes = []
extern = False
obj = typedesc.Function(name, returns, attributes, extern)
for arg in cursor.get_arguments():
arg_obj = self.parse_cursor(arg)
# if arg_obj is None:
# code.interact(local=locals())
obj.add_argument(arg_obj)
# code.interact(local=locals())
self.register(name, obj)
self.set_location(obj, cursor)
self.set_comment(obj, cursor)
return obj | Handles function declaration | Below is the the instruction that describes the task:
### Input:
Handles function declaration
### Response:
def FUNCTION_DECL(self, cursor):
"""Handles function declaration"""
# FIXME to UT
name = self.get_unique_name(cursor)
if self.is_registered(name):
return self.get_registered(name)
returns = self.parse_cursor_type(cursor.type.get_result())
attributes = []
extern = False
obj = typedesc.Function(name, returns, attributes, extern)
for arg in cursor.get_arguments():
arg_obj = self.parse_cursor(arg)
# if arg_obj is None:
# code.interact(local=locals())
obj.add_argument(arg_obj)
# code.interact(local=locals())
self.register(name, obj)
self.set_location(obj, cursor)
self.set_comment(obj, cursor)
return obj |
def _strip_object(key):
"""
Strips branch and version info if the given key supports those attributes.
"""
if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
return key.for_branch(None).version_agnostic()
else:
return key | Strips branch and version info if the given key supports those attributes. | Below is the the instruction that describes the task:
### Input:
Strips branch and version info if the given key supports those attributes.
### Response:
def _strip_object(key):
"""
Strips branch and version info if the given key supports those attributes.
"""
if hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch'):
return key.for_branch(None).version_agnostic()
else:
return key |
def list_policy_versions(policy_name,
region=None, key=None, keyid=None, profile=None):
'''
List versions of a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_policy_versions mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
ret = conn.list_policy_versions(policy_arn)
return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions')
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to list versions for IAM policy %s.', policy_name)
return [] | List versions of a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_policy_versions mypolicy | Below is the the instruction that describes the task:
### Input:
List versions of a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_policy_versions mypolicy
### Response:
def list_policy_versions(policy_name,
region=None, key=None, keyid=None, profile=None):
'''
List versions of a policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.list_policy_versions mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
ret = conn.list_policy_versions(policy_arn)
return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions')
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to list versions for IAM policy %s.', policy_name)
return [] |
def process_tokens(
tokens: Sequence[Token], *, end: str = "\n", sep: str = " "
) -> Tuple[str, str]:
""" Returns two strings from a list of tokens.
One containing ASCII escape codes, the other
only the 'normal' characters
"""
# Flatten the list of tokens in case some of them are of
# class UnicodeSequence:
flat_tokens = list() # type: List[Token]
for token in tokens:
if isinstance(token, UnicodeSequence):
flat_tokens.extend(token.tuple())
else:
flat_tokens.append(token)
with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True)
without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False)
return (with_color, without_color) | Returns two strings from a list of tokens.
One containing ASCII escape codes, the other
only the 'normal' characters | Below is the the instruction that describes the task:
### Input:
Returns two strings from a list of tokens.
One containing ASCII escape codes, the other
only the 'normal' characters
### Response:
def process_tokens(
tokens: Sequence[Token], *, end: str = "\n", sep: str = " "
) -> Tuple[str, str]:
""" Returns two strings from a list of tokens.
One containing ASCII escape codes, the other
only the 'normal' characters
"""
# Flatten the list of tokens in case some of them are of
# class UnicodeSequence:
flat_tokens = list() # type: List[Token]
for token in tokens:
if isinstance(token, UnicodeSequence):
flat_tokens.extend(token.tuple())
else:
flat_tokens.append(token)
with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True)
without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False)
return (with_color, without_color) |
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy':
return
compile_classpath = self.context.products.get_data('compile_classpath',
init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
targets = self.context.targets()
if all(not isinstance(target, JarLibrary) for target in targets):
if self._report:
self.context.log.info("Not generating a report. No resolution performed.")
return
executor = self.create_java_executor()
results = self.resolve(executor=executor,
targets=targets,
classpath_products=compile_classpath,
confs=self.get_options().confs,
extra_args=self._args)
if self._report:
results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts]
if not results_with_resolved_artifacts:
self.context.log.info("Not generating a report. No resolution performed.")
else:
for result in results_with_resolved_artifacts:
self._generate_ivy_report(result) | Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path). | Below is the the instruction that describes the task:
### Input:
Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
### Response:
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy':
return
compile_classpath = self.context.products.get_data('compile_classpath',
init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
targets = self.context.targets()
if all(not isinstance(target, JarLibrary) for target in targets):
if self._report:
self.context.log.info("Not generating a report. No resolution performed.")
return
executor = self.create_java_executor()
results = self.resolve(executor=executor,
targets=targets,
classpath_products=compile_classpath,
confs=self.get_options().confs,
extra_args=self._args)
if self._report:
results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts]
if not results_with_resolved_artifacts:
self.context.log.info("Not generating a report. No resolution performed.")
else:
for result in results_with_resolved_artifacts:
self._generate_ivy_report(result) |
def _start_local_queue_process(self):
""" TODO: docstring """
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port),
"worker_port": self.worker_port,
"worker_port_range": self.worker_port_range
# TODO: logdir and logging level
})
self.queue_proc.start()
try:
worker_port = comm_q.get(block=True, timeout=120)
logger.debug(
"Got worker port {} from interchange".format(worker_port))
except queue.Empty:
logger.error(
"Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(
self.address, worker_port) | TODO: docstring | Below is the the instruction that describes the task:
### Input:
TODO: docstring
### Response:
def _start_local_queue_process(self):
""" TODO: docstring """
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port),
"worker_port": self.worker_port,
"worker_port_range": self.worker_port_range
# TODO: logdir and logging level
})
self.queue_proc.start()
try:
worker_port = comm_q.get(block=True, timeout=120)
logger.debug(
"Got worker port {} from interchange".format(worker_port))
except queue.Empty:
logger.error(
"Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(
self.address, worker_port) |
def build_path(levels):
"""
make a linear directory structure from a list of path levels names
levels = ["chefdir", "trees", "test"]
builds ./chefdir/trees/test/
"""
path = os.path.join(*levels)
if not dir_exists(path):
os.makedirs(path)
return path | make a linear directory structure from a list of path levels names
levels = ["chefdir", "trees", "test"]
builds ./chefdir/trees/test/ | Below is the the instruction that describes the task:
### Input:
make a linear directory structure from a list of path levels names
levels = ["chefdir", "trees", "test"]
builds ./chefdir/trees/test/
### Response:
def build_path(levels):
"""
make a linear directory structure from a list of path levels names
levels = ["chefdir", "trees", "test"]
builds ./chefdir/trees/test/
"""
path = os.path.join(*levels)
if not dir_exists(path):
os.makedirs(path)
return path |
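A usage sketch (assumes build_path and its dir_exists helper are importable from the same module):
path = build_path(["chefdir", "trees", "test"])
print(path)   # 'chefdir/trees/test' on POSIX, and the nested directories now exist
# A standalone equivalent: os.makedirs(os.path.join("chefdir", "trees", "test"), exist_ok=True)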
def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, six.text_type):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return six.text_type(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') | Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`. | Below is the the instruction that describes the task:
### Input:
Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
### Response:
def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, six.text_type):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return six.text_type(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') |
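A usage sketch (assuming a UTF-8 filesystem encoding; _fsencoding and six come from the surrounding module):
print(displayable_path(b'/music/caf\xc3\xa9.flac'))        # /music/café.flac
print(displayable_path([b'/a.mp3', u'/b.mp3'], u' | '))    # /a.mp3 | /b.mp3
print(displayable_path(42))                                # 42 (falls back to the text representation)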
def remove_item(self, *args, **kwargs):
"""Pass through to provider methods."""
try:
self._get_provider_session('assessment_basic_authoring_session').remove_item(*args, **kwargs)
except InvalidArgument:
self._get_sub_package_provider_session(
'assessment_authoring', 'assessment_part_item_design_session').remove_item(*args, **kwargs) | Pass through to provider methods. | Below is the the instruction that describes the task:
### Input:
Pass through to provider methods.
### Response:
def remove_item(self, *args, **kwargs):
"""Pass through to provider methods."""
try:
self._get_provider_session('assessment_basic_authoring_session').remove_item(*args, **kwargs)
except InvalidArgument:
self._get_sub_package_provider_session(
'assessment_authoring', 'assessment_part_item_design_session').remove_item(*args, **kwargs) |
def find_eigen(hint=None):
r'''
Try to find the Eigen library. If successful the include directory is returned.
'''
# search with pkgconfig
# ---------------------
try:
import pkgconfig
if pkgconfig.installed('eigen3','>3.0.0'):
return pkgconfig.parse('eigen3')['include_dirs'][0]
except:
pass
# manual search
# -------------
search_dirs = [] if hint is None else hint
search_dirs += [
"/usr/local/include/eigen3",
"/usr/local/homebrew/include/eigen3",
"/opt/local/var/macports/software/eigen3",
"/opt/local/include/eigen3",
"/usr/include/eigen3",
"/usr/include/local",
"/usr/include",
]
for d in search_dirs:
path = os.path.join(d, "Eigen", "Dense")
if os.path.exists(path):
vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h")
if not os.path.exists(vf):
continue
src = open(vf, "r").read()
v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src)
v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src)
v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src)
if not len(v1) or not len(v2) or not len(v3):
continue
v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0])
print("Found Eigen version {0} in: {1}".format(v, d))
return d
return None | r'''
Try to find the Eigen library. If successful the include directory is returned. | Below is the the instruction that describes the task:
### Input:
r'''
Try to find the Eigen library. If successful the include directory is returned.
### Response:
def find_eigen(hint=None):
r'''
Try to find the Eigen library. If successful the include directory is returned.
'''
# search with pkgconfig
# ---------------------
try:
import pkgconfig
if pkgconfig.installed('eigen3','>3.0.0'):
return pkgconfig.parse('eigen3')['include_dirs'][0]
except:
pass
# manual search
# -------------
search_dirs = [] if hint is None else hint
search_dirs += [
"/usr/local/include/eigen3",
"/usr/local/homebrew/include/eigen3",
"/opt/local/var/macports/software/eigen3",
"/opt/local/include/eigen3",
"/usr/include/eigen3",
"/usr/include/local",
"/usr/include",
]
for d in search_dirs:
path = os.path.join(d, "Eigen", "Dense")
if os.path.exists(path):
vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h")
if not os.path.exists(vf):
continue
src = open(vf, "r").read()
v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src)
v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src)
v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src)
if not len(v1) or not len(v2) or not len(v3):
continue
v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0])
print("Found Eigen version {0} in: {1}".format(v, d))
return d
return None |
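A usage sketch for a setup.py-style build (the hint path is illustrative):
include_dir = find_eigen(hint=["/opt/eigen3/include/eigen3"])
if include_dir is None:
    raise RuntimeError("Eigen headers not found; pass extra search paths via hint=[...]")
# e.g. setuptools.Extension(..., include_dirs=[include_dir])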
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country='US', media='music', entity=None, attribute=None,
limit=50):
"""
Builds the URL to perform the lookup based on the provided data
:param id: String. iTunes ID of the artist, album, track, ebook or software
:param artist_amg_id: String. All Music Guide ID of the artist
:param upc: String. UPCs/EANs
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
"""
built_url = base_lookup_url
has_one_argument = False
if id is not None:
built_url += parameters[6] + str(id)
has_one_argument = True
if artist_amg_id is not None:
if has_one_argument:
built_url += ampersand + parameters[7] + artist_amg_id
else:
built_url += parameters[7] + str(artist_amg_id)
has_one_argument = True
if upc is not None:
if has_one_argument:
built_url += ampersand + parameters[8] + upc
else:
built_url += parameters[8] + str(upc)
built_url += ampersand + parameters[1] + country
built_url += ampersand + parameters[2] + media
if entity is not None:
built_url += ampersand + parameters[3] + entity
if attribute is not None:
built_url += ampersand + parameters[4] + attribute
built_url += ampersand + parameters[5] + str(limit)
return built_url | Builds the URL to perform the lookup based on the provided data
:param id: String. iTunes ID of the artist, album, track, ebook or software
:param artist_amg_id: String. All Music Guide ID of the artist
:param upc: String. UPCs/EANs
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string | Below is the the instruction that describes the task:
### Input:
Builds the URL to perform the lookup based on the provided data
:param id: String. iTunes ID of the artist, album, track, ebook or software
:param artist_amg_id: String. All Music Guide ID of the artist
:param upc: String. UPCs/EANs
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
### Response:
def _url_lookup_builder(id=None, artist_amg_id=None, upc=None, country='US', media='music', entity=None, attribute=None,
limit=50):
"""
Builds the URL to perform the lookup based on the provided data
:param id: String. iTunes ID of the artist, album, track, ebook or software
:param artist_amg_id: String. All Music Guide ID of the artist
:param upc: String. UPCs/EANs
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
"""
built_url = base_lookup_url
has_one_argument = False
if id is not None:
built_url += parameters[6] + str(id)
has_one_argument = True
if artist_amg_id is not None:
if has_one_argument:
built_url += ampersand + parameters[7] + artist_amg_id
else:
built_url += parameters[7] + str(artist_amg_id)
has_one_argument = True
if upc is not None:
if has_one_argument:
built_url += ampersand + parameters[8] + upc
else:
built_url += parameters[8] + str(upc)
built_url += ampersand + parameters[1] + country
built_url += ampersand + parameters[2] + media
if entity is not None:
built_url += ampersand + parameters[3] + entity
if attribute is not None:
built_url += ampersand + parameters[4] + attribute
built_url += ampersand + parameters[5] + str(limit)
return built_url |
def options_help():
"""Help message for options dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | Help message for options dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message | Below is the the instruction that describes the task:
### Input:
Help message for options dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
### Response:
def options_help():
"""Help message for options dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message |
def get_previous_scheduled_dagrun(self, session=None):
"""The previous, SCHEDULED DagRun, if there is one"""
dag = self.get_dag()
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == dag.previous_schedule(self.execution_date)
).first() | The previous, SCHEDULED DagRun, if there is one | Below is the the instruction that describes the task:
### Input:
The previous, SCHEDULED DagRun, if there is one
### Response:
def get_previous_scheduled_dagrun(self, session=None):
"""The previous, SCHEDULED DagRun, if there is one"""
dag = self.get_dag()
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == dag.previous_schedule(self.execution_date)
).first() |
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
"""Make new_fn have old_fn's doc string. This is particularly useful
for the do_... commands that hook into the help system.
Adapted from a comp.lang.python posting
by Duncan Booth."""
def wrapper(*args, **kw):
return new_fn(*args, **kw)
if old_fn.__doc__:
wrapper.__doc__ = old_fn.__doc__ + additional_text
return wrapper | Make new_fn have old_fn's doc string. This is particularly useful
for the do_... commands that hook into the help system.
Adapted from a comp.lang.python posting
by Duncan Booth. | Below is the the instruction that describes the task:
### Input:
Make new_fn have old_fn's doc string. This is particularly useful
for the do_... commands that hook into the help system.
Adapted from a comp.lang.python posting
by Duncan Booth.
### Response:
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
"""Make new_fn have old_fn's doc string. This is particularly useful
for the do_... commands that hook into the help system.
Adapted from a comp.lang.python posting
by Duncan Booth."""
def wrapper(*args, **kw):
return new_fn(*args, **kw)
if old_fn.__doc__:
wrapper.__doc__ = old_fn.__doc__ + additional_text
return wrapper |
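A minimal usage sketch: the returned wrapper runs the new function but reports the old function's docstring plus the extra text.
def original(x):
    """Original docstring."""
    return x

def replacement(x):
    return x * 2

patched = decorate_fn_with_doc(replacement, original, additional_text=" (patched)")
print(patched(3), patched.__doc__)   # 6 Original docstring. (patched)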
def _register_token_network_without_limits(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect '
'channel_participant_deposit_limit',
)
if token_network_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect token_network_deposit_limit',
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi,
address=token_registry_address,
)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(
f'got {version_from_onchain} from the chain, expected '
f'{self.contract_manager.version_string} in the deployment data',
)
command = token_network_registry.functions.createERC20TokenNetwork(
token_address,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address,
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address | Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor. | Below is the the instruction that describes the task:
### Input:
Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
### Response:
def _register_token_network_without_limits(
self,
token_registry_abi: Dict,
token_registry_address: str,
token_address: str,
channel_participant_deposit_limit: Optional[int],
token_network_deposit_limit: Optional[int],
):
"""Register token with a TokenNetworkRegistry contract
with a contracts-version that doesn't require deposit limits in the TokenNetwork
constructor.
"""
if channel_participant_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect '
'channel_participant_deposit_limit',
)
if token_network_deposit_limit:
raise ValueError(
'contracts_version below 0.9.0 does not expect token_network_deposit_limit',
)
token_network_registry = self.web3.eth.contract(
abi=token_registry_abi,
address=token_registry_address,
)
version_from_onchain = token_network_registry.functions.contract_version().call()
if version_from_onchain != self.contract_manager.version_string:
raise RuntimeError(
f'got {version_from_onchain} from the chain, expected '
f'{self.contract_manager.version_string} in the deployment data',
)
command = token_network_registry.functions.createERC20TokenNetwork(
token_address,
)
self.transact(command)
token_network_address = token_network_registry.functions.token_to_token_networks(
token_address,
).call()
token_network_address = to_checksum_address(token_network_address)
LOG.debug(f'TokenNetwork address: {token_network_address}')
return token_network_address |
def _send(key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str value: The properly formatted statsd counter value
"""
if STATSD_PREFIX:
key = '.'.join([STATSD_PREFIX, key])
try:
STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key,
value,
metric_type).encode(),
STATSD_ADDR)
except socket.error:
LOGGER.exception(SOCKET_ERROR) | Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str value: The properly formatted statsd counter value | Below is the the instruction that describes the task:
### Input:
Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str value: The properly formatted statsd counter value
### Response:
def _send(key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str value: The properly formatted statsd counter value
"""
if STATSD_PREFIX:
key = '.'.join([STATSD_PREFIX, key])
try:
STATSD_SOCKET.sendto('{0}:{1}|{2}'.format(key,
value,
metric_type).encode(),
STATSD_ADDR)
except socket.error:
LOGGER.exception(SOCKET_ERROR) |
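For reference, the wire format produced above is the plain statsd line protocol, '<prefix.>key:value|type', sent over UDP. A standalone sketch; the host and port below are the conventional statsd defaults and are assumptions here:
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b'myapp.requests:1|c', ('127.0.0.1', 8125))    # counter increment
sock.sendto(b'myapp.latency:42|ms', ('127.0.0.1', 8125))   # timer, milliseconds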
def sample_surface(mesh, count):
"""
Sample the surface of a mesh, returning the specified
number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point
"""
# len(mesh.faces) float, array of the areas
# of each face of the mesh
area = mesh.area_faces
# total area (float)
area_sum = np.sum(area)
# cumulative area (len(mesh.faces))
area_cum = np.cumsum(area)
face_pick = np.random.random(count) * area_sum
face_index = np.searchsorted(area_cum, face_pick)
# pull triangles into the form of an origin + 2 vectors
tri_origins = mesh.triangles[:, 0]
tri_vectors = mesh.triangles[:, 1:].copy()
tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
# pull the vectors for the faces we are going to sample from
tri_origins = tri_origins[face_index]
tri_vectors = tri_vectors[face_index]
# randomly generate two 0-1 scalar components to multiply edge vectors by
random_lengths = np.random.random((len(tri_vectors), 2, 1))
# points will be distributed on a quadrilateral if we use 2 0-1 samples
# if the two scalar components sum less than 1.0 the point will be
# inside the triangle, so we find vectors longer than 1.0 and
# transform them to be inside the triangle
random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
random_lengths[random_test] -= 1.0
random_lengths = np.abs(random_lengths)
# multiply triangle edge vectors by the random lengths and sum
sample_vector = (tri_vectors * random_lengths).sum(axis=1)
# finally, offset by the origin to generate
# (n,3) points in space on the triangle
samples = sample_vector + tri_origins
return samples, face_index | Sample the surface of a mesh, returning the specified
number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point | Below is the the instruction that describes the task:
### Input:
Sample the surface of a mesh, returning the specified
number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point
### Response:
def sample_surface(mesh, count):
"""
Sample the surface of a mesh, returning the specified
number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point
"""
# len(mesh.faces) float, array of the areas
# of each face of the mesh
area = mesh.area_faces
# total area (float)
area_sum = np.sum(area)
# cumulative area (len(mesh.faces))
area_cum = np.cumsum(area)
face_pick = np.random.random(count) * area_sum
face_index = np.searchsorted(area_cum, face_pick)
# pull triangles into the form of an origin + 2 vectors
tri_origins = mesh.triangles[:, 0]
tri_vectors = mesh.triangles[:, 1:].copy()
tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
# pull the vectors for the faces we are going to sample from
tri_origins = tri_origins[face_index]
tri_vectors = tri_vectors[face_index]
# randomly generate two 0-1 scalar components to multiply edge vectors by
random_lengths = np.random.random((len(tri_vectors), 2, 1))
# points will be distributed on a quadrilateral if we use 2 0-1 samples
# if the two scalar components sum less than 1.0 the point will be
# inside the triangle, so we find vectors longer than 1.0 and
# transform them to be inside the triangle
random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
random_lengths[random_test] -= 1.0
random_lengths = np.abs(random_lengths)
# multiply triangle edge vectors by the random lengths and sum
sample_vector = (tri_vectors * random_lengths).sum(axis=1)
# finally, offset by the origin to generate
# (n,3) points in space on the triangle
samples = sample_vector + tri_origins
return samples, face_index |
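A standalone numpy sketch of the fold trick used above, for a single triangle spanned by two edge vectors (values illustrative):
import numpy as np

origin = np.zeros(3)
edges = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])   # triangle (0,0,0)-(1,0,0)-(0,1,0)
uv = np.random.random((1000, 2, 1))
outside = uv.sum(axis=1).reshape(-1) > 1.0   # samples that fell in the wrong half of the quad
uv[outside] -= 1.0
uv = np.abs(uv)                              # reflect them back inside the triangle
points = (edges * uv).sum(axis=1) + origin   # (1000, 3) points, all with x + y <= 1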
def _init_metadata(self):
"""stub"""
TextAnswerFormRecord._init_metadata(self)
FilesAnswerFormRecord._init_metadata(self)
super(AnswerTextAndFilesMixin, self)._init_metadata() | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_metadata(self):
"""stub"""
TextAnswerFormRecord._init_metadata(self)
FilesAnswerFormRecord._init_metadata(self)
super(AnswerTextAndFilesMixin, self)._init_metadata() |
def rzz(self, theta, qubit1, qubit2):
"""Apply RZZ to circuit."""
return self.append(RZZGate(theta), [qubit1, qubit2], []) | Apply RZZ to circuit. | Below is the the instruction that describes the task:
### Input:
Apply RZZ to circuit.
### Response:
def rzz(self, theta, qubit1, qubit2):
"""Apply RZZ to circuit."""
return self.append(RZZGate(theta), [qubit1, qubit2], []) |
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext):
""" arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; """
from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf
from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType
self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()]
if ctx.ebnfSuffix():
self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix()) | arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; | Below is the the instruction that describes the task:
### Input:
arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET;
### Response:
def visitArrayExpr(self, ctx: jsgParser.ArrayExprContext):
""" arrayExpr: OBRACKET valueType (BAR valueType)* ebnfSuffix? CBRACKET; """
from pyjsg.parser_impl.jsg_ebnf_parser import JSGEbnf
from pyjsg.parser_impl.jsg_valuetype_parser import JSGValueType
self._types = [JSGValueType(self._context, vt) for vt in ctx.valueType()]
if ctx.ebnfSuffix():
self._ebnf = JSGEbnf(self._context, ctx.ebnfSuffix()) |
def first_address(self, skip_network_address=True):
""" Return the first IP address of this network
:param skip_network_address: this flag specifies whether this function returns address of the network \
or returns address that follows address of the network (address, that a host could have)
:return: WIPV4Address
"""
bin_address = self.__address.bin_address()
bin_address_length = len(bin_address)
if self.__mask > (bin_address_length - 2):
skip_network_address = False
for i in range(bin_address_length - self.__mask):
bin_address[self.__mask + i] = 0
if skip_network_address:
bin_address[bin_address_length - 1] = 1
return WIPV4Address(bin_address) | Return the first IP address of this network
:param skip_network_address: this flag specifies whether this function returns address of the network \
or returns address that follows address of the network (address, that a host could have)
:return: WIPV4Address | Below is the the instruction that describes the task:
### Input:
Return the first IP address of this network
:param skip_network_address: this flag specifies whether this function returns address of the network \
or returns address that follows address of the network (address, that a host could have)
:return: WIPV4Address
### Response:
def first_address(self, skip_network_address=True):
""" Return the first IP address of this network
:param skip_network_address: this flag specifies whether this function returns address of the network \
or returns address that follows address of the network (address, that a host could have)
:return: WIPV4Address
"""
bin_address = self.__address.bin_address()
bin_address_length = len(bin_address)
if self.__mask > (bin_address_length - 2):
skip_network_address = False
for i in range(bin_address_length - self.__mask):
bin_address[self.__mask + i] = 0
if skip_network_address:
bin_address[bin_address_length - 1] = 1
return WIPV4Address(bin_address) |
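For intuition, the same idea expressed with the standard-library ipaddress module (WIPV4Address itself comes from the surrounding library):
import ipaddress

net = ipaddress.ip_network('192.168.0.0/24')
print(net.network_address)   # 192.168.0.0 -- what skip_network_address=False returns
print(next(net.hosts()))     # 192.168.0.1 -- the first host address, as with the default True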
def onNicknameChange(
self,
mid=None,
author_id=None,
changed_for=None,
new_nickname=None,
thread_id=None,
thread_type=ThreadType.USER,
ts=None,
metadata=None,
msg=None,
):
"""
Called when the client is listening, and somebody changes the nickname of a person
:param mid: The action ID
:param author_id: The ID of the person who changed the nickname
:param changed_for: The ID of the person who got their nickname changed
:param new_nickname: The new nickname
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data received
:type thread_type: models.ThreadType
"""
log.info(
"Nickname change from {} in {} ({}) for {}: {}".format(
author_id, thread_id, thread_type.name, changed_for, new_nickname
)
) | Called when the client is listening, and somebody changes the nickname of a person
:param mid: The action ID
:param author_id: The ID of the person who changed the nickname
:param changed_for: The ID of the person who got their nickname changed
:param new_nickname: The new nickname
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data received
:type thread_type: models.ThreadType | Below is the the instruction that describes the task:
### Input:
Called when the client is listening, and somebody changes the nickname of a person
:param mid: The action ID
:param author_id: The ID of the person who changed the nickname
:param changed_for: The ID of the person who got their nickname changed
:param new_nickname: The new nickname
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data received
:type thread_type: models.ThreadType
### Response:
def onNicknameChange(
self,
mid=None,
author_id=None,
changed_for=None,
new_nickname=None,
thread_id=None,
thread_type=ThreadType.USER,
ts=None,
metadata=None,
msg=None,
):
"""
Called when the client is listening, and somebody changes the nickname of a person
:param mid: The action ID
:param author_id: The ID of the person who changed the nickname
:param changed_for: The ID of the person whom got their nickname changed
:param new_nickname: The new nickname
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
:param metadata: Extra metadata about the action
:param msg: A full set of the data received
:type thread_type: models.ThreadType
"""
log.info(
"Nickname change from {} in {} ({}) for {}: {}".format(
author_id, thread_id, thread_type.name, changed_for, new_nickname
)
) |
def wrap_exception(func: Callable) -> Callable:
"""Decorator to wrap pygatt exceptions into BluetoothBackendException."""
try:
# only do the wrapping if pygatt is installed.
# otherwise it's pointless anyway
from pygatt.backends.bgapi.exceptions import BGAPIError
from pygatt.exceptions import NotConnectedError
except ImportError:
return func
def _func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except BGAPIError as exception:
raise BluetoothBackendException() from exception
except NotConnectedError as exception:
raise BluetoothBackendException() from exception
return _func_wrapper | Decorator to wrap pygatt exceptions into BluetoothBackendException. | Below is the the instruction that describes the task:
### Input:
Decorator to wrap pygatt exceptions into BluetoothBackendException.
### Response:
def wrap_exception(func: Callable) -> Callable:
"""Decorator to wrap pygatt exceptions into BluetoothBackendException."""
try:
# only do the wrapping if pygatt is installed.
# otherwise it's pointless anyway
from pygatt.backends.bgapi.exceptions import BGAPIError
from pygatt.exceptions import NotConnectedError
except ImportError:
return func
def _func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except BGAPIError as exception:
raise BluetoothBackendException() from exception
except NotConnectedError as exception:
raise BluetoothBackendException() from exception
return _func_wrapper |
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
aryExclCnd=None, varTmpOvsmpl=1000.):
"""
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
if aryExclCnd is not None:
for cond in aryExclCnd:
aryOns = aryOns[aryCnd != cond]
aryDrt = aryDrt[aryCnd != cond]
aryCnd = aryCnd[aryCnd != cond]
resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
aryOns = np.asarray(aryOns, dtype=np.float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
tmp = np.zeros(int(varNumVol * varTr/resolution))
onset_c = aryOns[aryCnd == c]
duration_c = aryDrt[aryCnd == c]
onset_idx = np.round(onset_c / resolution).astype(np.int)
duration_idx = np.round(duration_c / resolution).astype(np.int)
aux = np.arange(int(varNumVol * varTr/resolution))
for start, dur in zip(onset_idx, duration_idx):
lgc = np.logical_and(aux >= start, aux < start + dur)
tmp = tmp + lgc
assert np.all(np.less(tmp, 2))
boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16') | Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation | Below is the the instruction that describes the task:
### Input:
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
### Response:
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol,
aryExclCnd=None, varTmpOvsmpl=1000.):
"""
Creation of condition time courses in temporally upsampled space.
Parameters
----------
aryCnd : np.array
1D array with condition identifiers (every condition has its own int)
aryOns : np.array, same len as aryCnd
1D array with condition onset times in seconds.
aryDrt : np.array, same len as aryCnd
1D array with condition durations of different conditions in seconds.
varTr : float, positive
Time to repeat (TR) of the (fMRI) experiment.
varNumVol : float, positive
Number of volumes of the (fMRI) data.
aryExclCnd : array
1D array containing condition identifiers for conditions to be excluded
varTmpOvsmpl : float, positive
Factor by which the time courses should be temporally upsampled.
Returns
-------
aryBxCrOut : np.array, float16
Condition time courses in temporally upsampled space.
References:
-----
[1] https://github.com/fabianp/hrf_estimation
"""
if aryExclCnd is not None:
for cond in aryExclCnd:
aryOns = aryOns[aryCnd != cond]
aryDrt = aryDrt[aryCnd != cond]
aryCnd = aryCnd[aryCnd != cond]
resolution = varTr / float(varTmpOvsmpl)
aryCnd = np.asarray(aryCnd)
aryOns = np.asarray(aryOns, dtype=np.float)
unique_conditions = np.sort(np.unique(aryCnd))
boxcar = []
for c in unique_conditions:
tmp = np.zeros(int(varNumVol * varTr/resolution))
onset_c = aryOns[aryCnd == c]
duration_c = aryDrt[aryCnd == c]
onset_idx = np.round(onset_c / resolution).astype(np.int)
duration_idx = np.round(duration_c / resolution).astype(np.int)
aux = np.arange(int(varNumVol * varTr/resolution))
for start, dur in zip(onset_idx, duration_idx):
lgc = np.logical_and(aux >= start, aux < start + dur)
tmp = tmp + lgc
assert np.all(np.less(tmp, 2))
boxcar.append(tmp)
aryBxCrOut = np.array(boxcar).T
if aryBxCrOut.shape[1] == 1:
aryBxCrOut = np.squeeze(aryBxCrOut)
return aryBxCrOut.astype('float16') |
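A small usage sketch with invented event timings (assumes create_boxcar and numpy are importable); a TR of 2 s upsampled by a factor of 8 gives a 0.25 s grid:
import numpy as np

aryCnd = np.array([1, 2, 1])           # condition ids
aryOns = np.array([0.0, 4.0, 10.0])    # onsets in seconds
aryDrt = np.array([2.0, 2.0, 2.0])     # durations in seconds
boxcars = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2.0, varNumVol=10,
                        varTmpOvsmpl=8.)
print(boxcars.shape)   # (80, 2): 20 s of data on a 0.25 s grid, one column per condition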
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.id = utils.cast(int, data.attrib.get('id'))
self.serverId = utils.cast(int, data.attrib.get('serverId'))
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.name = data.attrib.get('name')
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.pending = utils.cast(bool, data.attrib.get('pending')) | Load attribute values from Plex XML response. | Below is the the instruction that describes the task:
### Input:
Load attribute values from Plex XML response.
### Response:
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.id = utils.cast(int, data.attrib.get('id'))
self.serverId = utils.cast(int, data.attrib.get('serverId'))
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.name = data.attrib.get('name')
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.pending = utils.cast(bool, data.attrib.get('pending')) |
def ensure_started(self):
"""
Start a server and waits (blocking wait) until it is fully started.
"""
# server is either starting or stopping (or error)
if self.state in ['maintenance', 'error']:
self._wait_for_state_change(['stopped', 'started'])
if self.state == 'stopped':
self.start()
self._wait_for_state_change(['started'])
if self.state == 'started':
return True
else:
# something went wrong, fail explicitly
raise Exception('unknown server state: ' + self.state) | Start a server and waits (blocking wait) until it is fully started. | Below is the the instruction that describes the task:
### Input:
Start a server and waits (blocking wait) until it is fully started.
### Response:
def ensure_started(self):
"""
Start a server and waits (blocking wait) until it is fully started.
"""
# server is either starting or stopping (or error)
if self.state in ['maintenance', 'error']:
self._wait_for_state_change(['stopped', 'started'])
if self.state == 'stopped':
self.start()
self._wait_for_state_change(['started'])
if self.state == 'started':
return True
else:
# something went wrong, fail explicitly
raise Exception('unknown server state: ' + self.state) |
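_wait_for_state_change is part of the surrounding class and not shown; a generic sketch of such a blocking poll (not the library's actual implementation) might look like:
import time

def wait_for_state_change(get_state, wanted_states, interval=1.0, timeout=300):
    # Poll a state getter until it reaches one of the wanted states or we time out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_state() in wanted_states:
            return
        time.sleep(interval)
    raise TimeoutError('state did not reach %r within %ss' % (wanted_states, timeout))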
def kdeconf(kde,conf=0.683,xmin=None,xmax=None,npts=500,
shortest=True,conftol=0.001,return_max=False):
"""
Returns desired confidence interval for provided KDE object
"""
if xmin is None:
xmin = kde.dataset.min()
if xmax is None:
xmax = kde.dataset.max()
x = np.linspace(xmin,xmax,npts)
return conf_interval(x,kde(x),shortest=shortest,conf=conf,
conftol=conftol,return_max=return_max) | Returns desired confidence interval for provided KDE object | Below is the the instruction that describes the task:
### Input:
Returns desired confidence interval for provided KDE object
### Response:
def kdeconf(kde,conf=0.683,xmin=None,xmax=None,npts=500,
shortest=True,conftol=0.001,return_max=False):
"""
Returns desired confidence interval for provided KDE object
"""
if xmin is None:
xmin = kde.dataset.min()
if xmax is None:
xmax = kde.dataset.max()
x = np.linspace(xmin,xmax,npts)
return conf_interval(x,kde(x),shortest=shortest,conf=conf,
conftol=conftol,return_max=return_max) |
def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
) | Create an advice from a CSV row | Below is the the instruction that describes the task:
### Input:
Create an advice from a CSV row
### Response:
def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
) |
def get_class_students(self, xqdm, kcdm, jxbh):
"""
Teaching class query: look up all students of the specified teaching class
@structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}
:param xqdm: semester code
:param kcdm: course code
:param jxbh: teaching class number
"""
return self.query(GetClassStudents(xqdm, kcdm, jxbh)) | 教学班查询, 查询指定教学班的所有学生
@structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}
        :param xqdm: semester code
        :param kcdm: course code
        :param jxbh: teaching class number | Below is the instruction that describes the task:
### Input:
Teaching class query: look up all students of the specified teaching class
@structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}
:param xqdm: semester code
:param kcdm: course code
:param jxbh: teaching class number
### Response:
def get_class_students(self, xqdm, kcdm, jxbh):
"""
    Teaching class query: look up all students of the specified teaching class
@structure {'学期': str, '班级名称': str, '学生': [{'姓名': str, '学号': int}]}
    :param xqdm: semester code
    :param kcdm: course code
    :param jxbh: teaching class number
"""
return self.query(GetClassStudents(xqdm, kcdm, jxbh)) |
def increase_indent(func):
    """Decorator for making the debug output indented."""
def wrapper(*args, **kwargs):
global _debug_indent
_debug_indent += 1
result = func(*args, **kwargs)
_debug_indent -= 1
return result
    return wrapper | Decorator for making the debug output indented. | Below is the instruction that describes the task:
### Input:
Decorator for making the debug output indented.
### Response:
def increase_indent(func):
    """Decorator for making the debug output indented."""
def wrapper(*args, **kwargs):
global _debug_indent
_debug_indent += 1
result = func(*args, **kwargs)
_debug_indent -= 1
return result
return wrapper |
def move(self, target, home_flagged_axes=False):
'''
        Move to the `target` Smoothieware coordinate, along any of the six
axes, XYZABC.
target: dict
dict setting the coordinate that Smoothieware will be at when
`move()` returns. `target` keys are the axis in upper-case, and the
values are the coordinate in millimeters (float)
home_flagged_axes: boolean (default=False)
If set to `True`, each axis included within the target coordinate
may be homed before moving, determined by Smoothieware's internal
homing-status flags (`True` means it has already homed). All axes'
flags are set to `False` by Smoothieware under three conditions:
1) Smoothieware boots or resets, 2) if a HALT gcode or signal
            is sent, or 3) a homing/limitswitch error occurred.
'''
from numpy import isclose
self.run_flag.wait()
def valid_movement(coords, axis):
return not (
(axis in DISABLE_AXES) or
(coords is None) or
isclose(coords, self.position[axis])
)
def create_coords_list(coords_dict):
return [
axis + str(round(coords, GCODE_ROUNDING_PRECISION))
for axis, coords in sorted(coords_dict.items())
if valid_movement(coords, axis)
]
backlash_target = target.copy()
backlash_target.update({
axis: value + PLUNGER_BACKLASH_MM
for axis, value in sorted(target.items())
if axis in 'BC' and self.position[axis] < value
})
target_coords = create_coords_list(target)
backlash_coords = create_coords_list(backlash_target)
if target_coords:
non_moving_axes = ''.join([
ax
for ax in AXES
if ax not in target.keys()
])
self.dwell_axes(non_moving_axes)
self.activate_axes(target.keys())
# include the current-setting gcodes within the moving gcode string
# to reduce latency, since we're setting current so much
command = self._generate_current_command()
if backlash_coords != target_coords:
command += ' ' + GCODES['MOVE'] + ''.join(backlash_coords)
command += ' ' + GCODES['MOVE'] + ''.join(target_coords)
try:
for axis in target.keys():
self.engaged_axes[axis] = True
if home_flagged_axes:
self.home_flagged_axes(''.join(list(target.keys())))
log.debug("move: {}".format(command))
# TODO (andy) a movement's timeout should be calculated by
# how long the movement is expected to take. A default timeout
# of 30 seconds prevents any movements that take longer
self._send_command(command, timeout=DEFAULT_MOVEMENT_TIMEOUT)
finally:
# dwell pipette motors because they get hot
plunger_axis_moved = ''.join(set('BC') & set(target.keys()))
if plunger_axis_moved:
self.dwell_axes(plunger_axis_moved)
self._set_saved_current()
        self._update_position(target) | Move to the `target` Smoothieware coordinate, along any of the six
axes, XYZABC.
target: dict
dict setting the coordinate that Smoothieware will be at when
`move()` returns. `target` keys are the axis in upper-case, and the
values are the coordinate in millimeters (float)
home_flagged_axes: boolean (default=False)
If set to `True`, each axis included within the target coordinate
may be homed before moving, determined by Smoothieware's internal
homing-status flags (`True` means it has already homed). All axes'
flags are set to `False` by Smoothieware under three conditions:
1) Smoothieware boots or resets, 2) if a HALT gcode or signal
        is sent, or 3) a homing/limitswitch error occurred. | Below is the instruction that describes the task:
### Input:
Move to the `target` Smoothieware coordinate, along any of the six
axes, XYZABC.
target: dict
dict setting the coordinate that Smoothieware will be at when
`move()` returns. `target` keys are the axis in upper-case, and the
values are the coordinate in millimeters (float)
home_flagged_axes: boolean (default=False)
If set to `True`, each axis included within the target coordinate
may be homed before moving, determined by Smoothieware's internal
homing-status flags (`True` means it has already homed). All axes'
flags are set to `False` by Smoothieware under three conditions:
1) Smoothieware boots or resets, 2) if a HALT gcode or signal
    is sent, or 3) a homing/limitswitch error occurred.
### Response:
def move(self, target, home_flagged_axes=False):
'''
    Move to the `target` Smoothieware coordinate, along any of the six
axes, XYZABC.
target: dict
dict setting the coordinate that Smoothieware will be at when
`move()` returns. `target` keys are the axis in upper-case, and the
values are the coordinate in millimeters (float)
home_flagged_axes: boolean (default=False)
If set to `True`, each axis included within the target coordinate
may be homed before moving, determined by Smoothieware's internal
homing-status flags (`True` means it has already homed). All axes'
flags are set to `False` by Smoothieware under three conditions:
1) Smoothieware boots or resets, 2) if a HALT gcode or signal
        is sent, or 3) a homing/limitswitch error occurred.
'''
from numpy import isclose
self.run_flag.wait()
def valid_movement(coords, axis):
return not (
(axis in DISABLE_AXES) or
(coords is None) or
isclose(coords, self.position[axis])
)
def create_coords_list(coords_dict):
return [
axis + str(round(coords, GCODE_ROUNDING_PRECISION))
for axis, coords in sorted(coords_dict.items())
if valid_movement(coords, axis)
]
backlash_target = target.copy()
backlash_target.update({
axis: value + PLUNGER_BACKLASH_MM
for axis, value in sorted(target.items())
if axis in 'BC' and self.position[axis] < value
})
target_coords = create_coords_list(target)
backlash_coords = create_coords_list(backlash_target)
if target_coords:
non_moving_axes = ''.join([
ax
for ax in AXES
if ax not in target.keys()
])
self.dwell_axes(non_moving_axes)
self.activate_axes(target.keys())
# include the current-setting gcodes within the moving gcode string
# to reduce latency, since we're setting current so much
command = self._generate_current_command()
if backlash_coords != target_coords:
command += ' ' + GCODES['MOVE'] + ''.join(backlash_coords)
command += ' ' + GCODES['MOVE'] + ''.join(target_coords)
try:
for axis in target.keys():
self.engaged_axes[axis] = True
if home_flagged_axes:
self.home_flagged_axes(''.join(list(target.keys())))
log.debug("move: {}".format(command))
# TODO (andy) a movement's timeout should be calculated by
# how long the movement is expected to take. A default timeout
# of 30 seconds prevents any movements that take longer
self._send_command(command, timeout=DEFAULT_MOVEMENT_TIMEOUT)
finally:
# dwell pipette motors because they get hot
plunger_axis_moved = ''.join(set('BC') & set(target.keys()))
if plunger_axis_moved:
self.dwell_axes(plunger_axis_moved)
self._set_saved_current()
self._update_position(target) |
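
A sketch of how a caller might drive move(), assuming an already-connected driver object; get_connected_driver() and the coordinate values are hypothetical, not part of the source above.
driver = get_connected_driver()  # hypothetical helper returning a connected Smoothie driver
target = {'X': 150.0, 'Y': 150.0, 'Z': 100.0, 'A': 100.0}  # axis -> millimeters
driver.move(target, home_flagged_axes=True)  # homes any un-homed axes first, then moves
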
def find_last(fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
    it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try:
fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try:
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = cBytesIO(data[index:])
best_page = None
try:
page = OggPage(bytesobj)
except error:
pass
else:
if page.serial == serial:
if page.last:
return page
else:
best_page = page
else:
best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page | Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
    it must read the whole stream.
This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first. | Below is the instruction that describes the task:
### Input:
Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
### Response:
def find_last(fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
    it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try:
fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try:
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = cBytesIO(data[index:])
best_page = None
try:
page = OggPage(bytesobj)
except error:
pass
else:
if page.serial == serial:
if page.last:
return page
else:
best_page = page
else:
best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page |
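
A usage sketch, assuming the OggPage class from the same (mutagen-style) module is importable and that 'example.ogg' is a valid Ogg file.
with open('example.ogg', 'rb') as fileobj:
    first = OggPage(fileobj)                 # parse the first page to learn its stream serial
    fileobj.seek(0)
    last = find_last(fileobj, first.serial)  # last page of that stream, or None if none found
    print(last.sequence if last else None)   # .sequence is the page sequence number
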
def setMood(self, mood):
"""
Update the activity message for the current user.
Args:
mood (str): new mood message
"""
self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId),
auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}})
self.user.mood = SkypeUser.Mood(plain=mood) if mood else None | Update the activity message for the current user.
Args:
        mood (str): new mood message | Below is the instruction that describes the task:
### Input:
Update the activity message for the current user.
Args:
mood (str): new mood message
### Response:
def setMood(self, mood):
"""
Update the activity message for the current user.
Args:
mood (str): new mood message
"""
self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId),
auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}})
self.user.mood = SkypeUser.Mood(plain=mood) if mood else None |
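
A usage sketch, assuming the SkPy library this method belongs to; the credentials are placeholders.
from skpy import Skype

sk = Skype('[email protected]', 'password')  # placeholder sign-in
sk.setMood('Out for lunch')               # passing None instead clears the mood message
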
def configs_for_writer(writer=None, ppp_config_dir=None):
"""Generator of writer configuration files for one or more writers
Args:
writer (Optional[str]): Yield configs only for this writer
ppp_config_dir (Optional[str]): Additional configuration directory
to search for writer configuration files.
Returns: Generator of lists of configuration files
"""
search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
if writer is not None:
if not isinstance(writer, (list, tuple)):
writer = [writer]
# given a config filename or writer name
config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]
else:
writer_configs = glob_config(os.path.join('writers', '*.yaml'),
*search_paths)
config_files = set(writer_configs)
for config_file in config_files:
config_basename = os.path.basename(config_file)
writer_configs = config_search_paths(
os.path.join("writers", config_basename), *search_paths)
if not writer_configs:
LOG.warning("No writer configs found for '%s'", writer)
continue
yield writer_configs | Generator of writer configuration files for one or more writers
Args:
writer (Optional[str]): Yield configs only for this writer
ppp_config_dir (Optional[str]): Additional configuration directory
to search for writer configuration files.
    Returns: Generator of lists of configuration files | Below is the instruction that describes the task:
### Input:
Generator of writer configuration files for one or more writers
Args:
writer (Optional[str]): Yield configs only for this writer
ppp_config_dir (Optional[str]): Additional configuration directory
to search for writer configuration files.
Returns: Generator of lists of configuration files
### Response:
def configs_for_writer(writer=None, ppp_config_dir=None):
"""Generator of writer configuration files for one or more writers
Args:
writer (Optional[str]): Yield configs only for this writer
ppp_config_dir (Optional[str]): Additional configuration directory
to search for writer configuration files.
Returns: Generator of lists of configuration files
"""
search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
if writer is not None:
if not isinstance(writer, (list, tuple)):
writer = [writer]
# given a config filename or writer name
config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]
else:
writer_configs = glob_config(os.path.join('writers', '*.yaml'),
*search_paths)
config_files = set(writer_configs)
for config_file in config_files:
config_basename = os.path.basename(config_file)
writer_configs = config_search_paths(
os.path.join("writers", config_basename), *search_paths)
if not writer_configs:
LOG.warning("No writer configs found for '%s'", writer)
continue
yield writer_configs |
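
A usage sketch: iterate the generator for one writer; the writer name and extra config directory are illustrative.
for config_files in configs_for_writer(writer='geotiff', ppp_config_dir='/opt/satpy/etc'):
    print(config_files)  # list of writers/geotiff.yaml paths found on the search path
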
def update_expenditure_entry(database, entry):
    """Update a record of an expenditure report in the provided database.
@param db: The MongoDB database to operate on. The expenditures collection
will be used from this database.
@type db: pymongo.database.Database
@param entry: The entry to insert into the database, updating the entry with
the same recordID if one exists.
@type entry: dict
"""
entry = clean_entry(entry)
database.expenditures.update(
{'recordID': entry['recordID']},
{'$set': entry},
upsert=True
    ) | Update a record of an expenditure report in the provided database.
@param db: The MongoDB database to operate on. The expenditures collection
will be used from this database.
@type db: pymongo.database.Database
@param entry: The entry to insert into the database, updating the entry with
the same recordID if one exists.
    @type entry: dict | Below is the instruction that describes the task:
### Input:
Update a record of an expenditure report in the provided database.
@param db: The MongoDB database to operate on. The expenditures collection
will be used from this database.
@type db: pymongo.database.Database
@param entry: The entry to insert into the database, updating the entry with
the same recordID if one exists.
@type entry: dict
### Response:
def update_expenditure_entry(database, entry):
    """Update a record of an expenditure report in the provided database.
@param db: The MongoDB database to operate on. The expenditures collection
will be used from this database.
@type db: pymongo.database.Database
@param entry: The entry to insert into the database, updating the entry with
the same recordID if one exists.
@type entry: dict
"""
entry = clean_entry(entry)
database.expenditures.update(
{'recordID': entry['recordID']},
{'$set': entry},
upsert=True
) |
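
A sketch of calling update_expenditure_entry against a local MongoDB, assuming pymongo is installed and clean_entry() from the same module is importable; the database name and record fields are illustrative.
from pymongo import MongoClient

db = MongoClient('localhost', 27017)['campaign_finance']  # illustrative database name
entry = {'recordID': 'R-0001', 'amount': '125.00', 'payee': 'Example Vendor'}
update_expenditure_entry(db, entry)  # upserts on recordID
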
def absl_flags():
"""
Extracts absl-py flags that the user has specified and outputs their
key-value mapping.
By default, extracts only those flags in the current __package__
and mainfile.
Useful to put into a trial's param_map.
"""
# TODO: need same thing for argparse
flags_dict = flags.FLAGS.flags_by_module_dict()
# only include parameters from modules the user probably cares about
def _relevant_module(module_name):
if __package__ and __package__ in module_name:
return True
if module_name == sys.argv[0]:
return True
return False
return {
flag.name: flag.value for module, flags in flags_dict.items()
for flag in flags if _relevant_module(module)} | Extracts absl-py flags that the user has specified and outputs their
key-value mapping.
By default, extracts only those flags in the current __package__
and mainfile.
    Useful to put into a trial's param_map. | Below is the instruction that describes the task:
### Input:
Extracts absl-py flags that the user has specified and outputs their
key-value mapping.
By default, extracts only those flags in the current __package__
and mainfile.
Useful to put into a trial's param_map.
### Response:
def absl_flags():
"""
Extracts absl-py flags that the user has specified and outputs their
key-value mapping.
By default, extracts only those flags in the current __package__
and mainfile.
Useful to put into a trial's param_map.
"""
# TODO: need same thing for argparse
flags_dict = flags.FLAGS.flags_by_module_dict()
# only include parameters from modules the user probably cares about
def _relevant_module(module_name):
if __package__ and __package__ in module_name:
return True
if module_name == sys.argv[0]:
return True
return False
return {
flag.name: flag.value for module, flags in flags_dict.items()
for flag in flags if _relevant_module(module)} |
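
A sketch of using absl_flags() from a script, assuming absl-py is installed; the flag names are illustrative.
import sys
from absl import flags

flags.DEFINE_integer('batch_size', 32, 'minibatch size')
flags.DEFINE_float('lr', 1e-3, 'learning rate')
flags.FLAGS(sys.argv)     # parse the command line

param_map = absl_flags()  # e.g. {'batch_size': 32, 'lr': 0.001} for flags from relevant modules
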
def initialize(self):
"""Start pydoc server"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
    self.start_server() | Start pydoc server | Below is the instruction that describes the task:
### Input:
Start pydoc server
### Response:
def initialize(self):
"""Start pydoc server"""
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
self.start_server() |
def _decrypt_data_key(
self, encrypted_data_key: EncryptedDataKey, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text]
) -> DataKey:
"""Decrypt an encrypted data key and return the plaintext.
:param data_key: Encrypted data key
:type data_key: aws_encryption_sdk.structures.EncryptedDataKey
:param algorithm: Algorithm object which directs how this Master Key will encrypt the data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in decryption
:returns: Data key containing decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key
"""
if encrypted_data_key.encrypted_data_key != self._encrypted_data_key:
raise DecryptKeyError(
'Master Key "{provider}" unable to decrypt data key'.format(provider=self.key_provider)
)
return self._generate_data_key(algorithm, encryption_context) | Decrypt an encrypted data key and return the plaintext.
:param data_key: Encrypted data key
:type data_key: aws_encryption_sdk.structures.EncryptedDataKey
:param algorithm: Algorithm object which directs how this Master Key will encrypt the data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in decryption
:returns: Data key containing decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
    :raises DecryptKeyError: if Master Key is unable to decrypt data key | Below is the instruction that describes the task:
### Input:
Decrypt an encrypted data key and return the plaintext.
:param data_key: Encrypted data key
:type data_key: aws_encryption_sdk.structures.EncryptedDataKey
:param algorithm: Algorithm object which directs how this Master Key will encrypt the data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in decryption
:returns: Data key containing decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key
### Response:
def _decrypt_data_key(
self, encrypted_data_key: EncryptedDataKey, algorithm: AlgorithmSuite, encryption_context: Dict[Text, Text]
) -> DataKey:
"""Decrypt an encrypted data key and return the plaintext.
:param data_key: Encrypted data key
:type data_key: aws_encryption_sdk.structures.EncryptedDataKey
:param algorithm: Algorithm object which directs how this Master Key will encrypt the data key
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param dict encryption_context: Encryption context to use in decryption
:returns: Data key containing decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key
"""
if encrypted_data_key.encrypted_data_key != self._encrypted_data_key:
raise DecryptKeyError(
'Master Key "{provider}" unable to decrypt data key'.format(provider=self.key_provider)
)
return self._generate_data_key(algorithm, encryption_context) |
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in y.values()] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, str):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(
columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in meta_vals.items():
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
# forcing dtype to object to avoid the metadata being casted to string
result[k] = np.array(v, dtype=object).repeat(lengths)
return result | Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
    1 2 | Below is the instruction that describes the task:
### Input:
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
### Response:
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
meta_prefix : string, default None
record_prefix : string, default None
If True, prefix records with dotted (?) path, e.g. foo.bar.field if
path to records is ['foo', 'bar']
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in y.values()] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, str):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(
columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in meta_vals.items():
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
# forcing dtype to object to avoid the metadata being casted to string
result[k] = np.array(v, dtype=object).repeat(lengths)
return result |
def start(self):
""" Make an HTTP request with a specific method """
# TODO : Use Timeout here and _ignore_request_idle
from .nurest_session import NURESTSession
session = NURESTSession.get_current_session()
if self.async:
thread = threading.Thread(target=self._make_request, kwargs={'session': session})
thread.is_daemon = False
thread.start()
return self.transaction_id
    return self._make_request(session=session) | Make an HTTP request with a specific method | Below is the instruction that describes the task:
### Input:
Make an HTTP request with a specific method
### Response:
def start(self):
""" Make an HTTP request with a specific method """
# TODO : Use Timeout here and _ignore_request_idle
from .nurest_session import NURESTSession
session = NURESTSession.get_current_session()
if self.async:
thread = threading.Thread(target=self._make_request, kwargs={'session': session})
thread.is_daemon = False
thread.start()
return self.transaction_id
return self._make_request(session=session) |
def _ls_sites(path):
"""
List only sites in the domain_sites() to ensure we co-exist with other projects
"""
with cd(path):
sites = run('ls').split('\n')
doms = [d.name for d in domain_sites()]
dom_sites = []
for s in sites:
ds = s.split('-')[0]
ds = ds.replace('_','.')
if ds in doms and s not in dom_sites:
dom_sites.append(s)
    return dom_sites | List only sites in the domain_sites() to ensure we co-exist with other projects | Below is the instruction that describes the task:
### Input:
List only sites in the domain_sites() to ensure we co-exist with other projects
### Response:
def _ls_sites(path):
"""
List only sites in the domain_sites() to ensure we co-exist with other projects
"""
with cd(path):
sites = run('ls').split('\n')
doms = [d.name for d in domain_sites()]
dom_sites = []
for s in sites:
ds = s.split('-')[0]
ds = ds.replace('_','.')
if ds in doms and s not in dom_sites:
dom_sites.append(s)
return dom_sites |
def daemonize():
"""
Daemonize the program, ie. make it run in the "background", detach
it from its controlling terminal and from its controlling process
group session.
NOTES:
- This function also umask(0) and chdir("/")
- stdin, stdout, and stderr are redirected from/to /dev/null
SEE ALSO:
http://www.unixguide.net/unix/programming/1.7.shtml
"""
try:
pid = os.fork()
if pid > 0:
os._exit(0) # pylint: disable-msg=W0212
except OSError, e:
log.exception("first fork() failed: %d (%s)", e.errno, e.strerror)
sys.exit(1)
os.setsid()
os.umask(0)
os.chdir("/")
try:
pid = os.fork()
if pid > 0:
os._exit(0) # pylint: disable-msg=W0212
except OSError, e:
log.exception("second fork() failed: %d (%s)", e.errno, e.strerror)
sys.exit(1)
try:
devnull_fd = os.open(os.devnull, os.O_RDWR)
for stdf in (sys.__stdout__, sys.__stderr__):
try:
stdf.flush()
except Exception: # pylint: disable-msg=W0703,W0704
pass
for stdf in (sys.__stdin__, sys.__stdout__, sys.__stderr__):
try:
os.dup2(devnull_fd, stdf.fileno())
except OSError: # pylint: disable-msg=W0704
pass
except Exception: # pylint: disable-msg=W0703
log.exception("error during file descriptor redirection") | Daemonize the program, ie. make it run in the "background", detach
it from its controlling terminal and from its controlling process
group session.
NOTES:
- This function also umask(0) and chdir("/")
- stdin, stdout, and stderr are redirected from/to /dev/null
SEE ALSO:
    http://www.unixguide.net/unix/programming/1.7.shtml | Below is the instruction that describes the task:
### Input:
Daemonize the program, ie. make it run in the "background", detach
it from its controlling terminal and from its controlling process
group session.
NOTES:
- This function also umask(0) and chdir("/")
- stdin, stdout, and stderr are redirected from/to /dev/null
SEE ALSO:
http://www.unixguide.net/unix/programming/1.7.shtml
### Response:
def daemonize():
"""
Daemonize the program, ie. make it run in the "background", detach
it from its controlling terminal and from its controlling process
group session.
NOTES:
- This function also umask(0) and chdir("/")
- stdin, stdout, and stderr are redirected from/to /dev/null
SEE ALSO:
http://www.unixguide.net/unix/programming/1.7.shtml
"""
try:
pid = os.fork()
if pid > 0:
os._exit(0) # pylint: disable-msg=W0212
except OSError, e:
log.exception("first fork() failed: %d (%s)", e.errno, e.strerror)
sys.exit(1)
os.setsid()
os.umask(0)
os.chdir("/")
try:
pid = os.fork()
if pid > 0:
os._exit(0) # pylint: disable-msg=W0212
except OSError, e:
log.exception("second fork() failed: %d (%s)", e.errno, e.strerror)
sys.exit(1)
try:
devnull_fd = os.open(os.devnull, os.O_RDWR)
for stdf in (sys.__stdout__, sys.__stderr__):
try:
stdf.flush()
except Exception: # pylint: disable-msg=W0703,W0704
pass
for stdf in (sys.__stdin__, sys.__stdout__, sys.__stderr__):
try:
os.dup2(devnull_fd, stdf.fileno())
except OSError: # pylint: disable-msg=W0704
pass
except Exception: # pylint: disable-msg=W0703
log.exception("error during file descriptor redirection") |
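
A usage sketch: detach first, then run the long-lived service loop; run_service_cycle() is a hypothetical placeholder for the real work (note the function above uses Python 2 "except OSError, e:" syntax).
import time

if __name__ == '__main__':
    daemonize()              # fork twice, detach from the terminal and session
    while True:
        run_service_cycle()  # hypothetical service body
        time.sleep(60)
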
def get_instance(self, payload):
"""
Build an instance of WorkflowRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
"""
return WorkflowRealTimeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
workflow_sid=self._solution['workflow_sid'],
) | Build an instance of WorkflowRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance | Below is the instruction that describes the task:
### Input:
Build an instance of WorkflowRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of WorkflowRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsInstance
"""
return WorkflowRealTimeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
workflow_sid=self._solution['workflow_sid'],
) |
def is_transaction_invalidated(transaction, state_change):
""" True if the `transaction` is made invalid by `state_change`.
Some transactions will fail due to race conditions. The races are:
- Another transaction which has the same side effect is executed before.
- Another transaction which *invalidates* the state of the smart contract
required by the local transaction is executed before it.
The first case is handled by the predicate `is_transaction_effect_satisfied`,
where a transaction from a different source which does the same thing is
considered. This predicate handles the second scenario.
A transaction can **only** invalidate another iff both share a valid
initial state but a different end state.
Valid example:
A close can invalidate a deposit, because both a close and a deposit
can be executed from an opened state (same initial state), but a close
transaction will transition the channel to a closed state which doesn't
allow for deposits (different end state).
Invalid example:
A settle transaction cannot invalidate a deposit because a settle is
only allowed for the closed state and deposits are only allowed for
the open state. In such a case a deposit should never have been sent.
The deposit transaction for an invalid state is a bug and not a
transaction which was invalidated.
"""
# Most transactions cannot be invalidated by others. These are:
#
# - close transactions
# - settle transactions
# - batch unlocks
#
# Deposits and withdraws are invalidated by the close, but these are not
# made atomic through the WAL.
is_our_failed_update_transfer = (
isinstance(state_change, ContractReceiveChannelSettled) and
isinstance(transaction, ContractSendChannelUpdateTransfer) and
state_change.token_network_identifier == transaction.token_network_identifier and
state_change.channel_identifier == transaction.channel_identifier
)
if is_our_failed_update_transfer:
return True
return False | True if the `transaction` is made invalid by `state_change`.
Some transactions will fail due to race conditions. The races are:
- Another transaction which has the same side effect is executed before.
- Another transaction which *invalidates* the state of the smart contract
required by the local transaction is executed before it.
The first case is handled by the predicate `is_transaction_effect_satisfied`,
where a transaction from a different source which does the same thing is
considered. This predicate handles the second scenario.
A transaction can **only** invalidate another iff both share a valid
initial state but a different end state.
Valid example:
A close can invalidate a deposit, because both a close and a deposit
can be executed from an opened state (same initial state), but a close
transaction will transition the channel to a closed state which doesn't
allow for deposits (different end state).
Invalid example:
A settle transaction cannot invalidate a deposit because a settle is
only allowed for the closed state and deposits are only allowed for
the open state. In such a case a deposit should never have been sent.
The deposit transaction for an invalid state is a bug and not a
    transaction which was invalidated. | Below is the instruction that describes the task:
### Input:
True if the `transaction` is made invalid by `state_change`.
Some transactions will fail due to race conditions. The races are:
- Another transaction which has the same side effect is executed before.
- Another transaction which *invalidates* the state of the smart contract
required by the local transaction is executed before it.
The first case is handled by the predicate `is_transaction_effect_satisfied`,
where a transaction from a different source which does the same thing is
considered. This predicate handles the second scenario.
A transaction can **only** invalidate another iff both share a valid
initial state but a different end state.
Valid example:
A close can invalidate a deposit, because both a close and a deposit
can be executed from an opened state (same initial state), but a close
transaction will transition the channel to a closed state which doesn't
allow for deposits (different end state).
Invalid example:
A settle transaction cannot invalidate a deposit because a settle is
only allowed for the closed state and deposits are only allowed for
the open state. In such a case a deposit should never have been sent.
The deposit transaction for an invalid state is a bug and not a
transaction which was invalidated.
### Response:
def is_transaction_invalidated(transaction, state_change):
""" True if the `transaction` is made invalid by `state_change`.
Some transactions will fail due to race conditions. The races are:
- Another transaction which has the same side effect is executed before.
- Another transaction which *invalidates* the state of the smart contract
required by the local transaction is executed before it.
The first case is handled by the predicate `is_transaction_effect_satisfied`,
where a transaction from a different source which does the same thing is
considered. This predicate handles the second scenario.
A transaction can **only** invalidate another iff both share a valid
initial state but a different end state.
Valid example:
A close can invalidate a deposit, because both a close and a deposit
can be executed from an opened state (same initial state), but a close
transaction will transition the channel to a closed state which doesn't
allow for deposits (different end state).
Invalid example:
A settle transaction cannot invalidate a deposit because a settle is
only allowed for the closed state and deposits are only allowed for
the open state. In such a case a deposit should never have been sent.
The deposit transaction for an invalid state is a bug and not a
transaction which was invalidated.
"""
# Most transactions cannot be invalidated by others. These are:
#
# - close transactions
# - settle transactions
# - batch unlocks
#
# Deposits and withdraws are invalidated by the close, but these are not
# made atomic through the WAL.
is_our_failed_update_transfer = (
isinstance(state_change, ContractReceiveChannelSettled) and
isinstance(transaction, ContractSendChannelUpdateTransfer) and
state_change.token_network_identifier == transaction.token_network_identifier and
state_change.channel_identifier == transaction.channel_identifier
)
if is_our_failed_update_transfer:
return True
return False |
def _construct_role(self, managed_policy_map):
"""Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
execution_role = IAMRole(self.logical_id + 'Role', attributes=self.get_passthrough_resource_attributes())
execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()
managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')]
if self.Tracing:
managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess'))
function_policies = FunctionPolicies({"Policies": self.Policies},
# No support for policy templates in the "core"
policy_template_processor=None)
policy_documents = []
if self.DeadLetterQueue:
policy_documents.append(IAMRolePolicies.dead_letter_queue_policy(
self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']],
self.DeadLetterQueue['TargetArn']))
for index, policy_entry in enumerate(function_policies.get()):
if policy_entry.type is PolicyTypes.POLICY_STATEMENT:
policy_documents.append({
'PolicyName': execution_role.logical_id + 'Policy' + str(index),
'PolicyDocument': policy_entry.data
})
elif policy_entry.type is PolicyTypes.MANAGED_POLICY:
# There are three options:
# Managed Policy Name (string): Try to convert to Managed Policy ARN
# Managed Policy Arn (string): Insert it directly into the list
# Intrinsic Function (dict): Insert it directly into the list
#
# When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice
#
policy_arn = policy_entry.data
if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map:
policy_arn = managed_policy_map[policy_entry.data]
# De-Duplicate managed policy arns before inserting. Mainly useful
# when customer specifies a managed policy which is already inserted
# by SAM, such as AWSLambdaBasicExecutionRole
if policy_arn not in managed_policy_arns:
managed_policy_arns.append(policy_arn)
else:
# Policy Templates are not supported here in the "core"
raise InvalidResourceException(
self.logical_id,
"Policy at index {} in the 'Policies' property is not valid".format(index))
execution_role.ManagedPolicyArns = list(managed_policy_arns)
execution_role.Policies = policy_documents or None
execution_role.PermissionsBoundary = self.PermissionsBoundary
return execution_role | Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
    :rtype: model.iam.IAMRole | Below is the instruction that describes the task:
### Input:
Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
### Response:
def _construct_role(self, managed_policy_map):
"""Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
execution_role = IAMRole(self.logical_id + 'Role', attributes=self.get_passthrough_resource_attributes())
execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()
managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')]
if self.Tracing:
managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess'))
function_policies = FunctionPolicies({"Policies": self.Policies},
# No support for policy templates in the "core"
policy_template_processor=None)
policy_documents = []
if self.DeadLetterQueue:
policy_documents.append(IAMRolePolicies.dead_letter_queue_policy(
self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']],
self.DeadLetterQueue['TargetArn']))
for index, policy_entry in enumerate(function_policies.get()):
if policy_entry.type is PolicyTypes.POLICY_STATEMENT:
policy_documents.append({
'PolicyName': execution_role.logical_id + 'Policy' + str(index),
'PolicyDocument': policy_entry.data
})
elif policy_entry.type is PolicyTypes.MANAGED_POLICY:
# There are three options:
# Managed Policy Name (string): Try to convert to Managed Policy ARN
# Managed Policy Arn (string): Insert it directly into the list
# Intrinsic Function (dict): Insert it directly into the list
#
# When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice
#
policy_arn = policy_entry.data
if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map:
policy_arn = managed_policy_map[policy_entry.data]
# De-Duplicate managed policy arns before inserting. Mainly useful
# when customer specifies a managed policy which is already inserted
# by SAM, such as AWSLambdaBasicExecutionRole
if policy_arn not in managed_policy_arns:
managed_policy_arns.append(policy_arn)
else:
# Policy Templates are not supported here in the "core"
raise InvalidResourceException(
self.logical_id,
"Policy at index {} in the 'Policies' property is not valid".format(index))
execution_role.ManagedPolicyArns = list(managed_policy_arns)
execution_role.Policies = policy_documents or None
execution_role.PermissionsBoundary = self.PermissionsBoundary
return execution_role |
def _tosubs(self, ixlist):
"""Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss).
"""
n = len(ixlist)
N = self._n
ss = []
ms = []
if n == 0:
return ss, ms
j = 0 # the position in ixlist currently being processed
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (ix, N))
if ix < 0:
ix += N
while j < n:
for s in range(0, self._n):
low = self._si[s]
high = self._si[s + 1]
if ix >= low and ix < high:
ss.append(s)
msj = [ix - low]
j += 1
while j < n:
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (
ix, N))
if ix < 0:
ix += N
if ix < low or ix >= high:
break
msj.append(ix - low)
j += 1
ms.append(msj)
if ix < low:
break
return ss, ms | Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
    list of lists of subindices m (one list for each subsim in ss). | Below is the instruction that describes the task:
### Input:
Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss).
### Response:
def _tosubs(self, ixlist):
"""Maps a list of integer indices to sub-indices.
ixlist can contain repeated indices and does not need to be sorted.
Returns pair (ss, ms) where ss is a list of subsim numbers and ms is a
list of lists of subindices m (one list for each subsim in ss).
"""
n = len(ixlist)
N = self._n
ss = []
ms = []
if n == 0:
return ss, ms
j = 0 # the position in ixlist currently being processed
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (ix, N))
if ix < 0:
ix += N
while j < n:
for s in range(0, self._n):
low = self._si[s]
high = self._si[s + 1]
if ix >= low and ix < high:
ss.append(s)
msj = [ix - low]
j += 1
while j < n:
ix = ixlist[j]
if ix >= N or ix < -N:
raise IndexError(
'index %d out of bounds for list of %d sims' % (
ix, N))
if ix < 0:
ix += N
if ix < low or ix >= high:
break
msj.append(ix - low)
j += 1
ms.append(msj)
if ix < low:
break
return ss, ms |
def map_fit(interface, state, label, inp):
"""
Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns
number of values and it calculates mean and variance for every feature.
For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs:
label, feature index, feature values.
"""
import numpy as np
combiner = {} # combiner used for joining of intermediate pairs
out = interface.output(0) # all outputted pairs have the same output label
for row in inp: # for every row in data chunk
row = row.strip().split(state["delimiter"]) # split row
if len(row) > 1: # check if row is empty
for i, j in enumerate(state["X_indices"]): # for defined features
if row[j] not in state["missing_vals"]: # check missing values
# creates a pair - label, feature index
pair = row[state["y_index"]] + state["delimiter"] + str(j)
if state["X_meta"][i] == "c": # continuous features
if pair in combiner:
# convert to float and store value
combiner[pair].append(np.float32(row[j]))
else:
combiner[pair] = [np.float32(row[j])]
else: # discrete features
# add feature value to pair
pair += state["delimiter"] + row[j]
# increase counts of current pair
combiner[pair] = combiner.get(pair, 0) + 1
# increase label counts
combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1
for k, v in combiner.iteritems(): # all pairs in combiner are output
        if len(k.split(state["delimiter"])) == 2:  # continuous features
# number of elements, partial mean and variance
out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
else: # discrete features and labels
out.add(k, v) | Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns
number of values and it calculates mean and variance for every feature.
For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs:
label, feature index, feature values. | Below is the the instruction that describes the task:
### Input:
Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns
number of values and it calculates mean and variance for every feature.
For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs:
label, feature index, feature values.
### Response:
def map_fit(interface, state, label, inp):
"""
Function counts occurrences of feature values for every row in given data chunk. For continuous features it returns
number of values and it calculates mean and variance for every feature.
For discrete features it counts occurrences of labels and values for every feature. It returns occurrences of pairs:
label, feature index, feature values.
"""
import numpy as np
combiner = {} # combiner used for joining of intermediate pairs
out = interface.output(0) # all outputted pairs have the same output label
for row in inp: # for every row in data chunk
row = row.strip().split(state["delimiter"]) # split row
if len(row) > 1: # check if row is empty
for i, j in enumerate(state["X_indices"]): # for defined features
if row[j] not in state["missing_vals"]: # check missing values
# creates a pair - label, feature index
pair = row[state["y_index"]] + state["delimiter"] + str(j)
if state["X_meta"][i] == "c": # continuous features
if pair in combiner:
# convert to float and store value
combiner[pair].append(np.float32(row[j]))
else:
combiner[pair] = [np.float32(row[j])]
else: # discrete features
# add feature value to pair
pair += state["delimiter"] + row[j]
# increase counts of current pair
combiner[pair] = combiner.get(pair, 0) + 1
# increase label counts
combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1
for k, v in combiner.iteritems(): # all pairs in combiner are output
        if len(k.split(state["delimiter"])) == 2:  # continuous features
# number of elements, partial mean and variance
out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
else: # discrete features and labels
out.add(k, v) |
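Outside the Disco runtime, the pairs this map step emits for a single row can be reproduced directly. A rough, self-contained sketch follows; the state key names come from the code above, but the values and the toy row are made up, and note the original targets Python 2 (hence iteritems()), while this sketch is Python 3.

import numpy as np

state = {"delimiter": ",", "y_index": 0, "X_indices": [1, 2],
         "X_meta": ["c", "d"], "missing_vals": {"?"}}
row = "spam,3.5,red".split(state["delimiter"])
label = row[state["y_index"]]
vals = [np.float32(row[1])]                                      # continuous feature at index 1
emitted = {
    label + ",1": (np.size(vals), np.mean(vals), np.var(vals)),  # count, partial mean, partial variance
    label + ",2," + row[2]: 1,                                   # (label, feature index, value) count
    label: 1,                                                    # label count
}
print(emitted)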
def simplify_other(major, minor, dist):
"""
    Simplify the point featurecollection of poi with another set of point features according to distance.
Attention: point featurecollection only
Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance
return a geojson featurecollection with two parts of featurecollection
"""
result = deepcopy(major)
if major['type'] == 'FeatureCollection' and minor['type'] == 'FeatureCollection':
arc = dist/6371000*180/math.pi*2
for minorfeature in minor['features']:
minorgeom = minorfeature['geometry']
minorlng = minorgeom['coordinates'][0]
minorlat = minorgeom['coordinates'][1]
is_accept = True
for mainfeature in major['features']:
maingeom = mainfeature['geometry']
mainlng = maingeom['coordinates'][0]
mainlat = maingeom['coordinates'][1]
if abs(minorlat-mainlat) <= arc and abs(minorlng-mainlng) <= arc:
distance = point_distance(maingeom, minorgeom)
if distance < dist:
is_accept = False
break
if is_accept:
result["features"].append(minorfeature)
    return result | Simplify the point featurecollection of poi with another set of point features according to distance.
Attention: point featurecollection only
Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance
return a geojson featurecollection with two parts of featurecollection | Below is the the instruction that describes the task:
### Input:
Simplify the point featurecollection of poi with another set of point features according to distance.
Attention: point featurecollection only
Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance
return a geojson featurecollection with two parts of featurecollection
### Response:
def simplify_other(major, minor, dist):
"""
    Simplify the point featurecollection of poi with another set of point features according to distance.
Attention: point featurecollection only
Keyword arguments:
major -- major geojson
minor -- minor geojson
dist -- distance
return a geojson featurecollection with two parts of featurecollection
"""
result = deepcopy(major)
if major['type'] == 'FeatureCollection' and minor['type'] == 'FeatureCollection':
arc = dist/6371000*180/math.pi*2
for minorfeature in minor['features']:
minorgeom = minorfeature['geometry']
minorlng = minorgeom['coordinates'][0]
minorlat = minorgeom['coordinates'][1]
is_accept = True
for mainfeature in major['features']:
maingeom = mainfeature['geometry']
mainlng = maingeom['coordinates'][0]
mainlat = maingeom['coordinates'][1]
if abs(minorlat-mainlat) <= arc and abs(minorlng-mainlng) <= arc:
distance = point_distance(maingeom, minorgeom)
if distance < dist:
is_accept = False
break
if is_accept:
result["features"].append(minorfeature)
return result |
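A usage sketch; it assumes the module's own point_distance helper plus the deepcopy/math imports the function relies on, and coordinates are [lng, lat] in degrees.

major = {"type": "FeatureCollection", "features": [
    {"type": "Feature", "properties": {},
     "geometry": {"type": "Point", "coordinates": [121.5000, 25.0400]}},
]}
minor = {"type": "FeatureCollection", "features": [
    {"type": "Feature", "properties": {},
     "geometry": {"type": "Point", "coordinates": [121.5001, 25.0401]}},  # roughly 15 m away: dropped
    {"type": "Feature", "properties": {},
     "geometry": {"type": "Point", "coordinates": [121.6000, 25.1000]}},  # roughly 12 km away: kept
]}
merged = simplify_other(major, minor, 100)  # keep minor points at least 100 m from every major point
# merged["features"] holds the major point plus only the distant minor point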
def _get_id(owner, date, content):
"""
Generate an unique Atom id for the given content
"""
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update("github.com/spacetelescope/asv".encode('utf-8'))
for x in content:
if x is None:
h.update(",".encode('utf-8'))
else:
h.update(x.encode('utf-8'))
h.update(",".encode('utf-8'))
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) | Generate an unique Atom id for the given content | Below is the the instruction that describes the task:
### Input:
Generate an unique Atom id for the given content
### Response:
def _get_id(owner, date, content):
"""
Generate an unique Atom id for the given content
"""
h = hashlib.sha256()
# Hash still contains the original project url, keep as is
h.update("github.com/spacetelescope/asv".encode('utf-8'))
for x in content:
if x is None:
h.update(",".encode('utf-8'))
else:
h.update(x.encode('utf-8'))
h.update(",".encode('utf-8'))
if date is None:
date = datetime.datetime(1970, 1, 1)
return "tag:{0},{1}:/{2}".format(owner, date.strftime('%Y-%m-%d'), h.hexdigest()) |
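Usage sketch (the owner, date and content values are illustrative):

import datetime

entry_id = _get_id("example.org", datetime.datetime(2024, 1, 2),
                   ["benchmark-name", None, "commit-abc123"])
# -> 'tag:example.org,2024-01-02:/<sha256 hex digest of the content fields>'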
def lsfiles(root=".", **kwargs):
"""
Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist.
"""
paths = ls(root=root, **kwargs)
if isfile(root):
return paths
return [_path for _path in paths if isfile(path(root, _path))] | Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist. | Below is the the instruction that describes the task:
### Input:
Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist.
### Response:
def lsfiles(root=".", **kwargs):
"""
Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist.
"""
paths = ls(root=root, **kwargs)
if isfile(root):
return paths
return [_path for _path in paths if isfile(path(root, _path))] |
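Usage sketch (relies on the module's own ls, isfile and path helpers):

files = lsfiles(".")   # only regular files in the current directory; any extra keyword
                       # arguments are forwarded unchanged to the module's ls() helper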
def run(self):
"""
Run the schedule
"""
self.main_task.thread.start()
self.main_task.thread.join() | Run the schedule | Below is the the instruction that describes the task:
### Input:
Run the schedule
### Response:
def run(self):
"""
Run the schedule
"""
self.main_task.thread.start()
self.main_task.thread.join() |