code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def __hash_and_stat_file(self, path, saltenv='base'):
'''
Common code for hashing and stating files
'''
try:
path = self._check_proto(path)
except MinionError as err:
if not os.path.isfile(path):
log.warning(
'specified file %s is not present to generate hash: %s',
path, err
)
return {}, None
else:
ret = {}
hash_type = self.opts.get('hash_type', 'md5')
ret['hsum'] = salt.utils.hashutils.get_hash(path, form=hash_type)
ret['hash_type'] = hash_type
return ret
load = {'path': path,
'saltenv': saltenv,
'cmd': '_file_hash'}
return self.channel.send(load) | Common code for hashing and stating files | Below is the the instruction that describes the task:
### Input:
Common code for hashing and stating files
### Response:
def __hash_and_stat_file(self, path, saltenv='base'):
'''
Common code for hashing and stating files
'''
try:
path = self._check_proto(path)
except MinionError as err:
if not os.path.isfile(path):
log.warning(
'specified file %s is not present to generate hash: %s',
path, err
)
return {}, None
else:
ret = {}
hash_type = self.opts.get('hash_type', 'md5')
ret['hsum'] = salt.utils.hashutils.get_hash(path, form=hash_type)
ret['hash_type'] = hash_type
return ret
load = {'path': path,
'saltenv': saltenv,
'cmd': '_file_hash'}
return self.channel.send(load) |
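The first branch above hashes a file that lives on the minion's local filesystem instead of asking the master. A minimal standalone sketch of just that branch, using only the standard library (`hashlib` stands in for `salt.utils.hashutils.get_hash`, and the function name `local_file_hash` is invented for illustration):

```python
import hashlib
import os

def local_file_hash(path, hash_type='md5', chunk_size=65536):
    """Sketch of the local-file branch: chunked hash plus hash_type, or {} if missing."""
    if not os.path.isfile(path):
        return {}
    digest = hashlib.new(hash_type)
    with open(path, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(chunk_size), b''):
            digest.update(chunk)
    return {'hsum': digest.hexdigest(), 'hash_type': hash_type}
```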
def _lt_from_ge(self, other):
"""Return a < b. Computed by @total_ordering from (not a >= b)."""
op_result = self.__ge__(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result | Return a < b. Computed by @total_ordering from (not a >= b). | Below is the the instruction that describes the task:
### Input:
Return a < b. Computed by @total_ordering from (not a >= b).
### Response:
def _lt_from_ge(self, other):
"""Return a < b. Computed by @total_ordering from (not a >= b)."""
op_result = self.__ge__(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result |
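`_lt_from_ge` is one of the fillers that `functools.total_ordering` installs on a class that only defines `__eq__` and `__ge__`. A self-contained illustration of the same derivation (the `Version` class is made up for the example):

```python
from functools import total_ordering

@total_ordering
class Version:
    # Only __eq__ and __ge__ are written by hand; total_ordering derives
    # __lt__, __le__ and __gt__, with __lt__ computed as `not (a >= b)`.
    def __init__(self, number):
        self.number = number

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.number == other.number

    def __ge__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.number >= other.number

assert Version(1) < Version(2)        # derived __lt__ in action
assert not (Version(3) < Version(2))
```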
def _decompose_(self, qubits):
"""A quantum circuit (QFT_inv) with the following structure.
---H--@-------@--------@----------------------------------------------
| | |
------@^-0.5--+--------+---------H--@-------@-------------------------
| | | |
--------------@^-0.25--+------------@^-0.5--+---------H--@------------
| | |
-----------------------@^-0.125-------------@^-0.25------@^-0.5---H---
The number of qubits can be arbitrary.
"""
qubits = list(qubits)
while len(qubits) > 0:
q_head = qubits.pop(0)
yield cirq.H(q_head)
for i, qubit in enumerate(qubits):
yield (cirq.CZ**(-1/2.0**(i+1)))(qubit, q_head) | A quantum circuit (QFT_inv) with the following structure.
---H--@-------@--------@----------------------------------------------
| | |
------@^-0.5--+--------+---------H--@-------@-------------------------
| | | |
--------------@^-0.25--+------------@^-0.5--+---------H--@------------
| | |
-----------------------@^-0.125-------------@^-0.25------@^-0.5---H---
The number of qubits can be arbitrary. | Below is the the instruction that describes the task:
### Input:
A quantum circuit (QFT_inv) with the following structure.
---H--@-------@--------@----------------------------------------------
| | |
------@^-0.5--+--------+---------H--@-------@-------------------------
| | | |
--------------@^-0.25--+------------@^-0.5--+---------H--@------------
| | |
-----------------------@^-0.125-------------@^-0.25------@^-0.5---H---
The number of qubits can be arbitrary.
### Response:
def _decompose_(self, qubits):
"""A quantum circuit (QFT_inv) with the following structure.
---H--@-------@--------@----------------------------------------------
| | |
------@^-0.5--+--------+---------H--@-------@-------------------------
| | | |
--------------@^-0.25--+------------@^-0.5--+---------H--@------------
| | |
-----------------------@^-0.125-------------@^-0.25------@^-0.5---H---
The number of qubits can be arbitrary.
"""
qubits = list(qubits)
while len(qubits) > 0:
q_head = qubits.pop(0)
yield cirq.H(q_head)
for i, qubit in enumerate(qubits):
yield (cirq.CZ**(-1/2.0**(i+1)))(qubit, q_head) |
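A hedged way to exercise the same decomposition outside the gate class, assuming a recent `cirq` release (the free function `inverse_qft_ops` is invented here; it simply repeats the generator body above):

```python
import cirq

def inverse_qft_ops(qubits):
    # Same H / CZ**(-1/2**k) ladder as _decompose_ above, as a free function.
    qubits = list(qubits)
    while qubits:
        q_head = qubits.pop(0)
        yield cirq.H(q_head)
        for i, qubit in enumerate(qubits):
            yield (cirq.CZ ** (-1 / 2.0 ** (i + 1)))(qubit, q_head)

qubits = cirq.LineQubit.range(3)
circuit = cirq.Circuit(inverse_qft_ops(qubits))
print(circuit)  # shows the H / @^-0.5 / @^-0.25 pattern from the docstring diagram
```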
def _parse_substitutions(self, element):
"""
Parse word substitutions
:param element: The XML Element object
:type element: etree._Element
"""
subs = element.findall('sub')
for sub in subs:
self.agentml.set_substitution(attribute(sub, 'word'), sub.text) | Parse word substitutions
:param element: The XML Element object
:type element: etree._Element | Below is the the instruction that describes the task:
### Input:
Parse word substitutions
:param element: The XML Element object
:type element: etree._Element
### Response:
def _parse_substitutions(self, element):
"""
Parse word substitutions
:param element: The XML Element object
:type element: etree._Element
"""
subs = element.findall('sub')
for sub in subs:
self.agentml.set_substitution(attribute(sub, 'word'), sub.text) |
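A standalone sketch of the XML shape this expects, using `xml.etree` and `Element.get` in place of AgentML's `attribute()` helper (the sample markup is inferred from the parser, not taken from the AgentML documentation):

```python
from xml.etree import ElementTree

xml = """
<substitutions>
    <sub word="dont">do not</sub>
    <sub word="im">I am</sub>
</substitutions>
"""

element = ElementTree.fromstring(xml)
substitutions = {sub.get('word'): sub.text for sub in element.findall('sub')}
print(substitutions)  # {'dont': 'do not', 'im': 'I am'}
```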
def should_build_with_cython(previous_cython_version, is_release):
"""
Returns the previously used Cython version (or 'unknown' if not
previously built) if Cython should be used to build extension modules from
pyx files.
"""
# Only build with Cython if, of course, Cython is installed, we're in a
# development version (i.e. not release) or the Cython-generated source
# files haven't been created yet (cython_version == 'unknown'). The latter
# case can happen even when release is True if checking out a release tag
# from the repository
have_cython = False
try:
from Cython import __version__ as cython_version # noqa
have_cython = True
except ImportError:
pass
if have_cython and (not is_release or previous_cython_version == 'unknown'):
return cython_version
else:
return False | Returns the previously used Cython version (or 'unknown' if not
previously built) if Cython should be used to build extension modules from
pyx files. | Below is the the instruction that describes the task:
### Input:
Returns the previously used Cython version (or 'unknown' if not
previously built) if Cython should be used to build extension modules from
pyx files.
### Response:
def should_build_with_cython(previous_cython_version, is_release):
"""
Returns the previously used Cython version (or 'unknown' if not
previously built) if Cython should be used to build extension modules from
pyx files.
"""
# Only build with Cython if, of course, Cython is installed, we're in a
# development version (i.e. not release) or the Cython-generated source
# files haven't been created yet (cython_version == 'unknown'). The latter
# case can happen even when release is True if checking out a release tag
# from the repository
have_cython = False
try:
from Cython import __version__ as cython_version # noqa
have_cython = True
except ImportError:
pass
if have_cython and (not is_release or previous_cython_version == 'unknown'):
return cython_version
else:
return False |
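A hypothetical call site showing how a setup script might use the return value (the variable values are illustrative only):

```python
previous_cython_version = 'unknown'   # e.g. nothing recorded from an earlier build
is_release = False                    # building from a development checkout

cython_version = should_build_with_cython(previous_cython_version, is_release)
if cython_version:
    print('cythonizing .pyx sources with Cython', cython_version)
else:
    print('falling back to previously generated C sources')
```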
def readsGenerator(self, request):
"""
Returns a generator over the (read, nextPageToken) pairs defined
by the specified request
"""
if not request.reference_id:
raise exceptions.UnmappedReadsNotSupported()
if len(request.read_group_ids) < 1:
raise exceptions.BadRequestException(
"At least one readGroupId must be specified")
elif len(request.read_group_ids) == 1:
return self._readsGeneratorSingle(request)
else:
return self._readsGeneratorMultiple(request) | Returns a generator over the (read, nextPageToken) pairs defined
by the specified request | Below is the the instruction that describes the task:
### Input:
Returns a generator over the (read, nextPageToken) pairs defined
by the specified request
### Response:
def readsGenerator(self, request):
"""
Returns a generator over the (read, nextPageToken) pairs defined
by the specified request
"""
if not request.reference_id:
raise exceptions.UnmappedReadsNotSupported()
if len(request.read_group_ids) < 1:
raise exceptions.BadRequestException(
"At least one readGroupId must be specified")
elif len(request.read_group_ids) == 1:
return self._readsGeneratorSingle(request)
else:
return self._readsGeneratorMultiple(request) |
def tr(self, args, color=None):
"""
Method to print ASCII patterns to terminal
"""
width = self._term_size()[1]
if not args:
if color is not None:
print(self._echo("#" * width, color))
else:
print(self._echo("#" * width, "green"))
else:
for each_symbol in args:
chars = len(each_symbol)
number_chars = width // chars
if color is not None:
print(self._echo(each_symbol * number_chars, color))
else:
print(each_symbol * number_chars) | Method to print ASCII patterns to terminal | Below is the the instruction that describes the task:
### Input:
Method to print ASCII patterns to terminal
### Response:
def tr(self, args, color=None):
"""
Method to print ASCII patterns to terminal
"""
width = self._term_size()[1]
if not args:
if color is not None:
print(self._echo("#" * width, color))
else:
print(self._echo("#" * width, "green"))
else:
for each_symbol in args:
chars = len(each_symbol)
number_chars = width // chars
if color is not None:
print(self._echo(each_symbol * number_chars, color))
else:
print(each_symbol * number_chars) |
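The same idea without the surrounding class, as a rough sketch using `shutil.get_terminal_size()` (color handling via `self._echo` is left out):

```python
import shutil

def print_rule(symbols=()):
    width = shutil.get_terminal_size().columns
    if not symbols:
        print('#' * width)
        return
    for symbol in symbols:
        print(symbol * (width // len(symbol)))

print_rule()              # one full-width row of '#'
print_rule(['-=', '*'])   # one row of '-=' pairs, one row of '*'
```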
def query_string_attribute(self, target, display_mask, attr):
"""Return the value of a string attribute"""
reply = NVCtrlQueryStringAttributeReplyRequest(display=self.display,
opcode=self.display.get_extension_major(extname),
target_id=target.id(),
target_type=target.type(),
display_mask=display_mask,
attr=attr)
if not reply._data.get('flags'):
return None
return str(reply._data.get('string')).strip('\0') | Return the value of a string attribute | Below is the the instruction that describes the task:
### Input:
Return the value of a string attribute
### Response:
def query_string_attribute(self, target, display_mask, attr):
"""Return the value of a string attribute"""
reply = NVCtrlQueryStringAttributeReplyRequest(display=self.display,
opcode=self.display.get_extension_major(extname),
target_id=target.id(),
target_type=target.type(),
display_mask=display_mask,
attr=attr)
if not reply._data.get('flags'):
return None
return str(reply._data.get('string')).strip('\0') |
def create_collection(self, name="collection", position=None, **kwargs):
"""Create a new child colleciton.
Parameters
----------
name : string
Unique identifier.
position : integer (optional)
Location to insert. Default is None (append).
kwargs
Additional arguments to child collection instantiation.
Returns
-------
WrightTools Collection
New child.
"""
if name in self.item_names:
wt_exceptions.ObjectExistsWarning.warn(name)
return self[name]
collection = Collection(
filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
)
if position is not None:
self.attrs["item_names"] = np.insert(
self.attrs["item_names"][:-1], position, collection.natural_name.encode()
)
setattr(self, name, collection)
return collection | Create a new child colleciton.
Parameters
----------
name : string
Unique identifier.
position : integer (optional)
Location to insert. Default is None (append).
kwargs
Additional arguments to child collection instantiation.
Returns
-------
WrightTools Collection
New child. | Below is the the instruction that describes the task:
### Input:
Create a new child colleciton.
Parameters
----------
name : string
Unique identifier.
position : integer (optional)
Location to insert. Default is None (append).
kwargs
Additional arguments to child collection instantiation.
Returns
-------
WrightTools Collection
New child.
### Response:
def create_collection(self, name="collection", position=None, **kwargs):
"""Create a new child colleciton.
Parameters
----------
name : string
Unique identifier.
position : integer (optional)
Location to insert. Default is None (append).
kwargs
Additional arguments to child collection instantiation.
Returns
-------
WrightTools Collection
New child.
"""
if name in self.item_names:
wt_exceptions.ObjectExistsWarning.warn(name)
return self[name]
collection = Collection(
filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
)
if position is not None:
self.attrs["item_names"] = np.insert(
self.attrs["item_names"][:-1], position, collection.natural_name.encode()
)
setattr(self, name, collection)
return collection |
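A hedged usage sketch, assuming WrightTools is installed and that a `Collection` can be created without an explicit filepath (both assumptions, not guarantees from the snippet above):

```python
import WrightTools as wt

root = wt.Collection(name='root')
raw = root.create_collection(name='raw')                       # appended
cal = root.create_collection(name='calibration', position=0)   # inserted at the front
print(root.item_names)  # expected: ('calibration', 'raw')
```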
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0) | Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails. | Below is the the instruction that describes the task:
### Input:
Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
### Response:
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0) |
def _handle_tag_text(self, text):
"""Handle regular *text* inside of an HTML open tag."""
next = self._read(1)
if not self._can_recurse() or text not in self.MARKERS:
self._emit_text(text)
elif text == next == "{":
self._parse_template_or_argument()
elif text == next == "[":
self._parse_wikilink()
elif text == "<":
self._parse_tag()
else:
self._emit_text(text) | Handle regular *text* inside of an HTML open tag. | Below is the the instruction that describes the task:
### Input:
Handle regular *text* inside of an HTML open tag.
### Response:
def _handle_tag_text(self, text):
"""Handle regular *text* inside of an HTML open tag."""
next = self._read(1)
if not self._can_recurse() or text not in self.MARKERS:
self._emit_text(text)
elif text == next == "{":
self._parse_template_or_argument()
elif text == next == "[":
self._parse_wikilink()
elif text == "<":
self._parse_tag()
else:
self._emit_text(text) |
def _gen_3spec(op, path, xattr=False):
"""
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
"""
flags = 0
if xattr:
flags |= _P.SDSPEC_F_XATTR
return Spec(op, path, flags) | Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension | Below is the the instruction that describes the task:
### Input:
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
### Response:
def _gen_3spec(op, path, xattr=False):
"""
Returns a Spec tuple suitable for passing to the underlying C extension.
This variant is called for operations that lack an input value.
:param str path: The path to fetch
:param bool xattr: Whether this is an extended attribute
:return: a spec suitable for passing to the underlying C extension
"""
flags = 0
if xattr:
flags |= _P.SDSPEC_F_XATTR
return Spec(op, path, flags) |
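To see the shape of what this produces without the real C extension, here is a toy stand-in; both the `Spec` namedtuple and the flag value are placeholders, not couchbase's actual definitions:

```python
from collections import namedtuple

Spec = namedtuple('Spec', 'op path flags')
SDSPEC_F_XATTR = 0x04  # placeholder value, not the real couchbase constant

def gen_3spec(op, path, xattr=False):
    flags = SDSPEC_F_XATTR if xattr else 0
    return Spec(op, path, flags)

print(gen_3spec('get', 'name'))                 # Spec(op='get', path='name', flags=0)
print(gen_3spec('get', 'meta.id', xattr=True))  # flags carries the xattr bit
```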
def _get(pseudodict, key, single=True):
"""Helper method for getting values from "multi-dict"s"""
matches = [item[1] for item in pseudodict if item[0] == key]
if single:
return matches[0]
else:
return matches | Helper method for getting values from "multi-dict"s | Below is the the instruction that describes the task:
### Input:
Helper method for getting values from "multi-dict"s
### Response:
def _get(pseudodict, key, single=True):
"""Helper method for getting values from "multi-dict"s"""
matches = [item[1] for item in pseudodict if item[0] == key]
if single:
return matches[0]
else:
return matches |
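A quick usage example; note that with `single=True` a key that is absent raises `IndexError` rather than `KeyError`:

```python
pseudodict = [('name', 'alice'), ('tag', 'a'), ('tag', 'b')]

print(_get(pseudodict, 'name'))                # 'alice'
print(_get(pseudodict, 'tag', single=False))   # ['a', 'b']
```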
def tobytes(s, encoding='ascii'):
""" Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory. """
# NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
# NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
if PY3K:
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s | Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory. | Below is the the instruction that describes the task:
### Input:
Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory.
### Response:
def tobytes(s, encoding='ascii'):
""" Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory. """
# NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
# NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
if PY3K:
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s |
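The comments in the code anticipate dropping Python 2 support; once only Python 3 matters, the helper collapses to the two-line sketch below (`tobytes_py3` is a name made up for the comparison):

```python
def tobytes_py3(s, encoding='ascii'):
    return s if isinstance(s, bytes) else s.encode(encoding)

assert tobytes_py3('café', encoding='utf-8') == b'caf\xc3\xa9'
assert tobytes_py3(b'raw') == b'raw'   # bytes pass through unchanged
```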
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
"""
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
"""
key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None))
if not key:
raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error."
.format(env_name=env_name))
# Legacy keyfile name
if (not os.path.isfile(keypath + key_ext) and
os.path.isfile('github_deploy_key' + key_ext)):
keypath = 'github_deploy_key'
key_filename = os.path.basename(keypath)
key = key.encode('utf-8')
decrypt_file(keypath + key_ext, key)
key_path = os.path.expanduser("~/.ssh/" + key_filename)
os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
os.rename(keypath, key_path)
with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
f.write("Host github.com"
' IdentityFile "%s"'
" LogLevel ERROR\n" % key_path)
# start ssh-agent and add key to it
# info from SSH agent has to be put into the environment
agent_info = subprocess.check_output(['ssh-agent', '-s'])
agent_info = agent_info.decode('utf-8')
agent_info = agent_info.split()
AUTH_SOCK = agent_info[0].split('=')[1][:-1]
AGENT_PID = agent_info[3].split('=')[1][:-1]
os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
os.putenv('SSH_AGENT_PID', AGENT_PID)
run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)]) | Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility. | Below is the the instruction that describes the task:
### Input:
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
### Response:
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
"""
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
``env_name``. If ``env_name`` is not set, it falls back to
``DOCTR_DEPLOY_ENCRYPTION_KEY`` for backwards compatibility.
If keypath + key_ext does not exist, it falls back to
``github_deploy_key.enc`` for backwards compatibility.
"""
key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None))
if not key:
raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set. Make sure you followed the instructions from 'doctr configure' properly. You may need to re-run 'doctr configure' to fix this error."
.format(env_name=env_name))
# Legacy keyfile name
if (not os.path.isfile(keypath + key_ext) and
os.path.isfile('github_deploy_key' + key_ext)):
keypath = 'github_deploy_key'
key_filename = os.path.basename(keypath)
key = key.encode('utf-8')
decrypt_file(keypath + key_ext, key)
key_path = os.path.expanduser("~/.ssh/" + key_filename)
os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
os.rename(keypath, key_path)
with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
f.write("Host github.com"
' IdentityFile "%s"'
" LogLevel ERROR\n" % key_path)
# start ssh-agent and add key to it
# info from SSH agent has to be put into the environment
agent_info = subprocess.check_output(['ssh-agent', '-s'])
agent_info = agent_info.decode('utf-8')
agent_info = agent_info.split()
AUTH_SOCK = agent_info[0].split('=')[1][:-1]
AGENT_PID = agent_info[3].split('=')[1][:-1]
os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
os.putenv('SSH_AGENT_PID', AGENT_PID)
run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)]) |
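The ssh-agent bookkeeping at the end is the least obvious step. A small reproduction of just that parsing, fed a canned `ssh-agent -s` style output (the socket path and PID are made up):

```python
sample = ("SSH_AUTH_SOCK=/tmp/ssh-XXXXXX/agent.1234; export SSH_AUTH_SOCK;\n"
          "SSH_AGENT_PID=1235; export SSH_AGENT_PID;\n"
          "echo Agent pid 1235;\n")

tokens = sample.split()
auth_sock = tokens[0].split('=')[1][:-1]   # strip the trailing ';'
agent_pid = tokens[3].split('=')[1][:-1]
print(auth_sock)  # /tmp/ssh-XXXXXX/agent.1234
print(agent_pid)  # 1235
```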
def download_release(download_file, release=None):
"""Downloads the "go-basic.obo" file for the specified release."""
if release is None:
release = get_latest_release()
url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release
#download_file = 'go-basic_%s.obo' % release
misc.http_download(url, download_file) | Downloads the "go-basic.obo" file for the specified release. | Below is the the instruction that describes the task:
### Input:
Downloads the "go-basic.obo" file for the specified release.
### Response:
def download_release(download_file, release=None):
"""Downloads the "go-basic.obo" file for the specified release."""
if release is None:
release = get_latest_release()
url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release
#download_file = 'go-basic_%s.obo' % release
misc.http_download(url, download_file) |
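A plain-stdlib stand-in for the `misc.http_download` helper used above, for readers who want to see what the call amounts to (the stand-in's name mirrors the original but is written from scratch here):

```python
import urllib.request

def http_download(url, download_file):
    with urllib.request.urlopen(url) as response, open(download_file, 'wb') as out:
        out.write(response.read())

# usage mirroring download_release, with `release` being any dated GO release tag:
# http_download('http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release,
#               'go-basic_%s.obo' % release)
```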
def plot_ppc(
data,
kind="density",
alpha=None,
mean=True,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
):
"""
Plot for posterior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior
predictive data.
kind : str
Type of plot to display (density, cumulative, or scatter). Defaults to density.
alpha : float
Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density
and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior predictive data.
Dictionary structure:
Key = data var_name
Value = posterior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
Dimensions should match flatten excluding dimensions for data_pairs parameters.
If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior predictive samples to plot. For `kind` = 'scatter' and
`animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
Returns
-------
axes : matplotlib axes
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data)
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
for group in ("posterior_predictive", "observed_data"):
if not hasattr(data, group):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=group)
)
if kind.lower() not in ("density", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `density`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if animation_kwargs is None:
animation_kwargs = {}
if platform.system() == "Linux":
animation_kwargs.setdefault("blit", True)
else:
animation_kwargs.setdefault("blit", False)
if animated and animation_kwargs["blit"] and platform.system() != "Linux":
_log.warning(
"If you experience problems rendering the animation try setting"
"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
)
if alpha is None:
if animated:
alpha = 1
else:
if kind.lower() == "scatter":
alpha = 0.7
else:
alpha = 0.2
if jitter is None:
jitter = 0.0
assert jitter >= 0.0
observed = data.observed_data
posterior_predictive = data.posterior_predictive
if var_names is None:
var_names = observed.data_vars
var_names = _var_names(var_names, observed)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
if flatten_pp is None and flatten is None:
flatten_pp = list(posterior_predictive.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = posterior_predictive.sizes["chain"] * posterior_predictive.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
)
pp_plotters = list(
xarray_var_iter(
posterior_predictive.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
)
)
length_plotters = len(obs_plotters)
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize)
for i, ax in enumerate(axes):
var_name, selection, obs_vals = obs_plotters[i]
pp_var_name, _, pp_vals = pp_plotters[i]
dtype = posterior_predictive[pp_var_name].dtype.kind
# flatten non-specified dimensions
obs_vals = obs_vals.flatten()
pp_vals = pp_vals.reshape(total_pp_samples, -1)
pp_sampled_vals = pp_vals[pp_sample_ix]
if kind == "density":
plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth}
if dtype == "i":
plot_kwargs["drawstyle"] = "steps-pre"
ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if dtype == "f":
plot_kde(
obs_vals,
label="Observed {}".format(var_name),
plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3},
fill_kwargs={"alpha": 0},
ax=ax,
legend=legend,
)
else:
nbins = round(len(obs_vals) ** 0.5)
hist, bin_edges = np.histogram(obs_vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
label="Observed {}".format(var_name),
color="k",
linewidth=linewidth,
zorder=3,
drawstyle=plot_kwargs["drawstyle"],
)
if animated:
animate, init = _set_animation(
pp_sampled_vals, ax, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs
)
else:
# run plot_kde manually with one plot call
pp_densities = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
if dtype == "f":
pp_density, lower, upper = _fast_kde(vals)
pp_x = np.linspace(lower, upper, len(pp_density))
pp_densities.extend([pp_x, pp_density])
else:
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
pp_densities.extend([bin_edges, hist])
ax.plot(*pp_densities, **plot_kwargs)
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 2,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax,
legend=legend,
)
else:
vals = pp_vals.flatten()
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=2,
linestyle="--",
drawstyle=plot_kwargs["drawstyle"],
)
ax.tick_params(labelsize=xt_labelsize)
ax.set_yticks([])
elif kind == "cumulative":
drawstyle = "default" if dtype == "f" else "steps-pre"
ax.plot(
*_empirical_cdf(obs_vals),
color="k",
linewidth=linewidth,
label="Observed {}".format(var_name),
drawstyle=drawstyle,
zorder=3
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax,
kind=kind,
alpha=alpha,
drawstyle=drawstyle,
linewidth=linewidth,
)
else:
# run plot_kde manually with one plot call
pp_densities = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
pp_x, pp_density = _empirical_cdf(vals)
pp_densities.extend([pp_x, pp_density])
ax.plot(
*pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth
)
ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if mean:
ax.plot(
*_empirical_cdf(pp_vals.flatten()),
color="C0",
linestyle="--",
linewidth=linewidth,
drawstyle=drawstyle,
label="Posterior predictive mean {}".format(pp_var_name)
)
ax.set_yticks([0, 0.5, 1])
elif kind == "scatter":
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 3,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax,
legend=legend,
)
else:
vals = pp_vals.flatten()
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=3,
linestyle="--",
drawstyle="steps-pre",
)
_, limit = ax.get_ylim()
limit *= 1.05
y_rows = np.linspace(0, limit, num_pp_samples + 1)
jitter_scale = y_rows[1] - y_rows[0]
scale_low = 0
scale_high = jitter_scale * jitter
obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)
if jitter:
obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals))
ax.plot(
obs_vals,
obs_yvals,
"o",
color="C0",
markersize=markersize,
alpha=alpha,
label="Observed {}".format(var_name),
zorder=4,
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax,
kind=kind,
height=y_rows.mean() * 0.5,
markersize=markersize,
)
else:
for vals, y in zip(pp_sampled_vals, y_rows[1:]):
vals = np.ravel(vals)
yvals = np.full_like(vals, y, dtype=np.float64)
if jitter:
yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals))
ax.plot(
vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha
)
ax.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name))
ax.set_yticks([])
if var_name != pp_var_name:
xlabel = "{} / {}".format(var_name, pp_var_name)
else:
xlabel = var_name
ax.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)
if legend:
if i == 0:
ax.legend(fontsize=xt_labelsize * 0.75)
else:
ax.legend([])
if animated:
ani = animation.FuncAnimation(
fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs
)
return axes, ani
else:
return axes | Plot for posterior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior
predictive data.
kind : str
Type of plot to display (density, cumulative, or scatter). Defaults to density.
alpha : float
Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density
and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior predictive data.
Dictionary structure:
Key = data var_name
Value = posterior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
Dimensions should match flatten excluding dimensions for data_pairs parameters.
If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior predictive samples to plot. For `kind` = 'scatter' and
`animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
Returns
-------
axes : matplotlib axes
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data)
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7) | Below is the the instruction that describes the task:
### Input:
Plot for posterior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior
predictive data.
kind : str
Type of plot to display (density, cumulative, or scatter). Defaults to density.
alpha : float
Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density
and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior predictive data.
Dictionary structure:
Key = data var_name
Value = posterior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
Dimensions should match flatten excluding dimensions for data_pairs parameters.
If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior predictive samples to plot. For `kind` = 'scatter' and
`animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
Returns
-------
axes : matplotlib axes
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data)
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
### Response:
def plot_ppc(
data,
kind="density",
alpha=None,
mean=True,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
):
"""
Plot for posterior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior
predictive data.
kind : str
Type of plot to display (density, cumulative, or scatter). Defaults to density.
alpha : float
Opacity of posterior predictive density curves. Defaults to 0.2 for kind = density
and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior predictive data.
Dictionary structure:
Key = data var_name
Value = posterior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
Dimensions should match flatten excluding dimensions for data_pairs parameters.
If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior predictive samples to plot. For `kind` = 'scatter' and
`animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
Returns
-------
axes : matplotlib axes
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data)
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
for group in ("posterior_predictive", "observed_data"):
if not hasattr(data, group):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=group)
)
if kind.lower() not in ("density", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `density`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if animation_kwargs is None:
animation_kwargs = {}
if platform.system() == "Linux":
animation_kwargs.setdefault("blit", True)
else:
animation_kwargs.setdefault("blit", False)
if animated and animation_kwargs["blit"] and platform.system() != "Linux":
_log.warning(
"If you experience problems rendering the animation try setting"
"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
)
if alpha is None:
if animated:
alpha = 1
else:
if kind.lower() == "scatter":
alpha = 0.7
else:
alpha = 0.2
if jitter is None:
jitter = 0.0
assert jitter >= 0.0
observed = data.observed_data
posterior_predictive = data.posterior_predictive
if var_names is None:
var_names = observed.data_vars
var_names = _var_names(var_names, observed)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
if flatten_pp is None and flatten is None:
flatten_pp = list(posterior_predictive.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = posterior_predictive.sizes["chain"] * posterior_predictive.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
)
pp_plotters = list(
xarray_var_iter(
posterior_predictive.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
)
)
length_plotters = len(obs_plotters)
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize)
for i, ax in enumerate(axes):
var_name, selection, obs_vals = obs_plotters[i]
pp_var_name, _, pp_vals = pp_plotters[i]
dtype = posterior_predictive[pp_var_name].dtype.kind
# flatten non-specified dimensions
obs_vals = obs_vals.flatten()
pp_vals = pp_vals.reshape(total_pp_samples, -1)
pp_sampled_vals = pp_vals[pp_sample_ix]
if kind == "density":
plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth}
if dtype == "i":
plot_kwargs["drawstyle"] = "steps-pre"
ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if dtype == "f":
plot_kde(
obs_vals,
label="Observed {}".format(var_name),
plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3},
fill_kwargs={"alpha": 0},
ax=ax,
legend=legend,
)
else:
nbins = round(len(obs_vals) ** 0.5)
hist, bin_edges = np.histogram(obs_vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
label="Observed {}".format(var_name),
color="k",
linewidth=linewidth,
zorder=3,
drawstyle=plot_kwargs["drawstyle"],
)
if animated:
animate, init = _set_animation(
pp_sampled_vals, ax, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs
)
else:
# run plot_kde manually with one plot call
pp_densities = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
if dtype == "f":
pp_density, lower, upper = _fast_kde(vals)
pp_x = np.linspace(lower, upper, len(pp_density))
pp_densities.extend([pp_x, pp_density])
else:
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
pp_densities.extend([bin_edges, hist])
ax.plot(*pp_densities, **plot_kwargs)
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 2,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax,
legend=legend,
)
else:
vals = pp_vals.flatten()
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=2,
linestyle="--",
drawstyle=plot_kwargs["drawstyle"],
)
ax.tick_params(labelsize=xt_labelsize)
ax.set_yticks([])
elif kind == "cumulative":
drawstyle = "default" if dtype == "f" else "steps-pre"
ax.plot(
*_empirical_cdf(obs_vals),
color="k",
linewidth=linewidth,
label="Observed {}".format(var_name),
drawstyle=drawstyle,
zorder=3
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax,
kind=kind,
alpha=alpha,
drawstyle=drawstyle,
linewidth=linewidth,
)
else:
# run plot_kde manually with one plot call
pp_densities = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
pp_x, pp_density = _empirical_cdf(vals)
pp_densities.extend([pp_x, pp_density])
ax.plot(
*pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth
)
ax.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if mean:
ax.plot(
*_empirical_cdf(pp_vals.flatten()),
color="C0",
linestyle="--",
linewidth=linewidth,
drawstyle=drawstyle,
label="Posterior predictive mean {}".format(pp_var_name)
)
ax.set_yticks([0, 0.5, 1])
elif kind == "scatter":
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 3,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax,
legend=legend,
)
else:
vals = pp_vals.flatten()
nbins = round(len(vals) ** 0.5)
hist, bin_edges = np.histogram(vals, bins=nbins, density=True)
hist = np.concatenate((hist[:1], hist))
ax.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=3,
linestyle="--",
drawstyle="steps-pre",
)
_, limit = ax.get_ylim()
limit *= 1.05
y_rows = np.linspace(0, limit, num_pp_samples + 1)
jitter_scale = y_rows[1] - y_rows[0]
scale_low = 0
scale_high = jitter_scale * jitter
obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)
if jitter:
obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals))
ax.plot(
obs_vals,
obs_yvals,
"o",
color="C0",
markersize=markersize,
alpha=alpha,
label="Observed {}".format(var_name),
zorder=4,
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax,
kind=kind,
height=y_rows.mean() * 0.5,
markersize=markersize,
)
else:
for vals, y in zip(pp_sampled_vals, y_rows[1:]):
vals = np.ravel(vals)
yvals = np.full_like(vals, y, dtype=np.float64)
if jitter:
yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals))
ax.plot(
vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha
)
ax.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name))
ax.set_yticks([])
if var_name != pp_var_name:
xlabel = "{} / {}".format(var_name, pp_var_name)
else:
xlabel = var_name
ax.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)
if legend:
if i == 0:
ax.legend(fontsize=xt_labelsize * 0.75)
else:
ax.legend([])
if animated:
ani = animation.FuncAnimation(
fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs
)
return axes, ani
else:
return axes |
def greater_equal(lhs, rhs):
"""Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_greater_equal,
lambda x, y: 1 if x >= y else 0,
_internal._greater_equal_scalar,
_internal._lesser_equal_scalar) | Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32) | Below is the the instruction that describes the task:
### Input:
Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32)
### Response:
def greater_equal(lhs, rhs):
"""Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or mxnet.ndarray.array
First array to be compared.
rhs : scalar or mxnet.ndarray.array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_greater_equal,
lambda x, y: 1 if x >= y else 0,
_internal._greater_equal_scalar,
_internal._lesser_equal_scalar) |
def get_peak_number(self, sample):
"""
Counts number of peaks from a sample's peak file.
:param pipelines.Sample sample: Sample object with "peaks" attribute.
"""
proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE)
out, err = proc.communicate()
sample["peakNumber"] = re.sub("\D.*", "", out)
return sample | Counts number of peaks from a sample's peak file.
:param pipelines.Sample sample: Sample object with "peaks" attribute. | Below is the the instruction that describes the task:
### Input:
Counts number of peaks from a sample's peak file.
:param pipelines.Sample sample: Sample object with "peaks" attribute.
### Response:
def get_peak_number(self, sample):
"""
Counts number of peaks from a sample's peak file.
:param pipelines.Sample sample: Sample object with "peaks" attribute.
"""
proc = subprocess.Popen(["wc", "-l", sample.peaks], stdout=subprocess.PIPE)
out, err = proc.communicate()
sample["peakNumber"] = re.sub("\D.*", "", out)
return sample |
def DEFINE_choice(self, name, default, choices, help, constant=False):
"""A helper for defining choice string options."""
self.AddOption(
type_info.Choice(
name=name, default=default, choices=choices, description=help),
constant=constant) | A helper for defining choice string options. | Below is the the instruction that describes the task:
### Input:
A helper for defining choice string options.
### Response:
def DEFINE_choice(self, name, default, choices, help, constant=False):
"""A helper for defining choice string options."""
self.AddOption(
type_info.Choice(
name=name, default=default, choices=choices, description=help),
constant=constant) |
def make_matrix(version, reserve_regions=True, add_timing=True):
"""\
Creates a matrix of the provided `size` (w x h) initialized with the
(illegal) value 0x2.
The "timing pattern" is already added to the matrix and the version
and format areas are initialized with 0x0.
:param int version: The (Micro) QR Code version
:rtype: tuple of bytearrays
"""
size = calc_matrix_size(version)
row = [0x2] * size
matrix = tuple([bytearray(row) for i in range(size)])
if reserve_regions:
if version > 6:
# Reserve version pattern areas
for i in range(6):
# Upper right
matrix[i][-11] = 0x0
matrix[i][-10] = 0x0
matrix[i][-9] = 0x0
# Lower left
matrix[-11][i] = 0x0
matrix[-10][i] = 0x0
matrix[-9][i] = 0x0
# Reserve format pattern areas
for i in range(9):
matrix[i][8] = 0x0 # Upper left
matrix[8][i] = 0x0 # Upper bottom
if version > 0:
matrix[-i][8] = 0x0 # Bottom left
matrix[8][- i] = 0x0 # Upper right
if add_timing:
# ISO/IEC 18004:2015 -- 6.3.5 Timing pattern (page 17)
add_timing_pattern(matrix, version < 1)
return matrix | \
Creates a matrix of the provided `size` (w x h) initialized with the
(illegal) value 0x2.
The "timing pattern" is already added to the matrix and the version
and format areas are initialized with 0x0.
:param int version: The (Micro) QR Code version
:rtype: tuple of bytearrays | Below is the the instruction that describes the task:
### Input:
\
Creates a matrix of the provided `size` (w x h) initialized with the
(illegal) value 0x2.
The "timing pattern" is already added to the matrix and the version
and format areas are initialized with 0x0.
:param int version: The (Micro) QR Code version
:rtype: tuple of bytearrays
### Response:
def make_matrix(version, reserve_regions=True, add_timing=True):
"""\
Creates a matrix of the provided `size` (w x h) initialized with the
(illegal) value 0x2.
The "timing pattern" is already added to the matrix and the version
and format areas are initialized with 0x0.
:param int version: The (Micro) QR Code version
:rtype: tuple of bytearrays
"""
size = calc_matrix_size(version)
row = [0x2] * size
matrix = tuple([bytearray(row) for i in range(size)])
if reserve_regions:
if version > 6:
# Reserve version pattern areas
for i in range(6):
# Upper right
matrix[i][-11] = 0x0
matrix[i][-10] = 0x0
matrix[i][-9] = 0x0
# Lower left
matrix[-11][i] = 0x0
matrix[-10][i] = 0x0
matrix[-9][i] = 0x0
# Reserve format pattern areas
for i in range(9):
matrix[i][8] = 0x0 # Upper left
matrix[8][i] = 0x0 # Upper bottom
if version > 0:
matrix[-i][8] = 0x0 # Bottom left
matrix[8][- i] = 0x0 # Upper right
if add_timing:
# ISO/IEC 18004:2015 -- 6.3.5 Timing pattern (page 17)
add_timing_pattern(matrix, version < 1)
return matrix |
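A short usage sketch for make_matrix, assuming the module above is importable and that calc_matrix_size follows the standard QR sizing rule (17 + 4 * version modules per side for full-size symbols):
matrix = make_matrix(1)                      # full QR Code, version 1
assert len(matrix) == 21 and len(matrix[0]) == 21
# Row 6 holds the horizontal timing pattern, so it already contains real
# module values; everything not reserved still carries the placeholder 0x2.
assert any(module != 0x2 for module in matrix[6])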
def _translate_dst_oprnd(self, operand):
"""Translate destination operand to a SMT expression.
"""
if isinstance(operand, ReilRegisterOperand):
return self._translate_dst_register_oprnd(operand)
else:
raise Exception("Invalid operand type") | Translate destination operand to a SMT expression. | Below is the the instruction that describes the task:
### Input:
Translate destination operand to a SMT expression.
### Response:
def _translate_dst_oprnd(self, operand):
"""Translate destination operand to a SMT expression.
"""
if isinstance(operand, ReilRegisterOperand):
return self._translate_dst_register_oprnd(operand)
else:
raise Exception("Invalid operand type") |
def separate_groups(groups, key, total):
"""Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group
"""
optimum, extra = compute_optimum(len(groups), total)
over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total)
# If every group is optimal return
if not extra:
return over_loaded, under_loaded
# Some groups in optimal may have a number of elements that is optimum + 1.
# In this case they should be considered over_loaded.
potential_under_loaded = [
group for group in optimal
if key(group) == optimum
]
potential_over_loaded = [
group for group in optimal
if key(group) > optimum
]
revised_under_loaded = under_loaded + potential_under_loaded
revised_over_loaded = over_loaded + potential_over_loaded
return (
sorted(revised_over_loaded, key=key, reverse=True),
sorted(revised_under_loaded, key=key),
) | Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group | Below is the the instruction that describes the task:
### Input:
Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group
### Response:
def separate_groups(groups, key, total):
"""Separate the group into overloaded and under-loaded groups.
The revised over-loaded groups increases the choice space for future
selection of most suitable group based on search criteria.
For example:
Given the groups (a:4, b:4, c:3, d:2) where the number represents the number
of elements for each group.
smart_separate_groups sets 'a' and 'c' as optimal, 'b' as over-loaded
and 'd' as under-loaded.
separate-groups combines 'a' with 'b' as over-loaded, allowing to select
between these two groups to transfer the element to 'd'.
:param groups: list of groups
:param key: function to retrieve element count from group
:param total: total number of elements to distribute
:returns: sorted lists of over loaded (descending) and under
loaded (ascending) group
"""
optimum, extra = compute_optimum(len(groups), total)
over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total)
# If every group is optimal return
if not extra:
return over_loaded, under_loaded
# Some groups in optimal may have a number of elements that is optimum + 1.
# In this case they should be considered over_loaded.
potential_under_loaded = [
group for group in optimal
if key(group) == optimum
]
potential_over_loaded = [
group for group in optimal
if key(group) > optimum
]
revised_under_loaded = under_loaded + potential_under_loaded
revised_over_loaded = over_loaded + potential_over_loaded
return (
sorted(revised_over_loaded, key=key, reverse=True),
sorted(revised_under_loaded, key=key),
) |
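A usage sketch mirroring the (a:4, b:4, c:3, d:2) example from the docstring, assuming separate_groups is importable; the ('name', count) pairs and key function below are illustrative stand-ins for real group objects:
groups = [('a', 4), ('b', 4), ('c', 3), ('d', 2)]
over_loaded, under_loaded = separate_groups(
    groups,
    key=lambda group: group[1],   # element count lives in the second slot
    total=13,                     # 4 + 4 + 3 + 2 elements to distribute
)
# Per the docstring, 'a' joins 'b' in the over-loaded list (sorted by count,
# descending) and 'd' heads the under-loaded list (sorted ascending).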
def split_by_commas(maybe_s: str) -> Tuple[str, ...]:
"""Split a string by commas, but allow escaped commas.
- If maybe_s is falsey, returns an empty tuple
- Ignore backslashed commas
"""
if not maybe_s:
return ()
parts: List[str] = []
split_by_backslash = maybe_s.split(r'\,')
for split_by_backslash_part in split_by_backslash:
splitby_comma = split_by_backslash_part.split(',')
if parts:
parts[-1] += ',' + splitby_comma[0]
else:
parts.append(splitby_comma[0])
parts.extend(splitby_comma[1:])
return tuple(parts) | Split a string by commas, but allow escaped commas.
- If maybe_s is falsey, returns an empty tuple
- Ignore backslashed commas | Below is the the instruction that describes the task:
### Input:
Split a string by commas, but allow escaped commas.
- If maybe_s is falsey, returns an empty tuple
- Ignore backslashed commas
### Response:
def split_by_commas(maybe_s: str) -> Tuple[str, ...]:
"""Split a string by commas, but allow escaped commas.
- If maybe_s is falsey, returns an empty tuple
- Ignore backslashed commas
"""
if not maybe_s:
return ()
parts: List[str] = []
split_by_backslash = maybe_s.split(r'\,')
for split_by_backslash_part in split_by_backslash:
splitby_comma = split_by_backslash_part.split(',')
if parts:
parts[-1] += ',' + splitby_comma[0]
else:
parts.append(splitby_comma[0])
parts.extend(splitby_comma[1:])
return tuple(parts) |
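A few concrete calls for split_by_commas, assuming it is importable; the expected values follow directly from the splitting logic above:
assert split_by_commas(r'foo,bar\,baz') == ('foo', 'bar,baz')   # escaped comma kept
assert split_by_commas('a,b,c') == ('a', 'b', 'c')              # plain commas split
assert split_by_commas('') == ()                                # falsey input -> empty tuple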
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False) | Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0 | Below is the the instruction that describes the task:
### Input:
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
### Response:
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False) |
def is_valid(self):
"""
Check image integrity.
Tries to compute the checksum for each raster layer and returns False if this fails.
See this forum entry:
`How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
Returns
-------
bool
is the file valid?
"""
for i in range(self.raster.RasterCount):
try:
checksum = self.raster.GetRasterBand(i + 1).Checksum()
except RuntimeError:
return False
return True | Check image integrity.
Tries to compute the checksum for each raster layer and returns False if this fails.
See this forum entry:
`How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
Returns
-------
bool
is the file valid? | Below is the the instruction that describes the task:
### Input:
Check image integrity.
Tries to compute the checksum for each raster layer and returns False if this fails.
See this forum entry:
`How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
Returns
-------
bool
is the file valid?
### Response:
def is_valid(self):
"""
Check image integrity.
Tries to compute the checksum for each raster layer and returns False if this fails.
See this forum entry:
`How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
Returns
-------
bool
is the file valid?
"""
for i in range(self.raster.RasterCount):
try:
checksum = self.raster.GetRasterBand(i + 1).Checksum()
except RuntimeError:
return False
return True |
def topic_detail(request, slug):
"""
A detail view of a Topic
Templates:
:template:`faq/topic_detail.html`
Context:
topic
An :model:`faq.Topic` object.
question_list
A list of all published :model:`faq.Question` objects that relate
to the given :model:`faq.Topic`.
"""
extra_context = {
'question_list': Question.objects.published().filter(topic__slug=slug),
}
return object_detail(request, queryset=Topic.objects.published(),
extra_context=extra_context, template_object_name='topic', slug=slug) | A detail view of a Topic
Templates:
:template:`faq/topic_detail.html`
Context:
topic
An :model:`faq.Topic` object.
question_list
A list of all published :model:`faq.Question` objects that relate
to the given :model:`faq.Topic`. | Below is the the instruction that describes the task:
### Input:
A detail view of a Topic
Templates:
:template:`faq/topic_detail.html`
Context:
topic
An :model:`faq.Topic` object.
question_list
A list of all published :model:`faq.Question` objects that relate
to the given :model:`faq.Topic`.
### Response:
def topic_detail(request, slug):
"""
A detail view of a Topic
Templates:
:template:`faq/topic_detail.html`
Context:
topic
An :model:`faq.Topic` object.
question_list
A list of all published :model:`faq.Question` objects that relate
to the given :model:`faq.Topic`.
"""
extra_context = {
'question_list': Question.objects.published().filter(topic__slug=slug),
}
return object_detail(request, queryset=Topic.objects.published(),
extra_context=extra_context, template_object_name='topic', slug=slug) |
def update_selection(self):
"""
Convenience function update display (figures, text boxes and
statistics windows) with a new selection of specimen
"""
self.clear_boxes()
# commented out to allow propogation of higher level viewing state
self.clear_high_level_pars()
if self.UPPER_LEVEL_SHOW != "specimens":
self.mean_type_box.SetValue("None")
# --------------------------
# check if the coordinate system in the window exists (if not change to "specimen" coordinate system)
# --------------------------
coordinate_system = self.coordinates_box.GetValue()
if coordinate_system == 'tilt-corrected' and \
len(self.Data[self.s]['zijdblock_tilt']) == 0:
self.coordinates_box.SetStringSelection('specimen')
elif coordinate_system == 'geographic' and \
len(self.Data[self.s]['zijdblock_geo']) == 0:
self.coordinates_box.SetStringSelection("specimen")
if coordinate_system != self.coordinates_box.GetValue() and self.ie_open:
self.ie.coordinates_box.SetStringSelection(
self.coordinates_box.GetValue())
self.ie.update_editor()
coordinate_system = self.coordinates_box.GetValue()
self.COORDINATE_SYSTEM = coordinate_system
# --------------------------
# update treatment list
# --------------------------
self.update_bounds_boxes()
# --------------------------
# update high level boxes
# --------------------------
high_level = self.level_box.GetValue()
old_string = self.level_names.GetValue()
new_string = old_string
if high_level == 'sample':
if self.s in self.Data_hierarchy['sample_of_specimen']:
new_string = self.Data_hierarchy['sample_of_specimen'][self.s]
else:
new_string = ''
if high_level == 'site':
if self.s in self.Data_hierarchy['site_of_specimen']:
new_string = self.Data_hierarchy['site_of_specimen'][self.s]
else:
new_string = ''
if high_level == 'location':
if self.s in self.Data_hierarchy['location_of_specimen']:
new_string = self.Data_hierarchy['location_of_specimen'][self.s]
else:
new_string = ''
self.level_names.SetValue(new_string)
if self.ie_open and new_string != old_string:
self.ie.level_names.SetValue(new_string)
self.ie.on_select_level_name(-1, True)
# --------------------------
# update PCA box
# --------------------------
self.update_PCA_box()
# update warning
self.generate_warning_text()
self.update_warning_box()
# update choices in the fit box
self.update_fit_boxes()
self.update_mean_fit_box()
# measurements text box
self.Add_text()
# draw figures
if self.current_fit:
self.draw_figure(self.s, False)
else:
self.draw_figure(self.s, True)
# update high level stats
self.update_high_level_stats()
# redraw interpretations
self.update_GUI_with_new_interpretation() | Convenience function update display (figures, text boxes and
statistics windows) with a new selection of specimen | Below is the the instruction that describes the task:
### Input:
Convenience function update display (figures, text boxes and
statistics windows) with a new selection of specimen
### Response:
def update_selection(self):
"""
Convenience function update display (figures, text boxes and
statistics windows) with a new selection of specimen
"""
self.clear_boxes()
# commented out to allow propogation of higher level viewing state
self.clear_high_level_pars()
if self.UPPER_LEVEL_SHOW != "specimens":
self.mean_type_box.SetValue("None")
# --------------------------
# check if the coordinate system in the window exists (if not change to "specimen" coordinate system)
# --------------------------
coordinate_system = self.coordinates_box.GetValue()
if coordinate_system == 'tilt-corrected' and \
len(self.Data[self.s]['zijdblock_tilt']) == 0:
self.coordinates_box.SetStringSelection('specimen')
elif coordinate_system == 'geographic' and \
len(self.Data[self.s]['zijdblock_geo']) == 0:
self.coordinates_box.SetStringSelection("specimen")
if coordinate_system != self.coordinates_box.GetValue() and self.ie_open:
self.ie.coordinates_box.SetStringSelection(
self.coordinates_box.GetValue())
self.ie.update_editor()
coordinate_system = self.coordinates_box.GetValue()
self.COORDINATE_SYSTEM = coordinate_system
# --------------------------
# update treatment list
# --------------------------
self.update_bounds_boxes()
# --------------------------
# update high level boxes
# --------------------------
high_level = self.level_box.GetValue()
old_string = self.level_names.GetValue()
new_string = old_string
if high_level == 'sample':
if self.s in self.Data_hierarchy['sample_of_specimen']:
new_string = self.Data_hierarchy['sample_of_specimen'][self.s]
else:
new_string = ''
if high_level == 'site':
if self.s in self.Data_hierarchy['site_of_specimen']:
new_string = self.Data_hierarchy['site_of_specimen'][self.s]
else:
new_string = ''
if high_level == 'location':
if self.s in self.Data_hierarchy['location_of_specimen']:
new_string = self.Data_hierarchy['location_of_specimen'][self.s]
else:
new_string = ''
self.level_names.SetValue(new_string)
if self.ie_open and new_string != old_string:
self.ie.level_names.SetValue(new_string)
self.ie.on_select_level_name(-1, True)
# --------------------------
# update PCA box
# --------------------------
self.update_PCA_box()
# update warning
self.generate_warning_text()
self.update_warning_box()
# update choices in the fit box
self.update_fit_boxes()
self.update_mean_fit_box()
# measurements text box
self.Add_text()
# draw figures
if self.current_fit:
self.draw_figure(self.s, False)
else:
self.draw_figure(self.s, True)
# update high level stats
self.update_high_level_stats()
# redraw interpretations
self.update_GUI_with_new_interpretation() |
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id) | Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec | Below is the the instruction that describes the task:
### Input:
Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
### Response:
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id) |
def dot_solve(self, y):
r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`.
"""
return np.dot(y.T, cho_solve(self._factor, y)) | r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`. | Below is the the instruction that describes the task:
### Input:
r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`.
### Response:
def dot_solve(self, y):
r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`.
"""
return np.dot(y.T, cho_solve(self._factor, y)) |
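The quadratic form y K^{-1} y can be reproduced with SciPy's Cholesky helpers; a self-contained sketch that stands in for the class above (which presumably keeps cho_factor(K) in self._factor):
import numpy as np
from scipy.linalg import cho_factor, cho_solve

K = np.array([[2.0, 0.5], [0.5, 1.0]])    # small symmetric positive-definite matrix
y = np.array([1.0, -2.0])
factor = cho_factor(K)                     # plays the role of self._factor
quad = np.dot(y.T, cho_solve(factor, y))   # y K^{-1} y
assert np.isclose(quad, y @ np.linalg.inv(K) @ y)   # cross-check at this tiny size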
def _extract_services_list_helper(services):
"""Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
"""
if services is None:
return {}
if isinstance(services, dict):
services = services.values()
# either extract the list of services from the dictionary, or if
# it is a simple string, use that. i.e. works with mixed lists.
_s = OrderedDict()
for s in services:
if isinstance(s, dict) and 'service' in s:
_s[s['service']] = s.get('ports', [])
if isinstance(s, str):
_s[s] = []
return _s | Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...) | Below is the the instruction that describes the task:
### Input:
Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
### Response:
def _extract_services_list_helper(services):
"""Extract a OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
"""
if services is None:
return {}
if isinstance(services, dict):
services = services.values()
# either extract the list of services from the dictionary, or if
# it is a simple string, use that. i.e. works with mixed lists.
_s = OrderedDict()
for s in services:
if isinstance(s, dict) and 'service' in s:
_s[s['service']] = s.get('ports', [])
if isinstance(s, str):
_s[s] = []
return _s |
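A quick sketch of the three accepted input shapes described in the docstring, assuming the helper is importable:
# None, bare service names, and dicts with an optional 'ports' key all
# normalise to a mapping of service -> ports.
assert _extract_services_list_helper(None) == {}
assert _extract_services_list_helper(['haproxy', 'apache2']) == \
    {'haproxy': [], 'apache2': []}
assert _extract_services_list_helper(
    [{'service': 'haproxy', 'ports': [80, 443]}]) == {'haproxy': [80, 443]}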
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
monitors = super(Classifier, self).monitors(**kwargs)
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
return monitors + [('acc', self.losses[0].accuracy(outputs))] | Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network. | Below is the the instruction that describes the task:
### Input:
Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
### Response:
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
monitors = super(Classifier, self).monitors(**kwargs)
regs = regularizers.from_kwargs(self, **kwargs)
outputs, _ = self.build_graph(regs)
return monitors + [('acc', self.losses[0].accuracy(outputs))] |
def blink(self, state=True):
"""
Starts or stops the blinking state for this button. This only
works for when the toolbutton is in Shadowed or Colored mode.
:param state | <bool>
:return <bool> | success
"""
if self._blinking == state:
return True
elif not self.graphicsEffect():
return False
else:
self._blinking = state
if state:
self.startTimer(self.blinkInterval()) | Starts or stops the blinking state for this button. This only
works for when the toolbutton is in Shadowed or Colored mode.
:param state | <bool>
:return <bool> | success | Below is the the instruction that describes the task:
### Input:
Starts or stops the blinking state for this button. This only
works for when the toolbutton is in Shadowed or Colored mode.
:param state | <bool>
:return <bool> | success
### Response:
def blink(self, state=True):
"""
Starts or stops the blinking state for this button. This only
works for when the toolbutton is in Shadowed or Colored mode.
:param state | <bool>
:return <bool> | success
"""
if self._blinking == state:
return True
elif not self.graphicsEffect():
return False
else:
self._blinking = state
if state:
self.startTimer(self.blinkInterval()) |
def forward(self, data_batch, is_train=None):
"""Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
"""
self._scores = data_batch.data[0]
if is_train is None:
is_train = self.for_training
if is_train:
self._labels = data_batch.label[0] | Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``. | Below is the the instruction that describes the task:
### Input:
Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
### Response:
def forward(self, data_batch, is_train=None):
"""Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
"""
self._scores = data_batch.data[0]
if is_train is None:
is_train = self.for_training
if is_train:
self._labels = data_batch.label[0] |
def get_grouped(self, go_ntsets, go_all, gosubdag, **kws):
"""Get Grouped object."""
kws_grpd = {k:v for k, v in kws.items() if k in Grouped.kws_dict}
kws_grpd['go2nt'] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag)
return Grouped(gosubdag, self.godag.version, **kws_grpd) | Get Grouped object. | Below is the the instruction that describes the task:
### Input:
Get Grouped object.
### Response:
def get_grouped(self, go_ntsets, go_all, gosubdag, **kws):
"""Get Grouped object."""
kws_grpd = {k:v for k, v in kws.items() if k in Grouped.kws_dict}
kws_grpd['go2nt'] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag)
return Grouped(gosubdag, self.godag.version, **kws_grpd) |
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
req = self._session.delete(self._api_prefix + url)
return self._action(req) | Wrapper around request.delete() to use the API prefix. Returns a JSON response. | Below is the the instruction that describes the task:
### Input:
Wrapper around request.delete() to use the API prefix. Returns a JSON response.
### Response:
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
req = self._session.delete(self._api_prefix + url)
return self._action(req) |
def rank_width(self):
"""
Returns the width of each rank in the graph. #TODO
"""
rank_width = defaultdict(int)
node_rank = self.node_rank()
for rank in node_rank.values():
rank_width[rank] += 1
return dict(rank_width) | Returns the width of each rank in the graph. #TODO | Below is the the instruction that describes the task:
### Input:
Returns the width of each rank in the graph. #TODO
### Response:
def rank_width(self):
"""
Returns the width of each rank in the graph. #TODO
"""
rank_width = defaultdict(int)
node_rank = self.node_rank()
for rank in node_rank.values():
rank_width[rank] += 1
return dict(rank_width) |
def _build_connstr(host, port, bucket):
"""
Converts a 1.x host:port specification to a connection string
"""
hostlist = []
if isinstance(host, (tuple, list)):
for curhost in host:
if isinstance(curhost, (list, tuple)):
hostlist.append(_fmthost(*curhost))
else:
hostlist.append(curhost)
else:
hostlist.append(_fmthost(host, port))
return 'http://{0}/{1}'.format(','.join(hostlist), bucket) | Converts a 1.x host:port specification to a connection string | Below is the the instruction that describes the task:
### Input:
Converts a 1.x host:port specification to a connection string
### Response:
def _build_connstr(host, port, bucket):
"""
Converts a 1.x host:port specification to a connection string
"""
hostlist = []
if isinstance(host, (tuple, list)):
for curhost in host:
if isinstance(curhost, (list, tuple)):
hostlist.append(_fmthost(*curhost))
else:
hostlist.append(curhost)
else:
hostlist.append(_fmthost(host, port))
return 'http://{0}/{1}'.format(','.join(hostlist), bucket) |
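An illustrative call, assuming _fmthost (not shown above) renders the usual 'host:port' form; the exact output is therefore an assumption rather than a guarantee:
connstr = _build_connstr([('cb1.example.com', 8091), 'cb2.example.com'],
                         8091, 'beer-sample')
print(connstr)   # expected to look like 'http://cb1.example.com:8091,cb2.example.com/beer-sample'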
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info')
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = '{0!s}-{1!s}.json'.format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, 'rb') as metafile:
data = json.load(metafile)
else:
url = 'https://pypi.python.org/pypi/{}/json'.format(package_name)
try:
res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5)))
data = res.json()
except Exception as e: # pragma: no cover
return None
with open(json_file_path, 'wb') as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data['releases']:
return None
for f in data['releases'][package_version]:
if f['filename'].endswith(self.manylinux_wheel_file_suffix):
return f['url']
return None | For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time. | Below is the the instruction that describes the task:
### Input:
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
### Response:
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name, returns a link to the download URL,
else returns None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info')
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = '{0!s}-{1!s}.json'.format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, 'rb') as metafile:
data = json.load(metafile)
else:
url = 'https://pypi.python.org/pypi/{}/json'.format(package_name)
try:
res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5)))
data = res.json()
except Exception as e: # pragma: no cover
return None
with open(json_file_path, 'wb') as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data['releases']:
return None
for f in data['releases'][package_version]:
if f['filename'].endswith(self.manylinux_wheel_file_suffix):
return f['url']
return None |
def set_version(context: Context, version=None, bump=False):
"""
Updates the version of MTP-common
"""
if bump and version:
raise TaskError('You cannot bump and set a specific version')
if bump:
from mtp_common import VERSION
version = list(VERSION)
version[-1] += 1
else:
try:
version = list(map(int, version.split('.')))
assert len(version) == 3
except (AttributeError, ValueError, AssertionError):
raise TaskError('Version must be in the form N.N.N')
dotted_version = '.'.join(map(str, version))
replacements = [
(r'^VERSION =.*$',
'VERSION = (%s)' % ', '.join(map(str, version)),
'mtp_common/__init__.py'),
(r'^ "version":.*$',
' "version": "%s",' % dotted_version,
'package.json'),
]
for search, replacement, path in replacements:
with open(os.path.join(root_path, path)) as f:
content = f.read()
content = re.sub(search, replacement, content, flags=re.MULTILINE)
with open(os.path.join(root_path, path), 'w') as f:
f.write(content)
context.debug('Updated version to %s' % dotted_version) | Updates the version of MTP-common | Below is the the instruction that describes the task:
### Input:
Updates the version of MTP-common
### Response:
def set_version(context: Context, version=None, bump=False):
"""
Updates the version of MTP-common
"""
if bump and version:
raise TaskError('You cannot bump and set a specific version')
if bump:
from mtp_common import VERSION
version = list(VERSION)
version[-1] += 1
else:
try:
version = list(map(int, version.split('.')))
assert len(version) == 3
except (AttributeError, ValueError, AssertionError):
raise TaskError('Version must be in the form N.N.N')
dotted_version = '.'.join(map(str, version))
replacements = [
(r'^VERSION =.*$',
'VERSION = (%s)' % ', '.join(map(str, version)),
'mtp_common/__init__.py'),
(r'^ "version":.*$',
' "version": "%s",' % dotted_version,
'package.json'),
]
for search, replacement, path in replacements:
with open(os.path.join(root_path, path)) as f:
content = f.read()
content = re.sub(search, replacement, content, flags=re.MULTILINE)
with open(os.path.join(root_path, path), 'w') as f:
f.write(content)
context.debug('Updated version to %s' % dotted_version) |
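The substitutions above depend on re.MULTILINE anchors; a tiny self-contained illustration of that pattern (the file contents here are made up):
import re

package_json = '{\n  "name": "example-package",\n  "version": "5.1.0"\n}\n'
updated = re.sub(r'^  "version":.*$', '  "version": "5.2.0"',
                 package_json, flags=re.MULTILINE)
assert '"version": "5.2.0"' in updated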
def _convert_date_to_dict(field_date):
"""
Convert native python ``datetime.date`` object to a format supported by the API
"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year} | Convert native python ``datetime.date`` object to a format supported by the API | Below is the the instruction that describes the task:
### Input:
Convert native python ``datetime.date`` object to a format supported by the API
### Response:
def _convert_date_to_dict(field_date):
"""
Convert native python ``datetime.date`` object to a format supported by the API
"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year} |
def union(self, other, rename=False):
'''
Union/add two topologies together to form a larger topology.
If rename is False, the method assumes that node names
don't clash (i.e., you've called addNodeLabelPrefix or
you've explicitly chosen names to avoid clashes).
If rename is True, nodes/links are relabeled such that the
new "prefix" for each node is the graph name (i.e., for graph
name A, node h1 is renamed A_h1).
This method returns a new Topology object and does not modify
either topology used for unioning.
'''
if rename:
self.nxgraph = Topology.__relabel_graph(self.__nxgraph, self.name)
other.nxgraph = Topology.__relabel_graph(other.__nxgraph, other.name)
nxgraph = nx.union(self.nxgraph, other.nxgraph, name="{}_{}".format(self.name, other.name))
newtopo = Topology(nxgraph=nxgraph, name="{}_{}".format(self.name, other.name))
return newtopo | Union/add two topologies together to form a larger topology.
If rename is False, the method assumes that node names
don't clash (i.e., you've called addNodeLabelPrefix or
you've explicitly chosen names to avoid clashes).
If rename is True, nodes/links are relabeled such that the
new "prefix" for each node is the graph name (i.e., for graph
name A, node h1 is renamed A_h1).
This method returns a new Topology object and does not modify
either topology used for unioning. | Below is the the instruction that describes the task:
### Input:
Union/add two topologies together to form a larger topology.
If rename is False, the method assumes that node names
don't clash (i.e., you've called addNodeLabelPrefix or
you've explicitly chosen names to avoid clashes).
If rename is True, nodes/links are relabeled such that the
new "prefix" for each node is the graph name (i.e., for graph
name A, node h1 is renamed A_h1).
This method returns a new Topology object and does not modify
either topology used for unioning.
### Response:
def union(self, other, rename=False):
'''
Union/add two topologies together to form a larger topology.
If rename is False, the method assumes that node names
don't clash (i.e., you've called addNodeLabelPrefix or
you've explicitly chosen names to avoid clashes).
If rename is True, nodes/links are relabeled such that the
new "prefix" for each node is the graph name (i.e., for graph
name A, node h1 is renamed A_h1).
This method returns a new Topology object and does not modify
either topology used for unioning.
'''
if rename:
self.nxgraph = Topology.__relabel_graph(self.__nxgraph, self.name)
other.nxgraph = Topology.__relabel_graph(other.__nxgraph, other.name)
nxgraph = nx.union(self.nxgraph, other.nxgraph, name="{}_{}".format(self.name, other.name))
newtopo = Topology(nxgraph=nxgraph, name="{}_{}".format(self.name, other.name))
return newtopo |
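Underneath, the rename-then-union step is plain networkx; a self-contained sketch of the same idea without the Topology wrapper, prefixing node names with the graph name as the docstring describes:
import networkx as nx

a = nx.Graph(name='A')
a.add_edge('h1', 's1')
b = nx.Graph(name='B')
b.add_edge('h1', 's1')                     # same labels as in A, so rename first
a_renamed = nx.relabel_nodes(a, {n: 'A_' + n for n in a.nodes()})
b_renamed = nx.relabel_nodes(b, {n: 'B_' + n for n in b.nodes()})
combined = nx.union(a_renamed, b_renamed)  # union() requires disjoint node names
assert set(combined.nodes()) == {'A_h1', 'A_s1', 'B_h1', 'B_s1'}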
def check_repository_existence(params):
"""Check repository existence.
:param argparse.Namespace params: parameters
"""
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir)) | Check repository existence.
:param argparse.Namespace params: parameters | Below is the the instruction that describes the task:
### Input:
Check repository existence.
:param argparse.Namespace params: parameters
### Response:
def check_repository_existence(params):
"""Check repository existence.
:param argparse.Namespace params: parameters
"""
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir)) |
def egress(self, envelope, http_headers, operation, binding_options):
"""Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
"""
custom_headers = self._header_handler.GetHTTPHeaders()
http_headers.update(custom_headers)
return envelope, http_headers | Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers. | Below is the the instruction that describes the task:
### Input:
Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
### Response:
def egress(self, envelope, http_headers, operation, binding_options):
"""Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
"""
custom_headers = self._header_handler.GetHTTPHeaders()
http_headers.update(custom_headers)
return envelope, http_headers |
def validate_url(value):
""" Validate url. """
if not re.match(VIMEO_URL_RE, value) and not re.match(YOUTUBE_URL_RE, value):
raise ValidationError('Invalid URL - only Youtube, Vimeo can be used.') | Validate url. | Below is the the instruction that describes the task:
### Input:
Validate url.
### Response:
def validate_url(value):
""" Validate url. """
if not re.match(VIMEO_URL_RE, value) and not re.match(YOUTUBE_URL_RE, value):
raise ValidationError('Invalid URL - only Youtube, Vimeo can be used.') |
def does_not_contain_duplicates(self):
"""Asserts that val is iterable and does not contain any duplicate items."""
try:
if len(self.val) == len(set(self.val)):
return self
except TypeError:
raise TypeError('val is not iterable')
self._err('Expected <%s> to not contain duplicates, but did.' % self.val) | Asserts that val is iterable and does not contain any duplicate items. | Below is the the instruction that describes the task:
### Input:
Asserts that val is iterable and does not contain any duplicate items.
### Response:
def does_not_contain_duplicates(self):
"""Asserts that val is iterable and does not contain any duplicate items."""
try:
if len(self.val) == len(set(self.val)):
return self
except TypeError:
raise TypeError('val is not iterable')
self._err('Expected <%s> to not contain duplicates, but did.' % self.val) |
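Typical fluent-assertion usage, assuming the surrounding library's assert_that entry point wraps values in this class:
assert_that([1, 2, 3]).does_not_contain_duplicates()   # passes: all items unique
assert_that('abc').does_not_contain_duplicates()        # strings are iterable too
# assert_that([1, 2, 2]).does_not_contain_duplicates()  # would raise the error above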
def flagants(self, threshold=50):
""" Flags solutions with amplitude more than threshold larger than median.
"""
# identify very low gain amps not already flagged
badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0]
if len(badsols):
self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.' % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols]))
for sol in badsols:
self.flagged[sol] = True | Flags solutions with amplitude more than threshold larger than median. | Below is the the instruction that describes the task:
### Input:
Flags solutions with amplitude more than threshold larger than median.
### Response:
def flagants(self, threshold=50):
""" Flags solutions with amplitude more than threshold larger than median.
"""
# identify very low gain amps not already flagged
badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0]
if len(badsols):
self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.' % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols]))
for sol in badsols:
self.flagged[sol] = True |
def set_log_type_name(self, logType, name):
"""
Set a logtype name.
:Parameters:
#. logType (string): A defined logging type.
#. name (string): The logtype new name.
"""
assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType
assert isinstance(name, basestring), "name must be a string"
name = str(name)
self.__logTypeNames[logType] = name | Set a logtype name.
:Parameters:
#. logType (string): A defined logging type.
#. name (string): The logtype new name. | Below is the the instruction that describes the task:
### Input:
Set a logtype name.
:Parameters:
#. logType (string): A defined logging type.
#. name (string): The logtype new name.
### Response:
def set_log_type_name(self, logType, name):
"""
Set a logtype name.
:Parameters:
#. logType (string): A defined logging type.
#. name (string): The logtype new name.
"""
assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType
assert isinstance(name, basestring), "name must be a string"
name = str(name)
self.__logTypeNames[logType] = name |
def write(self, symbol, item, metadata=None, chunker=DateChunker(), audit=None, **kwargs):
"""
Writes data from item to symbol in the database
Parameters
----------
symbol: str
the symbol that will be used to reference the written data
item: Dataframe or Series
the data to write to the database
metadata: ?
optional per symbol metadata
chunker: Object of type Chunker
A chunker that chunks the data in item
audit: dict
audit information
kwargs:
optional keyword args that are passed to the chunker. Includes:
chunk_size:
used by chunker to break data into discrete chunks.
see specific chunkers for more information about this param.
func: function
function to apply to each chunk before writing. Function
can not modify the date column.
"""
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
previous_shas = []
doc = {}
meta = {}
doc[SYMBOL] = symbol
doc[LEN] = len(item)
doc[SERIALIZER] = self.serializer.TYPE
doc[CHUNKER] = chunker.TYPE
doc[USERMETA] = metadata
sym = self._get_symbol_info(symbol)
if sym:
previous_shas = set([Binary(x[SHA]) for x in self._collection.find({SYMBOL: symbol},
projection={SHA: True, '_id': False},
)])
ops = []
meta_ops = []
chunk_count = 0
for start, end, chunk_size, record in chunker.to_chunks(item, **kwargs):
chunk_count += 1
data = self.serializer.serialize(record)
doc[CHUNK_SIZE] = chunk_size
doc[METADATA] = {'columns': data[METADATA][COLUMNS] if COLUMNS in data[METADATA] else ''}
meta = data[METADATA]
for i in xrange(int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = meta[START] = start
chunk[END] = meta[END] = end
chunk[SYMBOL] = meta[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
chunk[SHA] = self._checksum(dates, chunk[DATA])
meta_ops.append(pymongo.ReplaceOne({SYMBOL: symbol,
START: start,
END: end},
meta, upsert=True))
if chunk[SHA] not in previous_shas:
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
else:
# already exists, don't need to update in mongo
previous_shas.remove(chunk[SHA])
if ops:
self._collection.bulk_write(ops, ordered=False)
if meta_ops:
self._mdata.bulk_write(meta_ops, ordered=False)
doc[CHUNK_COUNT] = chunk_count
doc[APPEND_COUNT] = 0
if previous_shas:
mongo_retry(self._collection.delete_many)({SYMBOL: symbol, SHA: {'$in': list(previous_shas)}})
mongo_retry(self._symbols.update_one)({SYMBOL: symbol},
{'$set': doc},
upsert=True)
if audit is not None:
audit['symbol'] = symbol
audit['action'] = 'write'
audit['chunks'] = chunk_count
self._audit.insert_one(audit) | Writes data from item to symbol in the database
Parameters
----------
symbol: str
the symbol that will be used to reference the written data
item: Dataframe or Series
the data to write to the database
metadata: ?
optional per symbol metadata
chunker: Object of type Chunker
A chunker that chunks the data in item
audit: dict
audit information
kwargs:
optional keyword args that are passed to the chunker. Includes:
chunk_size:
used by chunker to break data into discrete chunks.
see specific chunkers for more information about this param.
func: function
function to apply to each chunk before writing. Function
can not modify the date column. | Below is the instruction that describes the task:
### Input:
Writes data from item to symbol in the database
Parameters
----------
symbol: str
the symbol that will be used to reference the written data
item: Dataframe or Series
the data to write to the database
metadata: ?
optional per symbol metadata
chunker: Object of type Chunker
A chunker that chunks the data in item
audit: dict
audit information
kwargs:
optional keyword args that are passed to the chunker. Includes:
chunk_size:
used by chunker to break data into discrete chunks.
see specific chunkers for more information about this param.
func: function
function to apply to each chunk before writing. Function
can not modify the date column.
### Response:
def write(self, symbol, item, metadata=None, chunker=DateChunker(), audit=None, **kwargs):
"""
Writes data from item to symbol in the database
Parameters
----------
symbol: str
the symbol that will be used to reference the written data
item: Dataframe or Series
the data to write to the database
metadata: ?
optional per symbol metadata
chunker: Object of type Chunker
A chunker that chunks the data in item
audit: dict
audit information
kwargs:
optional keyword args that are passed to the chunker. Includes:
chunk_size:
used by chunker to break data into discrete chunks.
see specific chunkers for more information about this param.
func: function
function to apply to each chunk before writing. Function
can not modify the date column.
"""
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
previous_shas = []
doc = {}
meta = {}
doc[SYMBOL] = symbol
doc[LEN] = len(item)
doc[SERIALIZER] = self.serializer.TYPE
doc[CHUNKER] = chunker.TYPE
doc[USERMETA] = metadata
sym = self._get_symbol_info(symbol)
if sym:
previous_shas = set([Binary(x[SHA]) for x in self._collection.find({SYMBOL: symbol},
projection={SHA: True, '_id': False},
)])
ops = []
meta_ops = []
chunk_count = 0
for start, end, chunk_size, record in chunker.to_chunks(item, **kwargs):
chunk_count += 1
data = self.serializer.serialize(record)
doc[CHUNK_SIZE] = chunk_size
doc[METADATA] = {'columns': data[METADATA][COLUMNS] if COLUMNS in data[METADATA] else ''}
meta = data[METADATA]
for i in xrange(int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = meta[START] = start
chunk[END] = meta[END] = end
chunk[SYMBOL] = meta[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
chunk[SHA] = self._checksum(dates, chunk[DATA])
meta_ops.append(pymongo.ReplaceOne({SYMBOL: symbol,
START: start,
END: end},
meta, upsert=True))
if chunk[SHA] not in previous_shas:
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
else:
# already exists, don't need to update in mongo
previous_shas.remove(chunk[SHA])
if ops:
self._collection.bulk_write(ops, ordered=False)
if meta_ops:
self._mdata.bulk_write(meta_ops, ordered=False)
doc[CHUNK_COUNT] = chunk_count
doc[APPEND_COUNT] = 0
if previous_shas:
mongo_retry(self._collection.delete_many)({SYMBOL: symbol, SHA: {'$in': list(previous_shas)}})
mongo_retry(self._symbols.update_one)({SYMBOL: symbol},
{'$set': doc},
upsert=True)
if audit is not None:
audit['symbol'] = symbol
audit['action'] = 'write'
audit['chunks'] = chunk_count
self._audit.insert_one(audit) |
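A caller-side sketch for the chunked write above, assuming `lib` is an Arctic CHUNK_STORE library handle; the symbol name, dates and chunk_size are illustrative:
import pandas as pd

df = pd.DataFrame({'price': [1.0, 2.0, 3.0, 4.0]},
                  index=pd.date_range('2020-01-01', periods=4, name='date'))
lib.write('demo_symbol', df, metadata={'source': 'example'}, chunk_size='M')   # monthly chunks via the default DateChunker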
def clean(self):
"""
Clean form fields prior to database entry.
In this case, the major cleaning operation is substituting a None value for a blank
value in the Catalog field.
"""
cleaned_data = super(EnterpriseCustomerAdminForm, self).clean()
if 'catalog' in cleaned_data and not cleaned_data['catalog']:
cleaned_data['catalog'] = None
return cleaned_data | Clean form fields prior to database entry.
In this case, the major cleaning operation is substituting a None value for a blank
value in the Catalog field. | Below is the instruction that describes the task:
### Input:
Clean form fields prior to database entry.
In this case, the major cleaning operation is substituting a None value for a blank
value in the Catalog field.
### Response:
def clean(self):
"""
Clean form fields prior to database entry.
In this case, the major cleaning operation is substituting a None value for a blank
value in the Catalog field.
"""
cleaned_data = super(EnterpriseCustomerAdminForm, self).clean()
if 'catalog' in cleaned_data and not cleaned_data['catalog']:
cleaned_data['catalog'] = None
return cleaned_data |
def flatten_check(out:Tensor, targ:Tensor) -> Tensor:
"Check that `out` and `targ` have the same number of elements and flatten them."
out,targ = out.contiguous().view(-1),targ.contiguous().view(-1)
assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}."
return out,targ | Check that `out` and `targ` have the same number of elements and flatten them. | Below is the instruction that describes the task:
### Input:
Check that `out` and `targ` have the same number of elements and flatten them.
### Response:
def flatten_check(out:Tensor, targ:Tensor) -> Tensor:
"Check that `out` and `targ` have the same number of elements and flatten them."
out,targ = out.contiguous().view(-1),targ.contiguous().view(-1)
assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}."
return out,targ |
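A quick usage sketch with PyTorch tensors (shapes are illustrative):
import torch

out = torch.randn(8, 1)      # e.g. raw model output
targ = torch.randn(8)        # matching targets
out_flat, targ_flat = flatten_check(out, targ)
print(out_flat.shape, targ_flat.shape)    # torch.Size([8]) torch.Size([8])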
def complete(self):
"""
Called by the associated task to let us know that its state
has changed (e.g. from FUTURE to COMPLETED.)
"""
self._set_state(self.COMPLETED)
return self.task_spec._on_complete(self) | Called by the associated task to let us know that its state
has changed (e.g. from FUTURE to COMPLETED.) | Below is the instruction that describes the task:
### Input:
Called by the associated task to let us know that its state
has changed (e.g. from FUTURE to COMPLETED.)
### Response:
def complete(self):
"""
Called by the associated task to let us know that its state
has changed (e.g. from FUTURE to COMPLETED.)
"""
self._set_state(self.COMPLETED)
return self.task_spec._on_complete(self) |
def split_on_condition(seq, condition):
"""Split a sequence into two iterables without looping twice"""
l1, l2 = tee((condition(item), item) for item in seq)
return (i for p, i in l1 if p), (i for p, i in l2 if not p) | Split a sequence into two iterables without looping twice | Below is the instruction that describes the task:
### Input:
Split a sequence into two iterables without looping twice
### Response:
def split_on_condition(seq, condition):
"""Split a sequence into two iterables without looping twice"""
l1, l2 = tee((condition(item), item) for item in seq)
return (i for p, i in l1 if p), (i for p, i in l2 if not p) |
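A usage sketch; both results are lazy generators backed by itertools.tee, so the input is walked only as they are consumed:
evens, odds = split_on_condition(range(10), lambda x: x % 2 == 0)
print(list(evens))   # [0, 2, 4, 6, 8]
print(list(odds))    # [1, 3, 5, 7, 9]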
def parameter_values(self):
"""
Parameter values for this inspection situation. These correlate to
the situation_context.
:rtype: list(SituationParameterValue)
"""
for param in self.data.get('parameter_values', []):
cache = ElementCache(data=self.make_request(href=param))
name = '{}'.format(cache.type.title()).replace('_', '')
yield type(name, (SituationParameterValue,), {
'data': cache})(name=cache.name, type=cache.type, href=param) | Parameter values for this inspection situation. These correlate to
the situation_context.
:rtype: list(SituationParameterValue) | Below is the instruction that describes the task:
### Input:
Parameter values for this inspection situation. These correlate to
the situation_context.
:rtype: list(SituationParameterValue)
### Response:
def parameter_values(self):
"""
Parameter values for this inspection situation. These correlate to
the situation_context.
:rtype: list(SituationParameterValue)
"""
for param in self.data.get('parameter_values', []):
cache = ElementCache(data=self.make_request(href=param))
name = '{}'.format(cache.type.title()).replace('_', '')
yield type(name, (SituationParameterValue,), {
'data': cache})(name=cache.name, type=cache.type, href=param) |
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = get_field_kwargs(field_name, model_field)
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = set((
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
))
for key in list(field_kwargs.keys()):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field, postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
# for the PostgreSQL-specific `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs | Create regular model fields. | Below is the instruction that describes the task:
### Input:
Create regular model fields.
### Response:
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = get_field_kwargs(field_name, model_field)
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = set((
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
))
for key in list(field_kwargs.keys()):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field, postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
# for the PostgreSQL-specific `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs |
def pop_header(self, hkey, ignore_error=False):
"""
This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found).
"""
# try the integer approach first to allow negative values
if type(hkey) is not str:
try:
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return None
else:
try:
# find the key integer and pop it
hkey = self.hkeys.index(hkey)
# pop it!
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return | This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found). | Below is the instruction that describes the task:
### Input:
This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found).
### Response:
def pop_header(self, hkey, ignore_error=False):
"""
This will remove and return the specified header value.
Parameters
----------
hkey
Header key you wish to pop.
You can specify either a key string or an index.
ignore_error=False
Whether to quietly ignore any errors (i.e., hkey not found).
"""
# try the integer approach first to allow negative values
if type(hkey) is not str:
try:
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return None
else:
try:
# find the key integer and pop it
hkey = self.hkeys.index(hkey)
# pop it!
return self.headers.pop(self.hkeys.pop(hkey))
except:
if not ignore_error:
print("ERROR: pop_header() could not find hkey "+str(hkey))
return |
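A usage sketch, assuming `box` is an instance of the class defining pop_header with headers already loaded (the key names are made up):
value = box.pop_header('exposure_time')                       # remove and return by key name
first = box.pop_header(0)                                     # or by integer index (negatives allowed)
missing = box.pop_header('no_such_key', ignore_error=True)    # quietly returns None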
def messages(self):
"""Return remaining messages before limiting."""
return int(math.floor(((self.limit.unit_value - self.level) /
self.limit.unit_value) * self.limit.value)) | Return remaining messages before limiting. | Below is the instruction that describes the task:
### Input:
Return remaining messages before limiting.
### Response:
def messages(self):
"""Return remaining messages before limiting."""
return int(math.floor(((self.limit.unit_value - self.level) /
self.limit.unit_value) * self.limit.value)) |
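The property is a straight proportion; a worked example with illustrative numbers (a limit of 100 messages per unit, unit_value of 60 and a current level of 45):
import math

limit_value, unit_value, level = 100, 60.0, 45.0
remaining = int(math.floor(((unit_value - level) / unit_value) * limit_value))
print(remaining)   # 25 messages left before limiting starts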
def _check_buffer(self, data, ctype):
"""Convert buffer to cdata and check for valid size."""
assert ctype in _ffi_types.values()
if not isinstance(data, bytes):
data = _ffi.from_buffer(data)
frames, remainder = divmod(len(data),
self.channels * _ffi.sizeof(ctype))
if remainder:
raise ValueError("Data size must be a multiple of frame size")
return data, frames | Convert buffer to cdata and check for valid size. | Below is the instruction that describes the task:
### Input:
Convert buffer to cdata and check for valid size.
### Response:
def _check_buffer(self, data, ctype):
"""Convert buffer to cdata and check for valid size."""
assert ctype in _ffi_types.values()
if not isinstance(data, bytes):
data = _ffi.from_buffer(data)
frames, remainder = divmod(len(data),
self.channels * _ffi.sizeof(ctype))
if remainder:
raise ValueError("Data size must be a multiple of frame size")
return data, frames |
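The size check is plain divmod arithmetic; a standalone sketch for 16-bit stereo data that mirrors what the method assumes:
import cffi

ffi = cffi.FFI()
channels = 2
frame_bytes = channels * ffi.sizeof('short')   # 4 bytes per stereo int16 frame
data = bytes(12)                               # 12 bytes -> exactly 3 frames
frames, remainder = divmod(len(data), frame_bytes)
print(frames, remainder)                       # 3 0 -> valid; a non-zero remainder would raise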
def realms(self, details=False):
"""Return the realms / satellites configuration
Returns an object containing the hierarchical realms configuration with the main
information about each realm:
{
All: {
satellites: {
pollers: [
"poller-master"
],
reactionners: [
"reactionner-master"
],
schedulers: [
"scheduler-master", "scheduler-master-3", "scheduler-master-2"
],
brokers: [
"broker-master"
],
receivers: [
"receiver-master", "receiver-nsca"
]
},
children: { },
name: "All",
members: [
"host_1", "host_0", "host_3", "host_2", "host_11", "localhost"
],
level: 0
},
North: {
...
}
}
Sub realms defined inside a realm are provided in the `children` property of their
parent realm and they contain the same information as their parent.
The `members` entry contains the list of the hosts that are members of the realm.
If ``details`` is required, each realm will contain more information about each satellite
involved in the realm management:
{
All: {
satellites: {
pollers: [
{
passive: false,
name: "poller-master",
livestate_output: "poller/poller-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7771/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.593074,
type: "poller"
}
],
reactionners: [
{
passive: false,
name: "reactionner-master",
livestate_output: "reactionner/reactionner-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7769/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.587762,
type: "reactionner"
}
]
:return: dict containing realms / satellites
:rtype: dict
"""
def get_realm_info(realm, realms, satellites, details=False):
"""Get the realm and its children information
:return: None
"""
res = {
"name": realm.get_name(),
"level": realm.level,
"hosts": realm.members,
"hostgroups": realm.group_members,
"children": {},
"satellites": {
}
}
for child in realm.realm_members:
child = realms.find_by_name(child)
if not child:
continue
realm_infos = get_realm_info(child, realms, satellites, details=details)
res['children'][child.get_name()] = realm_infos
for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']:
res["satellites"][sat_type + 's'] = []
sats = realm.get_potential_satellites_by_type(satellites, sat_type)
for sat in sats:
if details:
res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json()
else:
res["satellites"][sat_type + 's'].append(sat.name)
return res
if details is not False:
details = bool(details)
# Report our daemons states, but only if a dispatcher and the configuration is loaded
if not getattr(self.app, 'dispatcher', None) or not getattr(self.app, 'conf', None):
return {'_status': u'ERR', '_message': "Not yet available. Please come back later."}
res = {}
higher_realms = [realm for realm in self.app.conf.realms if realm.level == 0]
for realm in higher_realms:
res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms,
self.app.dispatcher.all_daemons_links)
return res | Return the realms / satellites configuration
Returns an object containing the hierarchical realms configuration with the main
information about each realm:
{
All: {
satellites: {
pollers: [
"poller-master"
],
reactionners: [
"reactionner-master"
],
schedulers: [
"scheduler-master", "scheduler-master-3", "scheduler-master-2"
],
brokers: [
"broker-master"
],
receivers: [
"receiver-master", "receiver-nsca"
]
},
children: { },
name: "All",
members: [
"host_1", "host_0", "host_3", "host_2", "host_11", "localhost"
],
level: 0
},
North: {
...
}
}
Sub realms defined inside a realm are provided in the `children` property of their
parent realm and they contain the same information as their parent.
The `members` entry contains the list of the hosts that are members of the realm.
If ``details`` is required, each realm will contain more information about each satellite
involved in the realm management:
{
All: {
satellites: {
pollers: [
{
passive: false,
name: "poller-master",
livestate_output: "poller/poller-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7771/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.593074,
type: "poller"
}
],
reactionners: [
{
passive: false,
name: "reactionner-master",
livestate_output: "reactionner/reactionner-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7769/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.587762,
type: "reactionner"
}
]
:return: dict containing realms / satellites
:rtype: dict | Below is the instruction that describes the task:
### Input:
Return the realms / satellites configuration
Returns an object containing the hierarchical realms configuration with the main
information about each realm:
{
All: {
satellites: {
pollers: [
"poller-master"
],
reactionners: [
"reactionner-master"
],
schedulers: [
"scheduler-master", "scheduler-master-3", "scheduler-master-2"
],
brokers: [
"broker-master"
],
receivers: [
"receiver-master", "receiver-nsca"
]
},
children: { },
name: "All",
members: [
"host_1", "host_0", "host_3", "host_2", "host_11", "localhost"
],
level: 0
},
North: {
...
}
}
Sub realms defined inside a realm are provided in the `children` property of their
parent realm and they contain the same information as their parent.
The `members` entry contains the list of the hosts that are members of the realm.
If ``details`` is required, each realm will contain more information about each satellite
involved in the realm management:
{
All: {
satellites: {
pollers: [
{
passive: false,
name: "poller-master",
livestate_output: "poller/poller-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7771/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.593074,
type: "poller"
}
],
reactionners: [
{
passive: false,
name: "reactionner-master",
livestate_output: "reactionner/reactionner-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7769/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.587762,
type: "reactionner"
}
]
:return: dict containing realms / satellites
:rtype: dict
### Response:
def realms(self, details=False):
"""Return the realms / satellites configuration
Returns an object containing the hierarchical realms configuration with the main
information about each realm:
{
All: {
satellites: {
pollers: [
"poller-master"
],
reactionners: [
"reactionner-master"
],
schedulers: [
"scheduler-master", "scheduler-master-3", "scheduler-master-2"
],
brokers: [
"broker-master"
],
receivers: [
"receiver-master", "receiver-nsca"
]
},
children: { },
name: "All",
members: [
"host_1", "host_0", "host_3", "host_2", "host_11", "localhost"
],
level: 0
},
North: {
...
}
}
Sub realms defined inside a realm are provided in the `children` property of their
parent realm and they contain the same information as their parent.
The `members` entry contains the list of the hosts that are members of the realm.
If ``details`` is required, each realm will contain more information about each satellite
involved in the realm management:
{
All: {
satellites: {
pollers: [
{
passive: false,
name: "poller-master",
livestate_output: "poller/poller-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7771/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.593074,
type: "poller"
}
],
reactionners: [
{
passive: false,
name: "reactionner-master",
livestate_output: "reactionner/reactionner-master is up and running.",
reachable: true,
uri: "http://127.0.0.1:7769/",
alive: true,
realm_name: "All",
manage_sub_realms: true,
spare: false,
polling_interval: 5,
configuration_sent: true,
active: true,
livestate: 0,
max_check_attempts: 3,
last_check: 1532242300.587762,
type: "reactionner"
}
]
:return: dict containing realms / satellites
:rtype: dict
"""
def get_realm_info(realm, realms, satellites, details=False):
"""Get the realm and its children information
:return: None
"""
res = {
"name": realm.get_name(),
"level": realm.level,
"hosts": realm.members,
"hostgroups": realm.group_members,
"children": {},
"satellites": {
}
}
for child in realm.realm_members:
child = realms.find_by_name(child)
if not child:
continue
realm_infos = get_realm_info(child, realms, satellites, details=details)
res['children'][child.get_name()] = realm_infos
for sat_type in ['scheduler', 'reactionner', 'broker', 'receiver', 'poller']:
res["satellites"][sat_type + 's'] = []
sats = realm.get_potential_satellites_by_type(satellites, sat_type)
for sat in sats:
if details:
res["satellites"][sat_type + 's'][sat.name] = sat.give_satellite_json()
else:
res["satellites"][sat_type + 's'].append(sat.name)
return res
if details is not False:
details = bool(details)
# Report our daemons states, but only if a dispatcher and the configuration is loaded
if not getattr(self.app, 'dispatcher', None) or not getattr(self.app, 'conf', None):
return {'_status': u'ERR', '_message': "Not yet available. Please come back later."}
res = {}
higher_realms = [realm for realm in self.app.conf.realms if realm.level == 0]
for realm in higher_realms:
res[realm.get_name()] = get_realm_info(realm, self.app.conf.realms,
self.app.dispatcher.all_daemons_links)
return res |
def mavlink_packet(self, msg):
'''handle an incoming mavlink packet'''
type = msg.get_type()
master = self.master
# add some status fields
if type in [ 'RC_CHANNELS' ]:
ilock = self.get_rc_input(msg, self.interlock_channel)
if ilock <= 0:
self.console.set_status('ILOCK', 'ILOCK:--', fg='grey', row=4)
elif ilock >= 1800:
self.console.set_status('ILOCK', 'ILOCK:ON', fg='red', row=4)
else:
self.console.set_status('ILOCK', 'ILOCK:OFF', fg='green', row=4)
override = self.get_rc_input(msg, self.override_channel)
if override <= 0:
self.console.set_status('OVR', 'OVR:--', fg='grey', row=4)
elif override >= 1800:
self.console.set_status('OVR', 'OVR:ON', fg='red', row=4)
else:
self.console.set_status('OVR', 'OVR:OFF', fg='green', row=4)
zeroi = self.get_rc_input(msg, self.zero_I_channel)
if zeroi <= 0:
self.console.set_status('ZEROI', 'ZEROI:--', fg='grey', row=4)
elif zeroi >= 1800:
self.console.set_status('ZEROI', 'ZEROI:ON', fg='red', row=4)
else:
self.console.set_status('ZEROI', 'ZEROI:OFF', fg='green', row=4)
novtol = self.get_rc_input(msg, self.no_vtol_channel)
if novtol <= 0:
self.console.set_status('NOVTOL', 'NOVTOL:--', fg='grey', row=4)
elif novtol >= 1800:
self.console.set_status('NOVTOL', 'NOVTOL:ON', fg='red', row=4)
else:
self.console.set_status('NOVTOL', 'NOVTOL:OFF', fg='green', row=4)
if type in [ 'SERVO_OUTPUT_RAW' ]:
rsc = self.get_pwm_output(msg, self.rsc_out_channel)
if rsc <= 0:
self.console.set_status('RSC', 'RSC:--', fg='grey', row=4)
elif rsc <= 1200:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='red', row=4)
elif rsc <= 1600:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='orange', row=4)
else:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='green', row=4)
thr = self.get_pwm_output(msg, self.fwd_thr_channel)
if thr <= 0:
self.console.set_status('FTHR', 'FTHR:--', fg='grey', row=4)
elif thr <= 1100:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='red', row=4)
elif thr <= 1500:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='orange', row=4)
else:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='green', row=4)
if type in [ 'RPM' ]:
rpm = msg.rpm1
if rpm < 1000:
rpm_colour = 'red'
elif rpm < 2000:
rpm_colour = 'orange'
else:
rpm_colour = 'green'
self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4) | handle an incoming mavlink packet | Below is the instruction that describes the task:
### Input:
handle an incoming mavlink packet
### Response:
def mavlink_packet(self, msg):
'''handle an incoming mavlink packet'''
type = msg.get_type()
master = self.master
# add some status fields
if type in [ 'RC_CHANNELS' ]:
ilock = self.get_rc_input(msg, self.interlock_channel)
if ilock <= 0:
self.console.set_status('ILOCK', 'ILOCK:--', fg='grey', row=4)
elif ilock >= 1800:
self.console.set_status('ILOCK', 'ILOCK:ON', fg='red', row=4)
else:
self.console.set_status('ILOCK', 'ILOCK:OFF', fg='green', row=4)
override = self.get_rc_input(msg, self.override_channel)
if override <= 0:
self.console.set_status('OVR', 'OVR:--', fg='grey', row=4)
elif override >= 1800:
self.console.set_status('OVR', 'OVR:ON', fg='red', row=4)
else:
self.console.set_status('OVR', 'OVR:OFF', fg='green', row=4)
zeroi = self.get_rc_input(msg, self.zero_I_channel)
if zeroi <= 0:
self.console.set_status('ZEROI', 'ZEROI:--', fg='grey', row=4)
elif zeroi >= 1800:
self.console.set_status('ZEROI', 'ZEROI:ON', fg='red', row=4)
else:
self.console.set_status('ZEROI', 'ZEROI:OFF', fg='green', row=4)
novtol = self.get_rc_input(msg, self.no_vtol_channel)
if novtol <= 0:
self.console.set_status('NOVTOL', 'NOVTOL:--', fg='grey', row=4)
elif novtol >= 1800:
self.console.set_status('NOVTOL', 'NOVTOL:ON', fg='red', row=4)
else:
self.console.set_status('NOVTOL', 'NOVTOL:OFF', fg='green', row=4)
if type in [ 'SERVO_OUTPUT_RAW' ]:
rsc = self.get_pwm_output(msg, self.rsc_out_channel)
if rsc <= 0:
self.console.set_status('RSC', 'RSC:--', fg='grey', row=4)
elif rsc <= 1200:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='red', row=4)
elif rsc <= 1600:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='orange', row=4)
else:
self.console.set_status('RSC', 'RSC:%u' % rsc, fg='green', row=4)
thr = self.get_pwm_output(msg, self.fwd_thr_channel)
if thr <= 0:
self.console.set_status('FTHR', 'FTHR:--', fg='grey', row=4)
elif thr <= 1100:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='red', row=4)
elif thr <= 1500:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='orange', row=4)
else:
self.console.set_status('FTHR', 'FTHR:%u' % thr, fg='green', row=4)
if type in [ 'RPM' ]:
rpm = msg.rpm1
if rpm < 1000:
rpm_colour = 'red'
elif rpm < 2000:
rpm_colour = 'orange'
else:
rpm_colour = 'green'
self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4) |
def listBlockSummaries(self, block_name="", dataset="", detail=False):
"""
API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
"""
if bool(dataset)+bool(block_name)!=1:
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"Dataset or block_names must be specified at a time.")
if block_name and isinstance(block_name, basestring):
try:
block_name = [str(block_name)]
except:
dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ")
for this_block_name in block_name:
if re.search("[*, %]", this_block_name):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in block_name list")
if re.search("[*, %]", dataset):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in dataset")
data = []
try:
with self.dbi.connection() as conn:
data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error',
dbsExceptionCode['dbsException-server-error'],
self.logger.exception,
sError)
for item in data:
yield item | API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided | Below is the instruction that describes the task:
### Input:
API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
### Response:
def listBlockSummaries(self, block_name="", dataset="", detail=False):
"""
API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
"""
if bool(dataset)+bool(block_name)!=1:
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"Dataset or block_names must be specified at a time.")
if block_name and isinstance(block_name, basestring):
try:
block_name = [str(block_name)]
except:
dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ")
for this_block_name in block_name:
if re.search("[*, %]", this_block_name):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in block_name list")
if re.search("[*, %]", dataset):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in dataset")
data = []
try:
with self.dbi.connection() as conn:
data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error',
dbsExceptionCode['dbsException-server-error'],
self.logger.exception,
sError)
for item in data:
yield item |
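The method is a generator, so callers iterate over the yielded summaries; a sketch against a reader instance (the instance name and dataset path are illustrative):
# reader is assumed to be the DBS reader model instance exposing this API
for summary in reader.listBlockSummaries(dataset='/Primary/Processed-v1/TIER', detail=True):
    print(summary)   # dicts with block sizes, file counts and event counts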
def _compose_restart(services):
"""Well, this is annoying. Compose 1.2 shipped with the
restart functionality fucking broken, so we can't set a faster
timeout than 10 seconds (which is way too long) using Compose.
We are therefore resigned to trying to hack this together
ourselves. Lame.
Relevant fix which will make it into the next release:
https://github.com/docker/compose/pull/1318"""
def _restart_container(client, container):
log_to_client('Restarting {}'.format(get_canonical_container_name(container)))
client.restart(container['Id'], timeout=1)
assembled_specs = get_assembled_specs()
if services == []:
services = [spec.name for spec in assembled_specs.get_apps_and_services()]
logging.info('Restarting service containers from list: {}'.format(services))
client = get_docker_client()
for service in services:
container = get_container_for_app_or_service(service, include_exited=True)
if container is None:
log_to_client('No container found for {}'.format(service))
continue
stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs)
if stopped_linked_containers:
log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format(
stopped_linked_containers, service))
else:
_restart_container(client, container) | Well, this is annoying. Compose 1.2 shipped with the
restart functionality fucking broken, so we can't set a faster
timeout than 10 seconds (which is way too long) using Compose.
We are therefore resigned to trying to hack this together
ourselves. Lame.
Relevant fix which will make it into the next release:
https://github.com/docker/compose/pull/1318 | Below is the instruction that describes the task:
### Input:
Well, this is annoying. Compose 1.2 shipped with the
restart functionality fucking broken, so we can't set a faster
timeout than 10 seconds (which is way too long) using Compose.
We are therefore resigned to trying to hack this together
ourselves. Lame.
Relevant fix which will make it into the next release:
https://github.com/docker/compose/pull/1318
### Response:
def _compose_restart(services):
"""Well, this is annoying. Compose 1.2 shipped with the
restart functionality fucking broken, so we can't set a faster
timeout than 10 seconds (which is way too long) using Compose.
We are therefore resigned to trying to hack this together
ourselves. Lame.
Relevant fix which will make it into the next release:
https://github.com/docker/compose/pull/1318"""
def _restart_container(client, container):
log_to_client('Restarting {}'.format(get_canonical_container_name(container)))
client.restart(container['Id'], timeout=1)
assembled_specs = get_assembled_specs()
if services == []:
services = [spec.name for spec in assembled_specs.get_apps_and_services()]
logging.info('Restarting service containers from list: {}'.format(services))
client = get_docker_client()
for service in services:
container = get_container_for_app_or_service(service, include_exited=True)
if container is None:
log_to_client('No container found for {}'.format(service))
continue
stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs)
if stopped_linked_containers:
log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format(
stopped_linked_containers, service))
else:
_restart_container(client, container) |
def launch_cif_clean(cif_filter, cif_select, group_cif_raw, group_cif_clean, group_structure, group_workchain, node,
max_entries, skip_check, parse_engine, daemon):
"""Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes.
It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if
the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the
cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if
successful, will be added to the `group-structure` group.
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches
import inspect
from datetime import datetime
from aiida import orm
from aiida.engine import launch
from aiida.plugins import DataFactory, WorkflowFactory
from aiida_codtools.common.cli import echo_utc
from aiida_codtools.common.resources import get_default_options
from aiida_codtools.common.utils import get_input_node
CifData = DataFactory('cif') # pylint: disable=invalid-name
CifCleanWorkChain = WorkflowFactory('codtools.cif_clean') # pylint: disable=invalid-name
# Collect the dictionary of not None parameters passed to the launch script and print to screen
local_vars = locals()
launch_paramaters = {}
for arg in inspect.getargspec(launch_cif_clean.callback).args: # pylint: disable=deprecated-method
if arg in local_vars and local_vars[arg]:
launch_paramaters[arg] = local_vars[arg]
click.echo('=' * 80)
click.echo('Starting on {}'.format(datetime.utcnow().isoformat()))
click.echo('Launch parameters: {}'.format(launch_paramaters))
click.echo('-' * 80)
if group_cif_raw is not None:
# Get CifData nodes that should actually be submitted according to the input filters
builder = orm.QueryBuilder()
builder.append(orm.Group, filters={'id': {'==': group_cif_raw.pk}}, tag='group')
if skip_check:
builder.append(CifData, with_group='group', project=['*'])
else:
# Get CifData nodes that already have an associated workchain node in the `group_workchain` group.
submitted = orm.QueryBuilder()
submitted.append(orm.WorkChainNode, tag='workchain')
submitted.append(orm.Group, filters={'id': {'==': group_workchain.pk}}, with_node='workchain')
submitted.append(orm.CifData, with_outgoing='workchain', tag='data', project=['id'])
submitted_nodes = set(pk for entry in submitted.all() for pk in entry)
if submitted_nodes:
filters = {'id': {'!in': submitted_nodes}}
else:
filters = {}
# Get all CifData nodes that are not included in the submitted node list
builder.append(CifData, with_group='group', filters=filters, project=['*'])
if max_entries is not None:
builder.limit(int(max_entries))
nodes = [entry[0] for entry in builder.all()]
elif node is not None:
nodes = [node]
else:
raise click.BadParameter('you have to specify either --group-cif-raw or --node')
counter = 0
node_cif_filter_parameters = get_input_node(orm.Dict, {
'fix-syntax-errors': True,
'use-c-parser': True,
'use-datablocks-without-coordinates': True,
})
node_cif_select_parameters = get_input_node(orm.Dict, {
'canonicalize-tag-names': True,
'dont-treat-dots-as-underscores': True,
'invert': True,
'tags': '_publ_author_name,_citation_journal_abbrev',
'use-c-parser': True,
})
node_options = get_input_node(orm.Dict, get_default_options())
node_parse_engine = get_input_node(orm.Str, parse_engine)
node_site_tolerance = get_input_node(orm.Float, 5E-4)
node_symprec = get_input_node(orm.Float, 5E-3)
for cif in nodes:
inputs = {
'cif': cif,
'cif_filter': cif_filter,
'cif_select': cif_select,
'cif_filter_parameters': node_cif_filter_parameters,
'cif_select_parameters': node_cif_select_parameters,
'options': node_options,
'parse_engine': node_parse_engine,
'site_tolerance': node_site_tolerance,
'symprec': node_symprec,
}
if group_cif_clean is not None:
inputs['group_cif'] = group_cif_clean
if group_structure is not None:
inputs['group_structure'] = group_structure
if daemon:
workchain = launch.submit(CifCleanWorkChain, **inputs)
echo_utc('CifData<{}> submitting: {}<{}>'.format(cif.pk, CifCleanWorkChain.__name__, workchain.pk))
else:
echo_utc('CifData<{}> running: {}'.format(cif.pk, CifCleanWorkChain.__name__))
_, workchain = launch.run_get_node(CifCleanWorkChain, **inputs)
if group_workchain is not None:
group_workchain.add_nodes([workchain])
counter += 1
if max_entries is not None and counter >= max_entries:
break
click.echo('-' * 80)
click.echo('Submitted {} new workchains'.format(counter))
click.echo('Stopping on {}'.format(datetime.utcnow().isoformat()))
click.echo('=' * 80) | Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes.
It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if
the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the
cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if
successful, will be added to the `group-structure` group. | Below is the instruction that describes the task:
### Input:
Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes.
It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if
the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the
cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if
successful, will be added to the `group-structure` group.
### Response:
def launch_cif_clean(cif_filter, cif_select, group_cif_raw, group_cif_clean, group_structure, group_workchain, node,
max_entries, skip_check, parse_engine, daemon):
"""Run the `CifCleanWorkChain` on the entries in a group with raw imported CifData nodes.
It will use the `cif_filter` and `cif_select` scripts of `cod-tools` to clean the input cif file. Additionally, if
the `group-structure` option is passed, the workchain will also attempt to use the given parse engine to parse the
cleaned `CifData` to obtain the structure and then use SeeKpath to find the primitive structure, which, if
successful, will be added to the `group-structure` group.
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches
import inspect
from datetime import datetime
from aiida import orm
from aiida.engine import launch
from aiida.plugins import DataFactory, WorkflowFactory
from aiida_codtools.common.cli import echo_utc
from aiida_codtools.common.resources import get_default_options
from aiida_codtools.common.utils import get_input_node
CifData = DataFactory('cif') # pylint: disable=invalid-name
CifCleanWorkChain = WorkflowFactory('codtools.cif_clean') # pylint: disable=invalid-name
# Collect the dictionary of not None parameters passed to the launch script and print to screen
local_vars = locals()
launch_paramaters = {}
for arg in inspect.getargspec(launch_cif_clean.callback).args: # pylint: disable=deprecated-method
if arg in local_vars and local_vars[arg]:
launch_paramaters[arg] = local_vars[arg]
click.echo('=' * 80)
click.echo('Starting on {}'.format(datetime.utcnow().isoformat()))
click.echo('Launch parameters: {}'.format(launch_paramaters))
click.echo('-' * 80)
if group_cif_raw is not None:
# Get CifData nodes that should actually be submitted according to the input filters
builder = orm.QueryBuilder()
builder.append(orm.Group, filters={'id': {'==': group_cif_raw.pk}}, tag='group')
if skip_check:
builder.append(CifData, with_group='group', project=['*'])
else:
# Get CifData nodes that already have an associated workchain node in the `group_workchain` group.
submitted = orm.QueryBuilder()
submitted.append(orm.WorkChainNode, tag='workchain')
submitted.append(orm.Group, filters={'id': {'==': group_workchain.pk}}, with_node='workchain')
submitted.append(orm.CifData, with_outgoing='workchain', tag='data', project=['id'])
submitted_nodes = set(pk for entry in submitted.all() for pk in entry)
if submitted_nodes:
filters = {'id': {'!in': submitted_nodes}}
else:
filters = {}
# Get all CifData nodes that are not included in the submitted node list
builder.append(CifData, with_group='group', filters=filters, project=['*'])
if max_entries is not None:
builder.limit(int(max_entries))
nodes = [entry[0] for entry in builder.all()]
elif node is not None:
nodes = [node]
else:
raise click.BadParameter('you have to specify either --group-cif-raw or --node')
counter = 0
node_cif_filter_parameters = get_input_node(orm.Dict, {
'fix-syntax-errors': True,
'use-c-parser': True,
'use-datablocks-without-coordinates': True,
})
node_cif_select_parameters = get_input_node(orm.Dict, {
'canonicalize-tag-names': True,
'dont-treat-dots-as-underscores': True,
'invert': True,
'tags': '_publ_author_name,_citation_journal_abbrev',
'use-c-parser': True,
})
node_options = get_input_node(orm.Dict, get_default_options())
node_parse_engine = get_input_node(orm.Str, parse_engine)
node_site_tolerance = get_input_node(orm.Float, 5E-4)
node_symprec = get_input_node(orm.Float, 5E-3)
for cif in nodes:
inputs = {
'cif': cif,
'cif_filter': cif_filter,
'cif_select': cif_select,
'cif_filter_parameters': node_cif_filter_parameters,
'cif_select_parameters': node_cif_select_parameters,
'options': node_options,
'parse_engine': node_parse_engine,
'site_tolerance': node_site_tolerance,
'symprec': node_symprec,
}
if group_cif_clean is not None:
inputs['group_cif'] = group_cif_clean
if group_structure is not None:
inputs['group_structure'] = group_structure
if daemon:
workchain = launch.submit(CifCleanWorkChain, **inputs)
echo_utc('CifData<{}> submitting: {}<{}>'.format(cif.pk, CifCleanWorkChain.__name__, workchain.pk))
else:
echo_utc('CifData<{}> running: {}'.format(cif.pk, CifCleanWorkChain.__name__))
_, workchain = launch.run_get_node(CifCleanWorkChain, **inputs)
if group_workchain is not None:
group_workchain.add_nodes([workchain])
counter += 1
if max_entries is not None and counter >= max_entries:
break
click.echo('-' * 80)
click.echo('Submitted {} new workchains'.format(counter))
click.echo('Stopping on {}'.format(datetime.utcnow().isoformat()))
click.echo('=' * 80) |
def merge(self, other_roc):
"""
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
"""
if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
self.contingency_tables += other_roc.contingency_tables
else:
print("Input table thresholds do not match.") | Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object. | Below is the instruction that describes the task:
### Input:
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
### Response:
def merge(self, other_roc):
"""
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
"""
if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
self.contingency_tables += other_roc.contingency_tables
else:
print("Input table thresholds do not match.") |
def create_message(self, channel_id, text):
"""
Sends a message to a Discord channel or user via REST API
Args:
channel_id (string): ID of destingation Discord channel
text (string): Content of message
"""
baseurl = self.rest_baseurl + \
'/channels/{}/messages'.format(channel_id)
requests.post(baseurl,
headers=self.headers,
data=json.dumps({'content': text})) | Sends a message to a Discord channel or user via REST API
Args:
channel_id (string): ID of destination Discord channel
text (string): Content of message | Below is the instruction that describes the task:
### Input:
Sends a message to a Discord channel or user via REST API
Args:
channel_id (string): ID of destination Discord channel
text (string): Content of message
### Response:
def create_message(self, channel_id, text):
"""
Sends a message to a Discord channel or user via REST API
Args:
channel_id (string): ID of destination Discord channel
text (string): Content of message
"""
baseurl = self.rest_baseurl + \
'/channels/{}/messages'.format(channel_id)
requests.post(baseurl,
headers=self.headers,
data=json.dumps({'content': text})) |
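A caller-side sketch, assuming `bot` is an instance of the class defining this method with its REST headers already configured (the channel ID is illustrative):
bot.create_message('199737254929760256', 'Build finished: all tests passed.')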
def OSPFNeighborState_NeighborState(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream")
NeighborState = ET.SubElement(OSPFNeighborState, "NeighborState")
NeighborState.text = kwargs.pop('NeighborState')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def OSPFNeighborState_NeighborState(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream")
NeighborState = ET.SubElement(OSPFNeighborState, "NeighborState")
NeighborState.text = kwargs.pop('NeighborState')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def toVerticalPotential(Pot,R,phi=None):
"""
NAME:
toVerticalPotential
PURPOSE:
convert a Potential to a vertical potential at a given R
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric
OUTPUT:
(list of) linearPotential instance(s)
HISTORY:
2018-10-07 - Written - Bovy (UofT)
"""
Pot= flatten(Pot)
if _APY_LOADED:
if isinstance(R,units.Quantity):
if hasattr(Pot,'_ro'):
R= R.to(units.kpc).value/Pot._ro
else:
R= R.to(units.kpc).value/Pot[0]._ro
if isinstance(phi,units.Quantity):
phi= phi.to(units.rad).value
if isinstance(Pot,list):
out= []
for pot in Pot:
if isinstance(pot,linearPotential):
out.append(pot)
elif isinstance(pot,Potential):
out.append(verticalPotential(pot,R,phi=phi))
elif isinstance(pot,planarPotential):
raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
else:
raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances")
return out
elif isinstance(Pot,Potential):
return verticalPotential(Pot,R,phi=phi)
elif isinstance(Pot,linearPotential):
return Pot
elif isinstance(Pot,planarPotential):
raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
else:
raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances") | NAME:
toVerticalPotential
PURPOSE:
convert a Potential to a vertical potential at a given R
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric
OUTPUT:
(list of) linearPotential instance(s)
HISTORY:
       2018-10-07 - Written - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
toVerticalPotential
PURPOSE:
convert a Potential to a vertical potential at a given R
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric
OUTPUT:
(list of) linearPotential instance(s)
HISTORY:
2018-10-07 - Written - Bovy (UofT)
### Response:
def toVerticalPotential(Pot,R,phi=None):
"""
NAME:
toVerticalPotential
PURPOSE:
convert a Potential to a vertical potential at a given R
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric
OUTPUT:
(list of) linearPotential instance(s)
HISTORY:
2018-10-07 - Written - Bovy (UofT)
"""
Pot= flatten(Pot)
if _APY_LOADED:
if isinstance(R,units.Quantity):
if hasattr(Pot,'_ro'):
R= R.to(units.kpc).value/Pot._ro
else:
R= R.to(units.kpc).value/Pot[0]._ro
if isinstance(phi,units.Quantity):
phi= phi.to(units.rad).value
if isinstance(Pot,list):
out= []
for pot in Pot:
if isinstance(pot,linearPotential):
out.append(pot)
elif isinstance(pot,Potential):
out.append(verticalPotential(pot,R,phi=phi))
elif isinstance(pot,planarPotential):
raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
else:
raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances")
return out
elif isinstance(Pot,Potential):
return verticalPotential(Pot,R,phi=phi)
elif isinstance(Pot,linearPotential):
return Pot
elif isinstance(Pot,planarPotential):
raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
else:
raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances") |
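A short usage sketch, assuming a recent galpy where toVerticalPotential and MWPotential2014 are importable from galpy.potential:

from galpy.potential import MWPotential2014, toVerticalPotential
# vertical potential at R = 1 in natural units; phi is only needed for non-axisymmetric potentials
vert_pots = toVerticalPotential(MWPotential2014, 1.0)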
def _register_factory(self, factory_name, factory, override):
# type: (str, type, bool) -> None
"""
Registers a component factory
:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
                         exception is raised if a previous factory with that
name already exists
:raise ValueError: The factory name already exists or is invalid
:raise TypeError: Invalid factory type
"""
if not factory_name or not is_string(factory_name):
raise ValueError("A factory name must be a non-empty string")
if not inspect.isclass(factory):
raise TypeError(
"Invalid factory class '{0}'".format(type(factory).__name__)
)
with self.__factories_lock:
if factory_name in self.__factories:
if override:
_logger.info("Overriding factory '%s'", factory_name)
else:
raise ValueError(
"'{0}' factory already exist".format(factory_name)
)
self.__factories[factory_name] = factory
# Trigger an event
self._fire_ipopo_event(
constants.IPopoEvent.REGISTERED, factory_name
) | Registers a component factory
:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
                         exception is raised if a previous factory with that
name already exists
:raise ValueError: The factory name already exists or is invalid
        :raise TypeError: Invalid factory type | Below is the instruction that describes the task:
### Input:
Registers a component factory
:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
                         exception is raised if a previous factory with that
name already exists
:raise ValueError: The factory name already exists or is invalid
:raise TypeError: Invalid factory type
### Response:
def _register_factory(self, factory_name, factory, override):
# type: (str, type, bool) -> None
"""
Registers a component factory
:param factory_name: The name of the factory
:param factory: The factory class object
:param override: If true, previous factory is overridden, else an
                         exception is raised if a previous factory with that
name already exists
:raise ValueError: The factory name already exists or is invalid
:raise TypeError: Invalid factory type
"""
if not factory_name or not is_string(factory_name):
raise ValueError("A factory name must be a non-empty string")
if not inspect.isclass(factory):
raise TypeError(
"Invalid factory class '{0}'".format(type(factory).__name__)
)
with self.__factories_lock:
if factory_name in self.__factories:
if override:
_logger.info("Overriding factory '%s'", factory_name)
else:
raise ValueError(
"'{0}' factory already exist".format(factory_name)
)
self.__factories[factory_name] = factory
# Trigger an event
self._fire_ipopo_event(
constants.IPopoEvent.REGISTERED, factory_name
) |
def pdfdump(self, filename=None, **kargs):
"""pdfdump(filename=None, layer_shift=0, rebuild=1)
Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called."""
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=".pdf")
canvas.writePDFfile(fname)
subprocess.Popen([conf.prog.pdfreader, fname+".pdf"])
else:
canvas.writePDFfile(filename) | pdfdump(filename=None, layer_shift=0, rebuild=1)
        Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. | Below is the instruction that describes the task:
### Input:
pdfdump(filename=None, layer_shift=0, rebuild=1)
Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called.
### Response:
def pdfdump(self, filename=None, **kargs):
"""pdfdump(filename=None, layer_shift=0, rebuild=1)
Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called."""
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=".pdf")
canvas.writePDFfile(fname)
subprocess.Popen([conf.prog.pdfreader, fname+".pdf"])
else:
canvas.writePDFfile(filename) |
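A usage sketch with Scapy; canvas_dump (and therefore pdfdump) relies on the optional PyX package being installed:

from scapy.all import IP, TCP
pkt = IP(dst="192.0.2.1") / TCP(dport=80)
pkt.pdfdump("packet.pdf")   # writes the dissected packet layout to packet.pdf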
def qs_alphabet_filter(parser, token):
"""
The parser/tokenizer for the queryset alphabet filter.
{% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %}
{% qs_alphabet_filter objects lastname myapp/template.html %}
The template name is optional and uses alphafilter/alphabet.html if not
specified
"""
bits = token.split_contents()
if len(bits) == 3:
return AlphabetFilterNode(bits[1], bits[2])
elif len(bits) == 4:
if "=" in bits[3]:
key, val = bits[3].split('=')
return AlphabetFilterNode(bits[1], bits[2], strip_params=val)
else:
return AlphabetFilterNode(bits[1], bits[2], template_name=bits[3])
elif len(bits) == 5:
key, val = bits[4].split('=')
return AlphabetFilterNode(bits[1], bits[2], bits[3], bits[4])
else:
raise TemplateSyntaxError("%s is called with a queryset and field "
"name, and optionally a template." % bits[0]) | The parser/tokenizer for the queryset alphabet filter.
{% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %}
{% qs_alphabet_filter objects lastname myapp/template.html %}
The template name is optional and uses alphafilter/alphabet.html if not
    specified | Below is the instruction that describes the task:
### Input:
The parser/tokenizer for the queryset alphabet filter.
{% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %}
{% qs_alphabet_filter objects lastname myapp/template.html %}
The template name is optional and uses alphafilter/alphabet.html if not
specified
### Response:
def qs_alphabet_filter(parser, token):
"""
The parser/tokenizer for the queryset alphabet filter.
{% qs_alphabet_filter <queryset> <field name> [<template name>] [strip_params=comma,delim,list] %}
{% qs_alphabet_filter objects lastname myapp/template.html %}
The template name is optional and uses alphafilter/alphabet.html if not
specified
"""
bits = token.split_contents()
if len(bits) == 3:
return AlphabetFilterNode(bits[1], bits[2])
elif len(bits) == 4:
if "=" in bits[3]:
key, val = bits[3].split('=')
return AlphabetFilterNode(bits[1], bits[2], strip_params=val)
else:
return AlphabetFilterNode(bits[1], bits[2], template_name=bits[3])
elif len(bits) == 5:
key, val = bits[4].split('=')
return AlphabetFilterNode(bits[1], bits[2], bits[3], bits[4])
else:
raise TemplateSyntaxError("%s is called with a queryset and field "
"name, and optionally a template." % bits[0]) |
def do_rewind(self, line):
"""
rewind
"""
self.print_response("Rewinding from frame %s to 0" % self.bot._frame)
        self.bot._frame = 0 | rewind | Below is the instruction that describes the task:
### Input:
rewind
### Response:
def do_rewind(self, line):
"""
rewind
"""
self.print_response("Rewinding from frame %s to 0" % self.bot._frame)
self.bot._frame = 0 |
def list_sessions(self, updated_since=None, max_results=100, skip=0, **kwargs):
"""List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions_service_bus]
:end-before: [END list_sessions_service_bus]
:language: python
:dedent: 4
:caption: Get the Ids of session which have messages pending in the queue
"""
if self.entity and not self.requires_session:
raise ValueError("This is not a sessionful entity.")
message = {
'last-updated-time': updated_since or datetime.datetime.utcfromtimestamp(0),
'skip': types.AMQPInt(skip),
'top': types.AMQPInt(max_results),
}
with BaseHandler(self.entity_uri, self.auth_config, debug=self.debug, **kwargs) as handler:
return handler._mgmt_request_response( # pylint: disable=protected-access
REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION,
message,
mgmt_handlers.list_sessions_op) | List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions_service_bus]
:end-before: [END list_sessions_service_bus]
:language: python
:dedent: 4
            :caption: Get the Ids of session which have messages pending in the queue | Below is the instruction that describes the task:
### Input:
List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions_service_bus]
:end-before: [END list_sessions_service_bus]
:language: python
:dedent: 4
:caption: Get the Ids of session which have messages pending in the queue
### Response:
def list_sessions(self, updated_since=None, max_results=100, skip=0, **kwargs):
"""List session IDs.
List the Session IDs with pending messages in the queue where the state of the session
has been updated since the timestamp provided. If no timestamp is provided, all will be returned.
If the state of a session has never been set, it will not be returned regardless of whether
there are messages pending.
:param updated_since: The UTC datetime from which to return updated pending Session IDs.
:type updated_since: datetime.datetime
:param max_results: The maximum number of Session IDs to return. Default value is 100.
:type max_results: int
:param skip: The page value to jump to. Default value is 0.
:type skip: int
:rtype: list[str]
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START list_sessions_service_bus]
:end-before: [END list_sessions_service_bus]
:language: python
:dedent: 4
:caption: Get the Ids of session which have messages pending in the queue
"""
if self.entity and not self.requires_session:
raise ValueError("This is not a sessionful entity.")
message = {
'last-updated-time': updated_since or datetime.datetime.utcfromtimestamp(0),
'skip': types.AMQPInt(skip),
'top': types.AMQPInt(max_results),
}
with BaseHandler(self.entity_uri, self.auth_config, debug=self.debug, **kwargs) as handler:
return handler._mgmt_request_response( # pylint: disable=protected-access
REQUEST_RESPONSE_GET_MESSAGE_SESSIONS_OPERATION,
message,
mgmt_handlers.list_sessions_op) |
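A hedged usage sketch; queue_client stands in for an already-constructed, session-enabled client instance of the class this method belongs to:

import datetime
recent = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
session_ids = queue_client.list_sessions(updated_since=recent, max_results=50)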
def mcmc(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400,
stdevs=0.1, start = 0.5, **problem):
"""
**Metropolis Hastings MCMC**
with automatic step width adaption.
Burnin period is also used to guess steps.
:param nburn: number of burnin steps
:param stdevs: step widths to start with
"""
if 'seed' in problem:
numpy.random.seed(problem['seed'])
n_params = len(parameter_names)
def like(cube):
cube = numpy.array(cube)
if (cube <= 1e-10).any() or (cube >= 1-1e-10).any():
return -1e100
params = transform(cube)
return loglikelihood(params)
start = start + numpy.zeros(n_params)
stdevs = stdevs + numpy.zeros(n_params)
def compute_stepwidths(chain):
return numpy.std(chain, axis=0) / 3
import matplotlib.pyplot as plt
plt.figure(figsize=(7, 7))
steps = numpy.array([0.1]*(n_params))
print 'burn-in (1/2)...'
chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / 2, adapt=True)
steps = compute_stepwidths(chain)
print 'burn-in (2/2)...'
chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nburn / 2, adapt=True)
steps = compute_stepwidths(chain)
print 'recording chain ...'
chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nsteps)
chain = numpy.array(chain)
i = numpy.argmax(prob)
final = chain[-1]
print 'postprocessing...'
chain = numpy.array([transform(params) for params in chain])
return dict(start=chain[-1], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method='Metropolis MCMC') | **Metropolis Hastings MCMC**
with automatic step width adaption.
Burnin period is also used to guess steps.
:param nburn: number of burnin steps
    :param stdevs: step widths to start with | Below is the instruction that describes the task:
### Input:
**Metropolis Hastings MCMC**
with automatic step width adaption.
Burnin period is also used to guess steps.
:param nburn: number of burnin steps
:param stdevs: step widths to start with
### Response:
def mcmc(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400,
stdevs=0.1, start = 0.5, **problem):
"""
**Metropolis Hastings MCMC**
with automatic step width adaption.
Burnin period is also used to guess steps.
:param nburn: number of burnin steps
:param stdevs: step widths to start with
"""
if 'seed' in problem:
numpy.random.seed(problem['seed'])
n_params = len(parameter_names)
def like(cube):
cube = numpy.array(cube)
if (cube <= 1e-10).any() or (cube >= 1-1e-10).any():
return -1e100
params = transform(cube)
return loglikelihood(params)
start = start + numpy.zeros(n_params)
stdevs = stdevs + numpy.zeros(n_params)
def compute_stepwidths(chain):
return numpy.std(chain, axis=0) / 3
import matplotlib.pyplot as plt
plt.figure(figsize=(7, 7))
steps = numpy.array([0.1]*(n_params))
print 'burn-in (1/2)...'
chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / 2, adapt=True)
steps = compute_stepwidths(chain)
print 'burn-in (2/2)...'
chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nburn / 2, adapt=True)
steps = compute_stepwidths(chain)
print 'recording chain ...'
chain, prob, _, steps_ = mcmc_advance(chain[-1], steps, like, nsteps=nsteps)
chain = numpy.array(chain)
i = numpy.argmax(prob)
final = chain[-1]
print 'postprocessing...'
chain = numpy.array([transform(params) for params in chain])
return dict(start=chain[-1], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method='Metropolis MCMC') |
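A toy calling sketch for a 1-D Gaussian likelihood, assuming the module's mcmc_advance helper is importable alongside mcmc (note the body above still uses Python 2 print statements):

import numpy
def transform(cube):
    return 10.0 * cube - 5.0                          # map the unit cube to [-5, 5]
def loglikelihood(params):
    return -0.5 * numpy.sum((params - 1.2) ** 2)
result = mcmc(transform, loglikelihood, parameter_names=['mu'], nsteps=4000, nburn=400)
print(result['maximum'])                              # highest-likelihood parameter vector seen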
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1):
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997)
correlation given in [1]_ and reviewed in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{g} \phi_g^2
.. math::
\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s
.. math::
\phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes}
.. math::
C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g}
\right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Examples
--------
>>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
448.29981978639154
References
----------
.. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual
Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a
6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4
(November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen.
"Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow
in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December
2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
'''
G_tp = m/(pi/4*D**2)
# Actual Liquid flow
v_l = m*(1-x)/rhol/(pi/4*D**2)
Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D)
fd_l = friction_factor(Re=Re_l, eD=roughness/D)
dP_l = fd_l*L/D*(0.5*rhol*v_l**2)
# Actual gas flow
v_g = m*x/rhog/(pi/4*D**2)
Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D)
fd_g = friction_factor(Re=Re_g, eD=roughness/D)
dP_g = fd_g*L/D*(0.5*rhog*v_g**2)
X = (dP_l/dP_g)**0.5
if G_tp >= 200:
phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45
else:
# Liquid-only flow; Re_lo is oddly needed
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1
phi_g2 = 1 + C*X + X**2
return dP_g*phi_g2 | r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997)
correlation given in [1]_ and reviewed in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{g} \phi_g^2
.. math::
\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s
.. math::
\phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes}
.. math::
C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g}
\right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Examples
--------
>>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
448.29981978639154
References
----------
.. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual
Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a
6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4
(November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen.
"Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow
in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December
       2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007. | Below is the instruction that describes the task:
### Input:
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997)
correlation given in [1]_ and reviewed in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{g} \phi_g^2
.. math::
\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s
.. math::
\phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes}
.. math::
C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g}
\right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Examples
--------
>>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
448.29981978639154
References
----------
.. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual
Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a
6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4
(November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen.
"Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow
in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December
2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
### Response:
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1):
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997)
correlation given in [1]_ and reviewed in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{g} \phi_g^2
.. math::
\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s
.. math::
\phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes}
.. math::
C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g}
\right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Examples
--------
>>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
448.29981978639154
References
----------
.. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual
Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a
6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4
(November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen.
"Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow
in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December
2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
'''
G_tp = m/(pi/4*D**2)
# Actual Liquid flow
v_l = m*(1-x)/rhol/(pi/4*D**2)
Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D)
fd_l = friction_factor(Re=Re_l, eD=roughness/D)
dP_l = fd_l*L/D*(0.5*rhol*v_l**2)
# Actual gas flow
v_g = m*x/rhog/(pi/4*D**2)
Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D)
fd_g = friction_factor(Re=Re_g, eD=roughness/D)
dP_g = fd_g*L/D*(0.5*rhog*v_g**2)
X = (dP_l/dP_g)**0.5
if G_tp >= 200:
phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45
else:
# Liquid-only flow; Re_lo is oddly needed
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1
phi_g2 = 1 + C*X + X**2
return dP_g*phi_g2 |
def update(self, data, length=None):
"""
Hashes given byte string
@param data - string to hash
        @param length - if not specified, entire string is hashed,
otherwise only first length bytes
"""
if self.digest_finalized:
raise DigestError("No updates allowed")
if not isinstance(data, bintype):
raise TypeError("A byte string is expected")
if length is None:
length = len(data)
elif length > len(data):
raise ValueError("Specified length is greater than length of data")
result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
if result != 1:
raise DigestError("Unable to update digest") | Hashes given byte string
@param data - string to hash
        @param length - if not specified, entire string is hashed,
            otherwise only first length bytes | Below is the instruction that describes the task:
### Input:
Hashes given byte string
@param data - string to hash
        @param length - if not specified, entire string is hashed,
otherwise only first length bytes
### Response:
def update(self, data, length=None):
"""
Hashes given byte string
@param data - string to hash
        @param length - if not specified, entire string is hashed,
otherwise only first length bytes
"""
if self.digest_finalized:
raise DigestError("No updates allowed")
if not isinstance(data, bintype):
raise TypeError("A byte string is expected")
if length is None:
length = len(data)
elif length > len(data):
raise ValueError("Specified length is greater than length of data")
result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
if result != 1:
raise DigestError("Unable to update digest") |
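A hedged usage sketch; the Digest class wrapping this method and its constructor are not shown in this record, so those names are assumptions:

d = Digest("sha256")             # hypothetical constructor of the containing class
d.update(b"hello ")
d.update(b"world!!", length=5)   # hashes only the first 5 bytes of this chunk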
def press_event(self):
""" The mouse press event that initiated a mouse drag, if any.
"""
if self.mouse_event.press_event is None:
return None
ev = self.copy()
ev.mouse_event = self.mouse_event.press_event
        return ev | The mouse press event that initiated a mouse drag, if any. | Below is the instruction that describes the task:
### Input:
The mouse press event that initiated a mouse drag, if any.
### Response:
def press_event(self):
""" The mouse press event that initiated a mouse drag, if any.
"""
if self.mouse_event.press_event is None:
return None
ev = self.copy()
ev.mouse_event = self.mouse_event.press_event
return ev |
def _observe_timeseries_fn(timeseries):
"""Build an observation_noise_fn that observes a Tensor timeseries."""
def observation_noise_fn(t):
current_slice = timeseries[..., t, :]
return tfd.MultivariateNormalDiag(
loc=current_slice,
scale_diag=tf.zeros_like(current_slice))
  return observation_noise_fn | Build an observation_noise_fn that observes a Tensor timeseries. | Below is the instruction that describes the task:
### Input:
Build an observation_noise_fn that observes a Tensor timeseries.
### Response:
def _observe_timeseries_fn(timeseries):
"""Build an observation_noise_fn that observes a Tensor timeseries."""
def observation_noise_fn(t):
current_slice = timeseries[..., t, :]
return tfd.MultivariateNormalDiag(
loc=current_slice,
scale_diag=tf.zeros_like(current_slice))
return observation_noise_fn |
def get_ddG_results(self):
"""Parse the results from BuildModel and get the delta delta G's.
        A positive ddG means that the mutation(s) is destabilizing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
"""
foldx_avg_df = self.df_mutation_ddG_avg
foldx_avg_ddG = {}
results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values()
for r in results:
ident = r['Pdb'].split('_')[-1]
ddG = r['total energy']
ddG_sd = r['SD']
foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
return foldx_avg_ddG | Parse the results from BuildModel and get the delta delta G's.
        A positive ddG means that the mutation(s) is destabilizing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
            dict: Dictionary of mutation group to predicted ddG. | Below is the instruction that describes the task:
### Input:
Parse the results from BuildModel and get the delta delta G's.
        A positive ddG means that the mutation(s) is destabilizing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
### Response:
def get_ddG_results(self):
"""Parse the results from BuildModel and get the delta delta G's.
        A positive ddG means that the mutation(s) is destabilizing, negative means stabilizing.
- highly stabilising (ΔΔG < −1.84 kcal/mol);
- stabilising (−1.84 kcal/mol ≤ ΔΔG < −0.92 kcal/mol);
- slightly stabilising (−0.92 kcal/mol ≤ ΔΔG < −0.46 kcal/mol);
- neutral (−0.46 kcal/mol < ΔΔG ≤ +0.46 kcal/mol);
- slightly destabilising (+0.46 kcal/mol < ΔΔG ≤ +0.92 kcal/mol);
- destabilising (+0.92 kcal/mol < ΔΔG ≤ +1.84 kcal/mol);
- highly destabilising (ΔΔG > +1.84 kcal/mol).
Returns:
dict: Dictionary of mutation group to predicted ddG.
"""
foldx_avg_df = self.df_mutation_ddG_avg
foldx_avg_ddG = {}
results = foldx_avg_df[['Pdb', 'total energy', 'SD']].T.to_dict().values()
for r in results:
ident = r['Pdb'].split('_')[-1]
ddG = r['total energy']
ddG_sd = r['SD']
foldx_avg_ddG[self.mutation_index_to_group[int(ident)]] = (ddG, ddG_sd)
return foldx_avg_ddG |
def build_dir_tree(self, files):
""" Convert a flat file dict into the tree format used for storage """
def helper(split_files):
this_dir = {'files' : {}, 'dirs' : {}}
dirs = defaultdict(list)
for fle in split_files:
index = fle[0]; fileinfo = fle[1]
if len(index) == 1:
fileinfo['path'] = index[0] # store only the file name instead of the whole path
this_dir['files'][fileinfo['path']] = fileinfo
elif len(index) > 1:
dirs[index[0]].append((index[1:], fileinfo))
for name,info in dirs.iteritems():
this_dir['dirs'][name] = helper(info)
return this_dir
        return helper([(name.split('/')[1:], file_info) for name, file_info in files.iteritems()]) | Convert a flat file dict into the tree format used for storage | Below is the instruction that describes the task:
### Input:
Convert a flat file dict into the tree format used for storage
### Response:
def build_dir_tree(self, files):
""" Convert a flat file dict into the tree format used for storage """
def helper(split_files):
this_dir = {'files' : {}, 'dirs' : {}}
dirs = defaultdict(list)
for fle in split_files:
index = fle[0]; fileinfo = fle[1]
if len(index) == 1:
fileinfo['path'] = index[0] # store only the file name instead of the whole path
this_dir['files'][fileinfo['path']] = fileinfo
elif len(index) > 1:
dirs[index[0]].append((index[1:], fileinfo))
for name,info in dirs.iteritems():
this_dir['dirs'][name] = helper(info)
return this_dir
return helper([(name.split('/')[1:], file_info) for name, file_info in files.iteritems()]) |
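A sketch of the expected shape; store stands in for an instance of the containing class (note the body above relies on the Python 2 dict.iteritems API):

flat = {'/a/b.txt': {'size': 1}, '/a/c/d.txt': {'size': 2}}
tree = store.build_dir_tree(flat)
# tree == {'files': {},
#          'dirs': {'a': {'files': {'b.txt': {'size': 1, 'path': 'b.txt'}},
#                         'dirs': {'c': {'files': {'d.txt': {'size': 2, 'path': 'd.txt'}},
#                                        'dirs': {}}}}}}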
def version(self):
"""
Compute the version identifier for this functional node using the
func code and local names. Optionally, also allow closed-over variable
values to affect the version number when closure_fingerprint is
specified
"""
try:
f = self.func.__call__.__code__
except AttributeError:
f = self.func.__code__
h = md5()
h.update(f.co_code)
h.update(str(f.co_names).encode())
try:
closure = self.func.__closure__
except AttributeError:
return h.hexdigest()
if closure is None or self.closure_fingerprint is None:
return h.hexdigest()
d = dict(
(name, cell.cell_contents)
for name, cell in zip(f.co_freevars, closure))
h.update(self.closure_fingerprint(d).encode())
return h.hexdigest() | Compute the version identifier for this functional node using the
func code and local names. Optionally, also allow closed-over variable
values to affect the version number when closure_fingerprint is
        specified | Below is the instruction that describes the task:
### Input:
Compute the version identifier for this functional node using the
func code and local names. Optionally, also allow closed-over variable
values to affect the version number when closure_fingerprint is
specified
### Response:
def version(self):
"""
Compute the version identifier for this functional node using the
func code and local names. Optionally, also allow closed-over variable
values to affect the version number when closure_fingerprint is
specified
"""
try:
f = self.func.__call__.__code__
except AttributeError:
f = self.func.__code__
h = md5()
h.update(f.co_code)
h.update(str(f.co_names).encode())
try:
closure = self.func.__closure__
except AttributeError:
return h.hexdigest()
if closure is None or self.closure_fingerprint is None:
return h.hexdigest()
d = dict(
(name, cell.cell_contents)
for name, cell in zip(f.co_freevars, closure))
h.update(self.closure_fingerprint(d).encode())
return h.hexdigest() |
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2 ** 32))
for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features, random_states)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response | Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
            The shared response. | Below is the instruction that describes the task:
### Input:
Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
### Response:
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2 ** 32))
for i in range(len(data))]
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features, random_states)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response |
def _parse_reported_packages_from_install_output(output):
'''
Parses the output of "opkg install" to determine what packages would have been
installed by an operation run with the --noaction flag.
We are looking for lines like:
Installing <package> (<version>) on <target>
or
Upgrading <package> from <oldVersion> to <version> on root
'''
reported_pkgs = {}
install_pattern = re.compile(r'Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)')
upgrade_pattern = re.compile(r'Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)')
for line in salt.utils.itertools.split(output, '\n'):
match = install_pattern.match(line)
if match is None:
match = upgrade_pattern.match(line)
if match:
reported_pkgs[match.group('package')] = match.group('version')
return reported_pkgs | Parses the output of "opkg install" to determine what packages would have been
installed by an operation run with the --noaction flag.
We are looking for lines like:
Installing <package> (<version>) on <target>
or
        Upgrading <package> from <oldVersion> to <version> on root | Below is the instruction that describes the task:
### Input:
Parses the output of "opkg install" to determine what packages would have been
installed by an operation run with the --noaction flag.
We are looking for lines like:
Installing <package> (<version>) on <target>
or
Upgrading <package> from <oldVersion> to <version> on root
### Response:
def _parse_reported_packages_from_install_output(output):
'''
Parses the output of "opkg install" to determine what packages would have been
installed by an operation run with the --noaction flag.
We are looking for lines like:
Installing <package> (<version>) on <target>
or
Upgrading <package> from <oldVersion> to <version> on root
'''
reported_pkgs = {}
install_pattern = re.compile(r'Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)')
upgrade_pattern = re.compile(r'Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)')
for line in salt.utils.itertools.split(output, '\n'):
match = install_pattern.match(line)
if match is None:
match = upgrade_pattern.match(line)
if match:
reported_pkgs[match.group('package')] = match.group('version')
return reported_pkgs |
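A behaviour sketch on sample opkg output (salt.utils.itertools.split iterates over the lines much like str.splitlines):

sample = ("Installing vim (8.0-1) on root\n"
          "Upgrading curl from 7.52.1-1 to 7.52.1-2 on root")
_parse_reported_packages_from_install_output(sample)
# -> {'vim': '8.0-1', 'curl': '7.52.1-2'}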
def parse(self, fail_callback=None):
""" Parse text fields and file fields for values and files """
# get text fields
for field in self.field_arguments:
self.values[field['name']] = self.__get_value(field['name'])
if self.values[field['name']] is None and field['required']:
if fail_callback is not None:
fail_callback()
self.__invalid_request(field['error'])
# get file fields
for file in self.file_arguments:
self.files[file['name']] = self.__get_file(file)
if self.files[file['name']] is None and file['required']:
if fail_callback is not None:
fail_callback()
                self.__invalid_request(file['error']) | Parse text fields and file fields for values and files | Below is the instruction that describes the task:
### Input:
Parse text fields and file fields for values and files
### Response:
def parse(self, fail_callback=None):
""" Parse text fields and file fields for values and files """
# get text fields
for field in self.field_arguments:
self.values[field['name']] = self.__get_value(field['name'])
if self.values[field['name']] is None and field['required']:
if fail_callback is not None:
fail_callback()
self.__invalid_request(field['error'])
# get file fields
for file in self.file_arguments:
self.files[file['name']] = self.__get_file(file)
if self.files[file['name']] is None and file['required']:
if fail_callback is not None:
fail_callback()
self.__invalid_request(file['error']) |
def parse(self, input_text, syncmap):
"""
Read from SMIL file.
Limitations:
1. parses only ``<par>`` elements, in order
2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
"""
from lxml import etree
smil_ns = "{http://www.w3.org/ns/SMIL}"
root = etree.fromstring(gf.safe_bytes(input_text))
for par in root.iter(smil_ns + "par"):
for child in par:
if child.tag == (smil_ns + "text"):
identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
elif child.tag == (smil_ns + "audio"):
begin_text = child.get("clipBegin")
if ":" in begin_text:
begin = gf.time_from_hhmmssmmm(begin_text)
else:
begin = gf.time_from_ssmmm(begin_text)
end_text = child.get("clipEnd")
if ":" in end_text:
end = gf.time_from_hhmmssmmm(end_text)
else:
end = gf.time_from_ssmmm(end_text)
# TODO read text from additional text_file?
self._add_fragment(
syncmap=syncmap,
identifier=identifier,
lines=[u""],
begin=begin,
end=end
) | Read from SMIL file.
Limitations:
1. parses only ``<par>`` elements, in order
2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
        3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated | Below is the instruction that describes the task:
### Input:
Read from SMIL file.
Limitations:
1. parses only ``<par>`` elements, in order
2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
### Response:
def parse(self, input_text, syncmap):
"""
Read from SMIL file.
Limitations:
1. parses only ``<par>`` elements, in order
2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
"""
from lxml import etree
smil_ns = "{http://www.w3.org/ns/SMIL}"
root = etree.fromstring(gf.safe_bytes(input_text))
for par in root.iter(smil_ns + "par"):
for child in par:
if child.tag == (smil_ns + "text"):
identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
elif child.tag == (smil_ns + "audio"):
begin_text = child.get("clipBegin")
if ":" in begin_text:
begin = gf.time_from_hhmmssmmm(begin_text)
else:
begin = gf.time_from_ssmmm(begin_text)
end_text = child.get("clipEnd")
if ":" in end_text:
end = gf.time_from_hhmmssmmm(end_text)
else:
end = gf.time_from_ssmmm(end_text)
# TODO read text from additional text_file?
self._add_fragment(
syncmap=syncmap,
identifier=identifier,
lines=[u""],
begin=begin,
end=end
) |
def register_func_list(self, func_and_handler):
""" register a function to determine if the handle
should be used for the type
"""
for func, handler in func_and_handler:
self._function_dispatch.register(func, handler)
self.dispatch.cache_clear() | register a function to determine if the handle
        should be used for the type | Below is the instruction that describes the task:
### Input:
register a function to determine if the handle
should be used for the type
### Response:
def register_func_list(self, func_and_handler):
""" register a function to determine if the handle
should be used for the type
"""
for func, handler in func_and_handler:
self._function_dispatch.register(func, handler)
self.dispatch.cache_clear() |
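A hedged sketch; converter stands in for an instance of the containing class, and the predicate/handler pair below follows the (func, handler) tuples the loop above expects:

def is_sequence(cls):
    return issubclass(cls, (list, tuple))

def handle_sequence(obj):
    return [str(x) for x in obj]

converter.register_func_list([(is_sequence, handle_sequence)])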
def callback_prototype(prototype):
"""Decorator to process a callback prototype.
A callback prototype is a function whose signature includes all the values
that will be passed by the callback API in question.
The original function will be returned, with a ``prototype.adapt`` attribute
which can be used to prepare third party callbacks.
"""
protosig = signature(prototype)
positional, keyword = [], []
for name, param in protosig.parameters.items():
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
raise TypeError("*args/**kwargs not supported in prototypes")
if (param.default is not Parameter.empty) \
or (param.kind == Parameter.KEYWORD_ONLY):
keyword.append(name)
else:
positional.append(name)
kwargs = dict.fromkeys(keyword)
def adapt(callback):
"""Introspect and prepare a third party callback."""
sig = signature(callback)
try:
# XXX: callback can have extra optional parameters - OK?
sig.bind(*positional, **kwargs)
return callback
except TypeError:
pass
# Match up arguments
unmatched_pos = positional[:]
unmatched_kw = kwargs.copy()
unrecognised = []
# TODO: unrecognised parameters with default values - OK?
for name, param in sig.parameters.items():
# print(name, param.kind) #DBG
if param.kind == Parameter.POSITIONAL_ONLY:
if len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.POSITIONAL_OR_KEYWORD:
if (param.default is not Parameter.empty) and (name in unmatched_kw):
unmatched_kw.pop(name)
elif len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.VAR_POSITIONAL:
unmatched_pos = []
elif param.kind == Parameter.KEYWORD_ONLY:
if name in unmatched_kw:
unmatched_kw.pop(name)
else:
unrecognised.append(name)
else: # VAR_KEYWORD
unmatched_kw = {}
# print(unmatched_pos, unmatched_kw, unrecognised) #DBG
if unrecognised:
raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised))
n_positional = len(positional) - len(unmatched_pos)
@wraps(callback)
def adapted(*args, **kwargs):
"""Wrapper for third party callbacks that discards excess arguments"""
# print(args, kwargs)
args = args[:n_positional]
for name in unmatched_kw:
# XXX: Could name not be in kwargs?
kwargs.pop(name)
# print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw)
return callback(*args, **kwargs)
return adapted
prototype.adapt = adapt
return prototype | Decorator to process a callback prototype.
A callback prototype is a function whose signature includes all the values
that will be passed by the callback API in question.
The original function will be returned, with a ``prototype.adapt`` attribute
    which can be used to prepare third party callbacks. | Below is the instruction that describes the task:
### Input:
Decorator to process a callback prototype.
A callback prototype is a function whose signature includes all the values
that will be passed by the callback API in question.
The original function will be returned, with a ``prototype.adapt`` attribute
which can be used to prepare third party callbacks.
### Response:
def callback_prototype(prototype):
"""Decorator to process a callback prototype.
A callback prototype is a function whose signature includes all the values
that will be passed by the callback API in question.
The original function will be returned, with a ``prototype.adapt`` attribute
which can be used to prepare third party callbacks.
"""
protosig = signature(prototype)
positional, keyword = [], []
for name, param in protosig.parameters.items():
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
raise TypeError("*args/**kwargs not supported in prototypes")
if (param.default is not Parameter.empty) \
or (param.kind == Parameter.KEYWORD_ONLY):
keyword.append(name)
else:
positional.append(name)
kwargs = dict.fromkeys(keyword)
def adapt(callback):
"""Introspect and prepare a third party callback."""
sig = signature(callback)
try:
# XXX: callback can have extra optional parameters - OK?
sig.bind(*positional, **kwargs)
return callback
except TypeError:
pass
# Match up arguments
unmatched_pos = positional[:]
unmatched_kw = kwargs.copy()
unrecognised = []
# TODO: unrecognised parameters with default values - OK?
for name, param in sig.parameters.items():
# print(name, param.kind) #DBG
if param.kind == Parameter.POSITIONAL_ONLY:
if len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.POSITIONAL_OR_KEYWORD:
if (param.default is not Parameter.empty) and (name in unmatched_kw):
unmatched_kw.pop(name)
elif len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.VAR_POSITIONAL:
unmatched_pos = []
elif param.kind == Parameter.KEYWORD_ONLY:
if name in unmatched_kw:
unmatched_kw.pop(name)
else:
unrecognised.append(name)
else: # VAR_KEYWORD
unmatched_kw = {}
# print(unmatched_pos, unmatched_kw, unrecognised) #DBG
if unrecognised:
raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised))
n_positional = len(positional) - len(unmatched_pos)
@wraps(callback)
def adapted(*args, **kwargs):
"""Wrapper for third party callbacks that discards excess arguments"""
# print(args, kwargs)
args = args[:n_positional]
for name in unmatched_kw:
# XXX: Could name not be in kwargs?
kwargs.pop(name)
# print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw)
return callback(*args, **kwargs)
return adapted
prototype.adapt = adapt
return prototype |
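A self-contained usage sketch (given the inspect and functools imports the function above assumes): the prototype pins down the full callback signature, and adapt() wraps a narrower third-party callback so surplus arguments are silently dropped:

@callback_prototype
def on_message(sender, payload, verbose=False):
    pass

def simple_handler(sender):                   # only cares about the first argument
    print("message from", sender)

adapted = on_message.adapt(simple_handler)
adapted("svc-1", {"id": 7}, verbose=True)     # prints "message from svc-1"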
def _make_exception(self, response):
"""
In case of exception, construct the exception
object that holds all important values returned by
the response.
:return: The exception instance
:rtype: PocketException
"""
headers = response.headers
limit_headers = []
if 'X-Limit-User-Limit' in headers:
limit_headers = [
headers['X-Limit-User-Limit'],
headers['X-Limit-User-Remaining'],
headers['X-Limit-User-Reset'],
headers['X-Limit-Key-Limit'],
headers['X-Limit-Key-Remaining'],
headers['X-Limit-Key-Reset']
]
x_error_code = int(headers['X-Error-Code'])
exc = PocketException
if x_error_code in self.auth_error_codes:
exc = PocketAutException
return exc(
response.status_code,
x_error_code,
headers['X-Error'],
*limit_headers
) | In case of exception, construct the exception
object that holds all important values returned by
the response.
:return: The exception instance
        :rtype: PocketException | Below is the instruction that describes the task:
### Input:
In case of exception, construct the exception
object that holds all important values returned by
the response.
:return: The exception instance
:rtype: PocketException
### Response:
def _make_exception(self, response):
"""
In case of exception, construct the exception
object that holds all important values returned by
the response.
:return: The exception instance
:rtype: PocketException
"""
headers = response.headers
limit_headers = []
if 'X-Limit-User-Limit' in headers:
limit_headers = [
headers['X-Limit-User-Limit'],
headers['X-Limit-User-Remaining'],
headers['X-Limit-User-Reset'],
headers['X-Limit-Key-Limit'],
headers['X-Limit-Key-Remaining'],
headers['X-Limit-Key-Reset']
]
x_error_code = int(headers['X-Error-Code'])
exc = PocketException
if x_error_code in self.auth_error_codes:
exc = PocketAutException
return exc(
response.status_code,
x_error_code,
headers['X-Error'],
*limit_headers
) |
def _basis_notes_path(name, data_dir):
'''Form a path to the notes for a basis set'''
data_dir = fix_data_dir(data_dir)
bs_data = _get_basis_metadata(name, data_dir)
# the notes file is the same as the base file name, with a .notes extension
filebase = bs_data['basename']
file_path = os.path.join(data_dir, filebase + '.notes')
    return file_path | Form a path to the notes for a basis set | Below is the instruction that describes the task:
### Input:
Form a path to the notes for a basis set
### Response:
def _basis_notes_path(name, data_dir):
'''Form a path to the notes for a basis set'''
data_dir = fix_data_dir(data_dir)
bs_data = _get_basis_metadata(name, data_dir)
# the notes file is the same as the base file name, with a .notes extension
filebase = bs_data['basename']
file_path = os.path.join(data_dir, filebase + '.notes')
return file_path |
def _deserialize(self, value, attr, data):
"""Deserialize string value."""
value = super(TrimmedString, self)._deserialize(value, attr, data)
return value.strip() | Deserialize string value. | Below is the the instruction that describes the task:
### Input:
Deserialize string value.
### Response:
def _deserialize(self, value, attr, data):
"""Deserialize string value."""
value = super(TrimmedString, self)._deserialize(value, attr, data)
return value.strip() |
def police_priority_map_exceed_map_pri3_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed")
map_pri3_exceed.text = kwargs.pop('map_pri3_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def police_priority_map_exceed_map_pri3_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri3_exceed = ET.SubElement(exceed, "map-pri3-exceed")
map_pri3_exceed.text = kwargs.pop('map_pri3_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
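Since the method above only assembles XML and hands it to a callback, the structure it produces can be previewed with plain ElementTree. The name and priority values below are made-up example inputs.
# Preview of the XML structure built above, using plain ElementTree.
# The values "pmap1" and "5" are made-up example inputs.
import xml.etree.ElementTree as ET

config = ET.Element("config")
pmap = ET.SubElement(config, "police-priority-map",
                     xmlns="urn:brocade.com:mgmt:brocade-policer")
ET.SubElement(pmap, "name").text = "pmap1"
exceed = ET.SubElement(pmap, "exceed")
ET.SubElement(exceed, "map-pri3-exceed").text = "5"
print(ET.tostring(config).decode())
# <config><police-priority-map xmlns="urn:..."><name>pmap1</name>
# <exceed><map-pri3-exceed>5</map-pri3-exceed></exceed></police-priority-map></config>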
def plot_punchcard(df, metric='lines', title='punchcard', by=None):
"""
Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard
:param df:
:param metric:
:param title:
:return:
"""
if not HAS_MPL:
raise ImportError('Must have matplotlib installed to use the plotting functions')
# find how many plots we are making
if by is not None:
unique_vals = set(df[by].values.tolist())
else:
unique_vals = ['foo']
for idx, val in enumerate(unique_vals):
if by is not None:
sub_df = df[df[by] == val]
else:
sub_df = df
fig = plt.figure(figsize=(8, title and 3 or 2.5), facecolor='#ffffff')
ax = fig.add_subplot('111', axisbg='#ffffff')
fig.subplots_adjust(left=0.06, bottom=0.04, right=0.98, top=0.95)
if by is not None:
ax.set_title(title + ' (%s)' % (str(val), ), y=0.96).set_color('#333333')
else:
ax.set_title(title, y=0.96).set_color('#333333')
ax.set_frame_on(False)
ax.scatter(sub_df['hour_of_day'], sub_df['day_of_week'], s=sub_df[metric], c='#333333', edgecolor='#333333')
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_alpha(0.0)
dist = -0.8
ax.plot([dist, 23.5], [dist, dist], c='#555555')
ax.plot([dist, dist], [dist, 6.4], c='#555555')
ax.set_xlim(-1, 24)
ax.set_ylim(-0.9, 6.9)
ax.set_yticks(range(7))
for tx in ax.set_yticklabels(['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']):
tx.set_color('#555555')
tx.set_size('x-small')
ax.set_xticks(range(24))
for tx in ax.set_xticklabels(['%02d' % x for x in range(24)]):
tx.set_color('#555555')
tx.set_size('x-small')
ax.set_aspect('equal')
if idx + 1 == len(unique_vals):
plt.show(block=True)
else:
plt.show(block=False) | Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard
:param df:
:param metric:
:param title:
:return: | Below is the the instruction that describes the task:
### Input:
Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard
:param df:
:param metric:
:param title:
:return:
### Response:
def plot_punchcard(df, metric='lines', title='punchcard', by=None):
"""
Uses modified plotting code from https://bitbucket.org/birkenfeld/hgpunchcard
:param df:
:param metric:
:param title:
:return:
"""
if not HAS_MPL:
raise ImportError('Must have matplotlib installed to use the plotting functions')
# find how many plots we are making
if by is not None:
unique_vals = set(df[by].values.tolist())
else:
unique_vals = ['foo']
for idx, val in enumerate(unique_vals):
if by is not None:
sub_df = df[df[by] == val]
else:
sub_df = df
fig = plt.figure(figsize=(8, title and 3 or 2.5), facecolor='#ffffff')
ax = fig.add_subplot('111', axisbg='#ffffff')
fig.subplots_adjust(left=0.06, bottom=0.04, right=0.98, top=0.95)
if by is not None:
ax.set_title(title + ' (%s)' % (str(val), ), y=0.96).set_color('#333333')
else:
ax.set_title(title, y=0.96).set_color('#333333')
ax.set_frame_on(False)
ax.scatter(sub_df['hour_of_day'], sub_df['day_of_week'], s=sub_df[metric], c='#333333', edgecolor='#333333')
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_alpha(0.0)
dist = -0.8
ax.plot([dist, 23.5], [dist, dist], c='#555555')
ax.plot([dist, dist], [dist, 6.4], c='#555555')
ax.set_xlim(-1, 24)
ax.set_ylim(-0.9, 6.9)
ax.set_yticks(range(7))
for tx in ax.set_yticklabels(['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']):
tx.set_color('#555555')
tx.set_size('x-small')
ax.set_xticks(range(24))
for tx in ax.set_xticklabels(['%02d' % x for x in range(24)]):
tx.set_color('#555555')
tx.set_size('x-small')
ax.set_aspect('equal')
if idx + 1 == len(unique_vals):
plt.show(block=True)
else:
plt.show(block=False) |
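A hedged usage sketch for the function above: it only needs a frame with hour_of_day, day_of_week and a size column such as lines. The synthetic data below is invented, and the final call is left commented because it opens a matplotlib window.
# Synthetic input for plot_punchcard; real callers would derive these
# columns from commit timestamps. All values below are made up.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'hour_of_day': rng.integers(0, 24, size=300),
    'day_of_week': rng.integers(0, 7, size=300),
    'lines': rng.integers(1, 50, size=300),
})
punch = df.groupby(['hour_of_day', 'day_of_week'], as_index=False)['lines'].sum()
# plot_punchcard(punch, metric='lines', title='synthetic punchcard')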
def cache_name(self):
"""
Used in django 1.x
"""
lang = get_language()
cache = build_localized_fieldname(self.accessor, lang)
return "_%s_cache" % cache | Used in django 1.x | Below is the the instruction that describes the task:
### Input:
Used in django 1.x
### Response:
def cache_name(self):
"""
Used in django 1.x
"""
lang = get_language()
cache = build_localized_fieldname(self.accessor, lang)
return "_%s_cache" % cache |
def _execute(self, execute_inputs, execute_outputs, backward_execution=False):
"""Calls the custom execute function of the script.py of the state
"""
self._script.build_module()
outcome_item = self._script.execute(self, execute_inputs, execute_outputs, backward_execution)
# in the case of backward execution the outcome is not relevant
if backward_execution:
return
# If the state was preempted, the state must be left on the preempted outcome
if self.preempted:
return Outcome(-2, "preempted")
# Outcome id was returned
if outcome_item in self.outcomes:
return self.outcomes[outcome_item]
# Outcome name was returned
for outcome_id, outcome in self.outcomes.items():
if outcome.name == outcome_item:
return self.outcomes[outcome_id]
logger.error("Returned outcome of {0} not existing: {1}".format(self, outcome_item))
return Outcome(-1, "aborted") | Calls the custom execute function of the script.py of the state | Below is the the instruction that describes the task:
### Input:
Calls the custom execute function of the script.py of the state
### Response:
def _execute(self, execute_inputs, execute_outputs, backward_execution=False):
"""Calls the custom execute function of the script.py of the state
"""
self._script.build_module()
outcome_item = self._script.execute(self, execute_inputs, execute_outputs, backward_execution)
# in the case of backward execution the outcome is not relevant
if backward_execution:
return
# If the state was preempted, the state must be left on the preempted outcome
if self.preempted:
return Outcome(-2, "preempted")
# Outcome id was returned
if outcome_item in self.outcomes:
return self.outcomes[outcome_item]
# Outcome name was returned
for outcome_id, outcome in self.outcomes.items():
if outcome.name == outcome_item:
return self.outcomes[outcome_id]
logger.error("Returned outcome of {0} not existing: {1}".format(self, outcome_item))
return Outcome(-1, "aborted") |
def length_of_national_destination_code(numobj):
"""Return length of the national destination code code for a number.
Gets the length of the national destination code (NDC) from the
PhoneNumber object passed in, so that clients could use it to split a
national significant number into NDC and subscriber number. The NDC of a
phone number is normally the first group of digit(s) right after the
country calling code when the number is formatted in the international
format, if there is a subscriber number part that follows.
N.B.: similar to an area code, not all numbers have an NDC!
An example of how this could be used:
>>> import phonenumbers
>>> numobj = phonenumbers.parse("18002530000", "US")
>>> nsn = phonenumbers.national_significant_number(numobj)
>>> ndc_len = phonenumbers.length_of_national_destination_code(numobj)
>>> if ndc_len > 0:
... national_destination_code = nsn[:ndc_len]
... subscriber_number = nsn[ndc_len:]
... else:
... national_destination_code = ""
... subscriber_number = nsn
Refer to the unittests to see the difference between this function and
length_of_geographical_area_code.
Arguments:
numobj -- The PhoneNumber object to find the length of the NDC from.
Returns the length of NDC of the PhoneNumber object passed in, which
could be zero.
"""
if numobj.extension is not None:
# We don't want to alter the object given to us, but we don't want to
# include the extension when we format it, so we copy it and clear the
# extension here.
copied_numobj = PhoneNumber()
copied_numobj.merge_from(numobj)
copied_numobj.extension = None
else:
copied_numobj = numobj
nsn = format_number(copied_numobj, PhoneNumberFormat.INTERNATIONAL)
number_groups = re.split(NON_DIGITS_PATTERN, nsn)
# The pattern will start with "+COUNTRY_CODE " so the first group will
# always be the empty string (before the + symbol) and the second group
# will be the country calling code. The third group will be area code if
# it is not the last group.
if len(number_groups) <= 3:
return 0
if number_type(numobj) == PhoneNumberType.MOBILE:
# For example Argentinian mobile numbers, when formatted in the
# international format, are in the form of +54 9 NDC XXXX... As a
# result, we take the length of the third group (NDC) and add the
# length of the second group (which is the mobile token), which also
# forms part of the national significant number. This assumes that
# the mobile token is always formatted separately from the rest of the
# phone number.
mobile_token = country_mobile_token(numobj.country_code)
if mobile_token != U_EMPTY_STRING:
return len(number_groups[2]) + len(number_groups[3])
    return len(number_groups[2]) | Return length of the national destination code for a number.
Gets the length of the national destination code (NDC) from the
PhoneNumber object passed in, so that clients could use it to split a
national significant number into NDC and subscriber number. The NDC of a
phone number is normally the first group of digit(s) right after the
country calling code when the number is formatted in the international
format, if there is a subscriber number part that follows.
N.B.: similar to an area code, not all numbers have an NDC!
An example of how this could be used:
>>> import phonenumbers
>>> numobj = phonenumbers.parse("18002530000", "US")
>>> nsn = phonenumbers.national_significant_number(numobj)
>>> ndc_len = phonenumbers.length_of_national_destination_code(numobj)
>>> if ndc_len > 0:
... national_destination_code = nsn[:ndc_len]
... subscriber_number = nsn[ndc_len:]
... else:
... national_destination_code = ""
... subscriber_number = nsn
Refer to the unittests to see the difference between this function and
length_of_geographical_area_code.
Arguments:
numobj -- The PhoneNumber object to find the length of the NDC from.
Returns the length of NDC of the PhoneNumber object passed in, which
could be zero. | Below is the the instruction that describes the task:
### Input:
Return length of the national destination code for a number.
Gets the length of the national destination code (NDC) from the
PhoneNumber object passed in, so that clients could use it to split a
national significant number into NDC and subscriber number. The NDC of a
phone number is normally the first group of digit(s) right after the
country calling code when the number is formatted in the international
format, if there is a subscriber number part that follows.
N.B.: similar to an area code, not all numbers have an NDC!
An example of how this could be used:
>>> import phonenumbers
>>> numobj = phonenumbers.parse("18002530000", "US")
>>> nsn = phonenumbers.national_significant_number(numobj)
>>> ndc_len = phonenumbers.length_of_national_destination_code(numobj)
>>> if ndc_len > 0:
... national_destination_code = nsn[:ndc_len]
... subscriber_number = nsn[ndc_len:]
... else:
... national_destination_code = ""
... subscriber_number = nsn
Refer to the unittests to see the difference between this function and
length_of_geographical_area_code.
Arguments:
numobj -- The PhoneNumber object to find the length of the NDC from.
Returns the length of NDC of the PhoneNumber object passed in, which
could be zero.
### Response:
def length_of_national_destination_code(numobj):
"""Return length of the national destination code code for a number.
Gets the length of the national destination code (NDC) from the
PhoneNumber object passed in, so that clients could use it to split a
national significant number into NDC and subscriber number. The NDC of a
phone number is normally the first group of digit(s) right after the
country calling code when the number is formatted in the international
format, if there is a subscriber number part that follows.
N.B.: similar to an area code, not all numbers have an NDC!
An example of how this could be used:
>>> import phonenumbers
>>> numobj = phonenumbers.parse("18002530000", "US")
>>> nsn = phonenumbers.national_significant_number(numobj)
>>> ndc_len = phonenumbers.length_of_national_destination_code(numobj)
>>> if ndc_len > 0:
... national_destination_code = nsn[:ndc_len]
... subscriber_number = nsn[ndc_len:]
... else:
... national_destination_code = ""
... subscriber_number = nsn
Refer to the unittests to see the difference between this function and
length_of_geographical_area_code.
Arguments:
numobj -- The PhoneNumber object to find the length of the NDC from.
Returns the length of NDC of the PhoneNumber object passed in, which
could be zero.
"""
if numobj.extension is not None:
# We don't want to alter the object given to us, but we don't want to
# include the extension when we format it, so we copy it and clear the
# extension here.
copied_numobj = PhoneNumber()
copied_numobj.merge_from(numobj)
copied_numobj.extension = None
else:
copied_numobj = numobj
nsn = format_number(copied_numobj, PhoneNumberFormat.INTERNATIONAL)
number_groups = re.split(NON_DIGITS_PATTERN, nsn)
# The pattern will start with "+COUNTRY_CODE " so the first group will
# always be the empty string (before the + symbol) and the second group
# will be the country calling code. The third group will be area code if
# it is not the last group.
if len(number_groups) <= 3:
return 0
if number_type(numobj) == PhoneNumberType.MOBILE:
# For example Argentinian mobile numbers, when formatted in the
# international format, are in the form of +54 9 NDC XXXX... As a
# result, we take the length of the third group (NDC) and add the
# length of the second group (which is the mobile token), which also
# forms part of the national significant number. This assumes that
# the mobile token is always formatted separately from the rest of the
# phone number.
mobile_token = country_mobile_token(numobj.country_code)
if mobile_token != U_EMPTY_STRING:
return len(number_groups[2]) + len(number_groups[3])
return len(number_groups[2]) |
def get_offset(self, envelope):
"""Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope
"""
if isinstance(envelope, collections.Sequence):
envelope = Envelope(envelope)
if not (self.envelope.contains(envelope) or
self.envelope.intersects(envelope)):
raise ValueError('Envelope does not intersect with this extent')
coords = self.affine.transform((envelope.ul, envelope.lr))
nxy = [(min(dest, size) - origin) or 1
for size, origin, dest in zip(self.size, *coords)]
return coords[0] + tuple(nxy) | Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope | Below is the the instruction that describes the task:
### Input:
Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope
### Response:
def get_offset(self, envelope):
"""Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size).
Arguments:
envelope -- coordinate extent tuple or Envelope
"""
if isinstance(envelope, collections.Sequence):
envelope = Envelope(envelope)
if not (self.envelope.contains(envelope) or
self.envelope.intersects(envelope)):
raise ValueError('Envelope does not intersect with this extent')
coords = self.affine.transform((envelope.ul, envelope.lr))
nxy = [(min(dest, size) - origin) or 1
for size, origin, dest in zip(self.size, *coords)]
return coords[0] + tuple(nxy) |
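A standalone sketch of the coordinate-to-pixel-window mapping the method performs, using an explicit origin and pixel size instead of the library's affine and Envelope objects; the raster geometry and extents below are invented.
# Standalone illustration of mapping a map-coordinate envelope to a pixel
# window (x_offset, y_offset, x_size, y_size). Origin, pixel size and the
# extents are made-up example values, not taken from the library above.
def pixel_window(raster_ul, pixel_size, raster_size, env_ul, env_lr):
    ox, oy = raster_ul          # upper-left corner in map coordinates
    px, py = pixel_size         # pixel width (positive) and height (negative)
    x_off = int((env_ul[0] - ox) / px)
    y_off = int((env_ul[1] - oy) / py)
    x_size = max(1, int((env_lr[0] - env_ul[0]) / px))
    y_size = max(1, int((env_ul[1] - env_lr[1]) / -py))
    # clamp the window to the raster bounds
    return (x_off, y_off,
            min(x_size, raster_size[0] - x_off),
            min(y_size, raster_size[1] - y_off))

print(pixel_window((0.0, 100.0), (1.0, -1.0), (100, 100),
                   (10.0, 90.0), (30.0, 60.0)))  # -> (10, 10, 20, 30)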
async def get_entry(self, entry):
"""
GET /api/entries/{entry}.{_format}
Retrieve a single entry
:param entry: \w+ an integer The Entry ID
:return data related to the ext
"""
params = {'access_token': self.token}
url = '/api/entries/{entry}.{ext}'.format(entry=entry,
ext=self.format)
return await self.query(url, "get", **params) | GET /api/entries/{entry}.{_format}
Retrieve a single entry
:param entry: \w+ an integer The Entry ID
:return data related to the ext | Below is the the instruction that describes the task:
### Input:
GET /api/entries/{entry}.{_format}
Retrieve a single entry
:param entry: \w+ an integer The Entry ID
:return data related to the ext
### Response:
async def get_entry(self, entry):
"""
GET /api/entries/{entry}.{_format}
Retrieve a single entry
:param entry: \w+ an integer The Entry ID
:return data related to the ext
"""
params = {'access_token': self.token}
url = '/api/entries/{entry}.{ext}'.format(entry=entry,
ext=self.format)
return await self.query(url, "get", **params) |
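The method above only builds a parameter dict and a formatted URL before delegating to self.query, which issues the HTTP GET. A small sketch of that request shape, with a made-up token, format and entry id:
# Shape of the request get_entry builds before delegating to self.query.
# The token, format and entry id below are made-up example values.
token = 'XYZ-access-token'
fmt = 'json'        # stands in for self.format
entry = 42
params = {'access_token': token}
url = '/api/entries/{entry}.{ext}'.format(entry=entry, ext=fmt)
print('GET', url, params)   # GET /api/entries/42.json {'access_token': '...'}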
def parse_func_body(self):
"""If success, return a tuple (args, body)"""
self.save()
self._expected = []
if self.next_is_rc(Tokens.OPAR, False): # do not render right hidden
self.handle_hidden_right() # render hidden after new level
args = self.parse_param_list()
if args is not None: # may be an empty table
if self.next_is_rc(Tokens.CPAR, False): # do not render right hidden
self.handle_hidden_right() # render hidden after new level
body = self.parse_block()
if body:
self._expected = []
token = self.next_is_rc(Tokens.END, False)
if token:
body.stop_char = token.stop
self.success()
return args, body
else:
self.abort()
else:
self.abort()
return self.failure() | If success, return a tuple (args, body) | Below is the the instruction that describes the task:
### Input:
If success, return a tuple (args, body)
### Response:
def parse_func_body(self):
"""If success, return a tuple (args, body)"""
self.save()
self._expected = []
if self.next_is_rc(Tokens.OPAR, False): # do not render right hidden
self.handle_hidden_right() # render hidden after new level
args = self.parse_param_list()
if args is not None: # may be an empty table
if self.next_is_rc(Tokens.CPAR, False): # do not render right hidden
self.handle_hidden_right() # render hidden after new level
body = self.parse_block()
if body:
self._expected = []
token = self.next_is_rc(Tokens.END, False)
if token:
body.stop_char = token.stop
self.success()
return args, body
else:
self.abort()
else:
self.abort()
return self.failure() |
def no_intersection(to_validate, constraint, violation_cfg):
"""
Returns violation message if validated and constraint sets have no intersection
:param to_validate:
:param constraint:
:param violation_cfg:
:return:
"""
if len(constraint) == 0 or len(set(constraint).intersection(to_validate)) > 0:
return None
else:
violation_cfg[Check.CFG_KEY_VIOLATION_MSG] = violation_cfg[Check.CFG_KEY_VIOLATION_MSG].format(constraint)
return violation_cfg | Returns violation message if validated and constraint sets have no intersection
:param to_validate:
:param constraint:
:param violation_cfg:
:return: | Below is the the instruction that describes the task:
### Input:
Returns violation message if validated and constraint sets have no intersection
:param to_validate:
:param constraint:
:param violation_cfg:
:return:
### Response:
def no_intersection(to_validate, constraint, violation_cfg):
"""
Returns violation message if validated and constraint sets have no intersection
:param to_validate:
:param constraint:
:param violation_cfg:
:return:
"""
if len(constraint) == 0 or len(set(constraint).intersection(to_validate)) > 0:
return None
else:
violation_cfg[Check.CFG_KEY_VIOLATION_MSG] = violation_cfg[Check.CFG_KEY_VIOLATION_MSG].format(constraint)
return violation_cfg |
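A self-contained demo of the check above; the Check.CFG_KEY_VIOLATION_MSG constant is not shown in this entry, so a literal 'violation_msg' key stands in for it.
# Demo of the no-intersection check with a stand-in config key; the real
# code reads the key name from Check.CFG_KEY_VIOLATION_MSG.
CFG_KEY = 'violation_msg'   # assumed stand-in for Check.CFG_KEY_VIOLATION_MSG

def demo_no_intersection(to_validate, constraint, violation_cfg):
    if len(constraint) == 0 or set(constraint) & set(to_validate):
        return None
    violation_cfg[CFG_KEY] = violation_cfg[CFG_KEY].format(constraint)
    return violation_cfg

print(demo_no_intersection({'a', 'b'}, ['c', 'd'],
                           {CFG_KEY: 'no overlap with {}'}))
# -> {'violation_msg': "no overlap with ['c', 'd']"}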
def pack_req(cls, trd_side, order_type, price, qty,
code, adjust_limit, trd_env, sec_mkt_str, acc_id, trd_mkt, conn_id):
"""Convert from user request for place order to PLS request"""
from futuquant.common.pb.Trd_PlaceOrder_pb2 import Request
req = Request()
serial_no = get_unique_id32()
req.c2s.packetID.serialNo = serial_no
req.c2s.packetID.connID = conn_id
req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
req.c2s.header.accID = acc_id
req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]
req.c2s.trdSide = TRD_SIDE_MAP[trd_side]
req.c2s.orderType = ORDER_TYPE_MAP[order_type]
req.c2s.code = code
req.c2s.qty = qty
req.c2s.price = price
req.c2s.adjustPrice = adjust_limit != 0
req.c2s.adjustSideAndLimit = adjust_limit
proto_qot_mkt = MKT_MAP.get(sec_mkt_str, Qot_Common_pb2.QotMarket_Unknown)
proto_trd_sec_mkt = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(proto_qot_mkt,
Trd_Common_pb2.TrdSecMarket_Unknown)
req.c2s.secMarket = proto_trd_sec_mkt
return pack_pb_req(req, ProtoId.Trd_PlaceOrder, conn_id, serial_no) | Convert from user request for place order to PLS request | Below is the the instruction that describes the task:
### Input:
Convert from user request for place order to PLS request
### Response:
def pack_req(cls, trd_side, order_type, price, qty,
code, adjust_limit, trd_env, sec_mkt_str, acc_id, trd_mkt, conn_id):
"""Convert from user request for place order to PLS request"""
from futuquant.common.pb.Trd_PlaceOrder_pb2 import Request
req = Request()
serial_no = get_unique_id32()
req.c2s.packetID.serialNo = serial_no
req.c2s.packetID.connID = conn_id
req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
req.c2s.header.accID = acc_id
req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]
req.c2s.trdSide = TRD_SIDE_MAP[trd_side]
req.c2s.orderType = ORDER_TYPE_MAP[order_type]
req.c2s.code = code
req.c2s.qty = qty
req.c2s.price = price
req.c2s.adjustPrice = adjust_limit != 0
req.c2s.adjustSideAndLimit = adjust_limit
proto_qot_mkt = MKT_MAP.get(sec_mkt_str, Qot_Common_pb2.QotMarket_Unknown)
proto_trd_sec_mkt = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(proto_qot_mkt,
Trd_Common_pb2.TrdSecMarket_Unknown)
req.c2s.secMarket = proto_trd_sec_mkt
return pack_pb_req(req, ProtoId.Trd_PlaceOrder, conn_id, serial_no) |
def do_erase(self):
"""! @brief Handle 'erase' subcommand."""
self._increase_logging(["pyocd.tools.loader", "pyocd"])
session = ConnectHelper.session_with_chosen_probe(
project_dir=self._args.project_dir,
config_file=self._args.config,
user_script=self._args.script,
no_config=self._args.no_config,
pack=self._args.pack,
unique_id=self._args.unique_id,
target_override=self._args.target_override,
frequency=self._args.frequency,
blocking=False,
**convert_session_options(self._args.options))
if session is None:
sys.exit(1)
with session:
mode = self._args.erase_mode or loader.FlashEraser.Mode.SECTOR
eraser = loader.FlashEraser(session, mode)
addresses = flatten_args(self._args.addresses)
eraser.erase(addresses) | ! @brief Handle 'erase' subcommand. | Below is the the instruction that describes the task:
### Input:
! @brief Handle 'erase' subcommand.
### Response:
def do_erase(self):
"""! @brief Handle 'erase' subcommand."""
self._increase_logging(["pyocd.tools.loader", "pyocd"])
session = ConnectHelper.session_with_chosen_probe(
project_dir=self._args.project_dir,
config_file=self._args.config,
user_script=self._args.script,
no_config=self._args.no_config,
pack=self._args.pack,
unique_id=self._args.unique_id,
target_override=self._args.target_override,
frequency=self._args.frequency,
blocking=False,
**convert_session_options(self._args.options))
if session is None:
sys.exit(1)
with session:
mode = self._args.erase_mode or loader.FlashEraser.Mode.SECTOR
eraser = loader.FlashEraser(session, mode)
addresses = flatten_args(self._args.addresses)
eraser.erase(addresses) |