code (string lengths 75 to 104k) | docstring (string lengths 1 to 46.9k) | text (string lengths 164 to 112k) |
---|---|---|
def log_once(log_func, msg, *args, **kwargs):
"""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg) | Logs a message only once. | Below is the instruction that describes the task:
### Input:
Logs a message only once.
### Response:
def log_once(log_func, msg, *args, **kwargs):
"""Logs a message only once."""
if msg not in _LOG_ONCE_SEEN:
log_func(msg, *args, **kwargs)
# Key on the message, ignoring args. This should fit most use cases.
_LOG_ONCE_SEEN.add(msg) |
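A minimal runnable sketch of the row above, assuming _LOG_ONCE_SEEN is a plain module-level set (it is referenced but never defined in the snippet); the function body is repeated here only so the example runs on its own:
import logging

_LOG_ONCE_SEEN = set()  # assumed cache of messages that have already been logged

def log_once(log_func, msg, *args, **kwargs):
    """Logs a message only once."""
    if msg not in _LOG_ONCE_SEEN:
        log_func(msg, *args, **kwargs)
        # Key on the message, ignoring args.
        _LOG_ONCE_SEEN.add(msg)

logging.basicConfig(level=logging.WARNING)
log_once(logging.warning, "deprecated option %s", "--foo")  # emitted
log_once(logging.warning, "deprecated option %s", "--foo")  # suppressed: same msg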
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Calculates a message digest hash for every file in a directory or '
'storage media image.'))
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='image.raw', default=None,
help=('path of the directory or filename of a storage media image '
'containing the file.'))
argument_parser.add_argument(
'--no-auto-recurse', '--no_auto_recurse', dest='no_auto_recurse',
action='store_true', default=False, help=(
'Indicate that the source scanner should not auto-recurse.'))
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = StdoutWriter()
if not output_writer.Open():
print('Unable to open output writer.')
print('')
return False
return_value = True
source_analyzer = SourceAnalyzer(auto_recurse=not options.no_auto_recurse)
try:
source_analyzer.Analyze(options.source, output_writer)
print('Completed.')
except KeyboardInterrupt:
return_value = False
print('Aborted by user.')
output_writer.Close()
return return_value | The main program function.
Returns:
bool: True if successful or False if not. | Below is the instruction that describes the task:
### Input:
The main program function.
Returns:
bool: True if successful or False if not.
### Response:
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Calculates a message digest hash for every file in a directory or '
'storage media image.'))
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='image.raw', default=None,
help=('path of the directory or filename of a storage media image '
'containing the file.'))
argument_parser.add_argument(
'--no-auto-recurse', '--no_auto_recurse', dest='no_auto_recurse',
action='store_true', default=False, help=(
'Indicate that the source scanner should not auto-recurse.'))
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = StdoutWriter()
if not output_writer.Open():
print('Unable to open output writer.')
print('')
return False
return_value = True
source_analyzer = SourceAnalyzer(auto_recurse=not options.no_auto_recurse)
try:
source_analyzer.Analyze(options.source, output_writer)
print('Completed.')
except KeyboardInterrupt:
return_value = False
print('Aborted by user.')
output_writer.Close()
return return_value |
def setup_destination(self):
"""Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
"""
# Do we have a separate identifier?
if (not self.identifier):
# No separate identifier specified, split off the last path segment
# of the source name, strip the extension to get the identifier
self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
# Done if dryrun, else setup self.dst first
if (self.dryrun):
return
if (not self.dst):
raise IIIFStaticError("No destination directory specified!")
dst = self.dst
if (os.path.isdir(dst)):
# Exists, OK
pass
elif (os.path.isfile(dst)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % dst)
else:
os.makedirs(dst)
# Second, create identifier based subdir if necessary
outd = os.path.join(dst, self.identifier)
if (os.path.isdir(outd)):
# Nothing for now, perhaps should delete?
self.logger.warning(
"Output directory %s already exists, adding/updating files" % outd)
pass
elif (os.path.isfile(outd)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % outd)
else:
os.makedirs(outd)
self.logger.debug("Output directory %s" % outd) | Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure. | Below is the instruction that describes the task:
### Input:
Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
### Response:
def setup_destination(self):
"""Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
"""
# Do we have a separate identifier?
if (not self.identifier):
# No separate identifier specified, split off the last path segment
# of the source name, strip the extension to get the identifier
self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
# Done if dryrun, else setup self.dst first
if (self.dryrun):
return
if (not self.dst):
raise IIIFStaticError("No destination directory specified!")
dst = self.dst
if (os.path.isdir(dst)):
# Exists, OK
pass
elif (os.path.isfile(dst)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % dst)
else:
os.makedirs(dst)
# Second, create identifier based subdir if necessary
outd = os.path.join(dst, self.identifier)
if (os.path.isdir(outd)):
# Nothing for now, perhaps should delete?
self.logger.warning(
"Output directory %s already exists, adding/updating files" % outd)
pass
elif (os.path.isfile(outd)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % outd)
else:
os.makedirs(outd)
self.logger.debug("Output directory %s" % outd) |
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--quiet', '--loud'
]
store_false = []
for key, value in options.items():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return _reload, opts | Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx | Below is the instruction that describes the task:
### Input:
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
### Response:
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--quiet', '--loud'
]
store_false = []
for key, value in options.items():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return _reload, opts |
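A quick usage sketch for cleanOptions as defined in the row above (the option names below are invented for illustration). Note that, despite the docstring, the body returns only the reload flag and the option list; no daemonize value is produced:
options = {'reload': True, 'dev': False, 'nocache': True, 'port': 8080, 'quiet': False}
_reload, opts = cleanOptions(options)
print(_reload)  # True
print(opts)     # ['--nocache', '--port', '8080']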
def _ConvertValueBinaryDataToUBInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
"""
if not value:
return None
integer_map = self._GetDataTypeMap('uint64be')
try:
return self._ReadStructureFromByteStream(value, 0, integer_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(
exception)) | Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed. | Below is the instruction that describes the task:
### Input:
Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
### Response:
def _ConvertValueBinaryDataToUBInt64(self, value):
"""Converts a binary data value into an integer.
Args:
value (bytes): binary data value containing an unsigned 64-bit big-endian
integer.
Returns:
int: integer representation of binary data value or None if value is
not set.
Raises:
ParseError: if the integer value cannot be parsed.
"""
if not value:
return None
integer_map = self._GetDataTypeMap('uint64be')
try:
return self._ReadStructureFromByteStream(value, 0, integer_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse integer value with error: {0!s}'.format(
exception)) |
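The row above leans on a dtfabric-style data type map (_GetDataTypeMap / _ReadStructureFromByteStream). Purely as a point of comparison, a standard-library sketch of the same conversion (eight bytes, big-endian, unsigned) could use struct; this is not the parser's actual code path:
import struct

def to_ubint64(value):
    """Convert 8 bytes of big-endian data to an unsigned 64-bit integer."""
    if not value:
        return None
    return struct.unpack('>Q', value)[0]

print(to_ubint64(b'\x00\x00\x00\x00\x00\x00\x01\x00'))  # 256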
def setup(menu=True):
"""Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
"""
if self._has_been_setup:
teardown()
register_plugins()
register_host()
if menu:
add_to_filemenu()
self._has_menu = True
self._has_been_setup = True
print("Pyblish loaded successfully.") | Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration. | Below is the instruction that describes the task:
### Input:
Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
### Response:
def setup(menu=True):
"""Setup integration
Registers Pyblish for Maya plug-ins and appends an item to the File-menu
Attributes:
console (bool): Display console with GUI
port (int, optional): Port from which to start looking for an
available port to connect with Pyblish QML, default
provided by Pyblish Integration.
"""
if self._has_been_setup:
teardown()
register_plugins()
register_host()
if menu:
add_to_filemenu()
self._has_menu = True
self._has_been_setup = True
print("Pyblish loaded successfully.") |
def create_basic_op_node(op_name, node, kwargs):
"""Helper function to create a basic operator
node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node] | Helper function to create a basic operator
node that doesn't contain op specific attrs | Below is the instruction that describes the task:
### Input:
Helper function to create a basic operator
node that doesn't contain op specific attrs
### Response:
def create_basic_op_node(op_name, node, kwargs):
"""Helper function to create a basic operator
node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node] |
def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test | Load IMDb data and augment with hashed n-gram features. | Below is the instruction that describes the task:
### Input:
Load IMDb data and augment with hashed n-gram features.
### Response:
def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test |
def create_new_metadata(self, rsa_public_key):
# type: (EncryptionMetadata,
# cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey)
# -> None
"""Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
"""
self._rsa_public_key = rsa_public_key
self._symkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self._signkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES)
self.encryption_agent = EncryptionAgent(
encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM,
protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION,
)
self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE | Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key | Below is the instruction that describes the task:
### Input:
Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
### Response:
def create_new_metadata(self, rsa_public_key):
# type: (EncryptionMetadata,
# cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey)
# -> None
"""Create new metadata entries for encryption (upload)
:param EncryptionMetadata self: this
:param cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey:
rsa public key
"""
self._rsa_public_key = rsa_public_key
self._symkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self._signkey = os.urandom(
blobxfer.operations.crypto._AES256_KEYLENGTH_BYTES)
self.content_encryption_iv = os.urandom(AES256_BLOCKSIZE_BYTES)
self.encryption_agent = EncryptionAgent(
encryption_algorithm=EncryptionMetadata._ENCRYPTION_ALGORITHM,
protocol=EncryptionMetadata._ENCRYPTION_PROTOCOL_VERSION,
)
self.encryption_mode = EncryptionMetadata._ENCRYPTION_MODE |
def idle_task(self):
'''run periodic tasks'''
if self.starting_motor:
if self.gasheli_settings.ignition_disable_time > 0:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_disable_time:
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.starting_motor = False
if self.stopping_motor:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_stop_time:
# hand back control to RC
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.stopping_motor = False | run periodic tasks | Below is the instruction that describes the task:
### Input:
run periodic tasks
### Response:
def idle_task(self):
'''run periodic tasks'''
if self.starting_motor:
if self.gasheli_settings.ignition_disable_time > 0:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_disable_time:
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.starting_motor = False
if self.stopping_motor:
elapsed = time.time() - self.motor_t1
if elapsed >= self.gasheli_settings.ignition_stop_time:
# hand back control to RC
self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
self.stopping_motor = False |
def applications(self):
"""returns all the group applications to join"""
url = self._url + "/applications"
params = {"f" : "json"}
res = self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
items = []
if "applications" in res.keys():
for apps in res['applications']:
items.append(
self.Application(url="%s/%s" % (self._url, apps['username']),
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
return items | returns all the group applications to join | Below is the instruction that describes the task:
### Input:
returns all the group applications to join
### Response:
def applications(self):
"""returns all the group applications to join"""
url = self._url + "/applications"
params = {"f" : "json"}
res = self._get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
items = []
if "applications" in res.keys():
for apps in res['applications']:
items.append(
self.Application(url="%s/%s" % (self._url, apps['username']),
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
return items |
def real_python3(python, version_dict):
"""
Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`.
"""
args = [python, '-c', 'import sys; print(sys.real_prefix)']
# get python prefix
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
prefix = output.decode('UTF-8').strip()
except subprocess.CalledProcessError:
# process fails, implies *not* in active virtualenv
return python
# determine absolute binary path
if os.name == 'nt': # pragma: no cover
paths = [os.path.join(prefix, os.path.basename(python))]
else:
paths = [os.path.join(prefix, 'bin', python) for python in [
os.path.basename(python),
'python%(major)d.%(minor)d.%(micro)d' % version_dict,
'python%(major)d.%(minor)d' % version_dict,
'python%(major)d' % version_dict,
'python',
]]
for path in paths:
if os.path.isfile(path):
break
else:
path = None
# the executable path must exist
assert path, '\n- '.join(['Could not find interpreter. Attempted:'] + paths)
v1 = subprocess.check_output([python, '--version'])
v2 = subprocess.check_output([path, '--version'])
assert v1 == v2, 'Expected versions to match (%s != %s).' % (v1, v2)
return path | Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`. | Below is the instruction that describes the task:
### Input:
Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`.
### Response:
def real_python3(python, version_dict):
"""
Determine the path of the real python executable, which is then used for
venv creation. This is necessary, because an active virtualenv environment
will cause venv creation to malfunction. By getting the path of the real
executable, this issue is bypassed.
The provided `python` path may be either:
- A real python executable
- A virtual python executable (with venv)
- A virtual python executable (with virtualenv)
If the virtual environment was created with virtualenv, the `sys` module
will have a `real_prefix` attribute, which points to the directory where
the real python files are installed.
If `real_prefix` is not present, the environment was not created with
virtualenv, and the python executable is safe to use.
The `version_dict` is used for attempting to derive the real executable
path. This is necessary when the name of the virtual python executable
does not exist in the Python installation's directory. For example, if
the `basepython` is explicitly set to `python`, tox will use this name
instead of attempting `pythonX.Y`. In many cases, Python 3 installations
do not contain an executable named `python`, so we attempt to derive this
from the version info. e.g., `python3.6.5`, `python3.6`, then `python3`.
"""
args = [python, '-c', 'import sys; print(sys.real_prefix)']
# get python prefix
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
prefix = output.decode('UTF-8').strip()
except subprocess.CalledProcessError:
# process fails, implies *not* in active virtualenv
return python
# determine absolute binary path
if os.name == 'nt': # pragma: no cover
paths = [os.path.join(prefix, os.path.basename(python))]
else:
paths = [os.path.join(prefix, 'bin', python) for python in [
os.path.basename(python),
'python%(major)d.%(minor)d.%(micro)d' % version_dict,
'python%(major)d.%(minor)d' % version_dict,
'python%(major)d' % version_dict,
'python',
]]
for path in paths:
if os.path.isfile(path):
break
else:
path = None
# the executable path must exist
assert path, '\n- '.join(['Could not find interpreter. Attempted:'] + paths)
v1 = subprocess.check_output([python, '--version'])
v2 = subprocess.check_output([path, '--version'])
assert v1 == v2, 'Expected versions to match (%s != %s).' % (v1, v2)
return path |
def delete_project(self, id):
"""
Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if the deleted and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned
"""
url = '/projects/{id}'.format(id=id)
response = self.delete(url)
if response is True:
return {}
else:
return response | Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if the deleted and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned | Below is the instruction that describes the task:
### Input:
Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if the deleted and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned
### Response:
def delete_project(self, id):
"""
Delete a project from the Gitlab server
Gitlab currently returns a Boolean True if the deleted and as such we return an
empty Dictionary
:param id: The ID of the project or NAMESPACE/PROJECT_NAME
:return: Dictionary
:raise: HttpError: If invalid response returned
"""
url = '/projects/{id}'.format(id=id)
response = self.delete(url)
if response is True:
return {}
else:
return response |
def shuffle(qsize=1024, iterable=None):
"""
add example
:param qsize:
:param iterable:
:return:
"""
@iterflow
def shuffleit(it):
from random import randrange
q = []
for i, d in enumerate(it):
q.insert(randrange(0, len(q) + 1), d)
if i < qsize:
continue
yield q.pop(randrange(0, len(q)))
while q:
yield q.pop(randrange(0, len(q)))
return shuffleit if iterable is None else shuffleit(iterable) | add example
:param qsize:
:param iterable:
:return: | Below is the instruction that describes the task:
### Input:
add example
:param qsize:
:param iterable:
:return:
### Response:
def shuffle(qsize=1024, iterable=None):
"""
add example
:param qsize:
:param iterable:
:return:
"""
@iterflow
def shuffleit(it):
from random import randrange
q = []
for i, d in enumerate(it):
q.insert(randrange(0, len(q) + 1), d)
if i < qsize:
continue
yield q.pop(randrange(0, len(q)))
while q:
yield q.pop(randrange(0, len(q)))
return shuffleit if iterable is None else shuffleit(iterable) |
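The @iterflow decorator used above is not shown in this excerpt. A standalone sketch of the same buffered-shuffle idea, written here without that decorator, might look like this:
from random import randrange

def buffered_shuffle(it, qsize=1024):
    # Keep a bounded buffer: insert each item at a random slot, then start
    # yielding random elements once more than qsize items have been seen.
    q = []
    for i, d in enumerate(it):
        q.insert(randrange(0, len(q) + 1), d)
        if i < qsize:
            continue
        yield q.pop(randrange(0, len(q)))
    while q:
        yield q.pop(randrange(0, len(q)))

print(list(buffered_shuffle(range(10), qsize=3)))  # all ten values, locally shuffled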
def _create_model(self, X, Y):
"""
Creates the model given some input data X and Y.
"""
# --- define kernel
self.input_dim = X.shape[1]
if self.kernel is None:
kern = GPy.kern.Matern52(self.input_dim, variance=1., ARD=self.ARD) #+ GPy.kern.Bias(self.input_dim)
else:
kern = self.kernel
self.kernel = None
# --- define model
noise_var = Y.var()*0.01 if self.noise_var is None else self.noise_var
if not self.sparse:
self.model = GPy.models.GPRegression(X, Y, kernel=kern, noise_var=noise_var)
else:
self.model = GPy.models.SparseGPRegression(X, Y, kernel=kern, num_inducing=self.num_inducing)
# --- restrict variance if exact evaluations of the objective
if self.exact_feval:
self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
else:
# --- We make sure we do not get ridiculously small residual noise variance
self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False) | Creates the model given some input data X and Y. | Below is the instruction that describes the task:
### Input:
Creates the model given some input data X and Y.
### Response:
def _create_model(self, X, Y):
"""
Creates the model given some input data X and Y.
"""
# --- define kernel
self.input_dim = X.shape[1]
if self.kernel is None:
kern = GPy.kern.Matern52(self.input_dim, variance=1., ARD=self.ARD) #+ GPy.kern.Bias(self.input_dim)
else:
kern = self.kernel
self.kernel = None
# --- define model
noise_var = Y.var()*0.01 if self.noise_var is None else self.noise_var
if not self.sparse:
self.model = GPy.models.GPRegression(X, Y, kernel=kern, noise_var=noise_var)
else:
self.model = GPy.models.SparseGPRegression(X, Y, kernel=kern, num_inducing=self.num_inducing)
# --- restrict variance if exact evaluations of the objective
if self.exact_feval:
self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
else:
# --- We make sure we do not get ridiculously small residual noise variance
self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False) |
def sequence(self, other, exclude_list_fields=None):
"""Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
"""
exclude_list_fields = frozenset(exclude_list_fields or [])
overwrite_kwargs = {}
nonexistent_excluded_fields = exclude_list_fields - self._list_fields
if nonexistent_excluded_fields:
raise self.AlgebraicDataError(
"Fields {} to exclude from a sequence() were not found in this object's list fields: {}. "
"This object is {}, the other object is {}."
.format(nonexistent_excluded_fields, self._list_fields, self, other))
shared_list_fields = (self._list_fields
& other._list_fields
- exclude_list_fields)
if not shared_list_fields:
raise self.AlgebraicDataError(
"Objects to sequence have no shared fields after excluding {}. "
"This object is {}, with list fields: {}. "
"The other object is {}, with list fields: {}."
.format(exclude_list_fields, self, self._list_fields, other, other._list_fields))
for list_field_name in shared_list_fields:
lhs_value = getattr(self, list_field_name)
rhs_value = getattr(other, list_field_name)
overwrite_kwargs[list_field_name] = lhs_value + rhs_value
return self.copy(**overwrite_kwargs) | Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance. | Below is the instruction that describes the task:
### Input:
Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
### Response:
def sequence(self, other, exclude_list_fields=None):
"""Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
"""
exclude_list_fields = frozenset(exclude_list_fields or [])
overwrite_kwargs = {}
nonexistent_excluded_fields = exclude_list_fields - self._list_fields
if nonexistent_excluded_fields:
raise self.AlgebraicDataError(
"Fields {} to exclude from a sequence() were not found in this object's list fields: {}. "
"This object is {}, the other object is {}."
.format(nonexistent_excluded_fields, self._list_fields, self, other))
shared_list_fields = (self._list_fields
& other._list_fields
- exclude_list_fields)
if not shared_list_fields:
raise self.AlgebraicDataError(
"Objects to sequence have no shared fields after excluding {}. "
"This object is {}, with list fields: {}. "
"The other object is {}, with list fields: {}."
.format(exclude_list_fields, self, self._list_fields, other, other._list_fields))
for list_field_name in shared_list_fields:
lhs_value = getattr(self, list_field_name)
rhs_value = getattr(other, list_field_name)
overwrite_kwargs[list_field_name] = lhs_value + rhs_value
return self.copy(**overwrite_kwargs) |
def average_neighbor_distance(points, num_neigh):
"""!
@brief Returns average distance for establish links between specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establish links between 'num_neigh' in data set 'points'.
"""
if num_neigh > len(points) - 1:
raise NameError('Impossible to calculate average distance to neighbors when number of object is less than number of neighbors.');
dist_matrix = [ [ 0.0 for i in range(len(points)) ] for j in range(len(points)) ];
for i in range(0, len(points), 1):
for j in range(i + 1, len(points), 1):
distance = euclidean_distance(points[i], points[j]);
dist_matrix[i][j] = distance;
dist_matrix[j][i] = distance;
dist_matrix[i] = sorted(dist_matrix[i]);
total_distance = 0;
for i in range(0, len(points), 1):
# start from 0 - first element is distance to itself.
for j in range(0, num_neigh, 1):
total_distance += dist_matrix[i][j + 1];
return ( total_distance / (num_neigh * len(points)) ); | !
@brief Returns average distance for establish links between specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establish links between 'num_neigh' in data set 'points'. | Below is the instruction that describes the task:
### Input:
!
@brief Returns average distance for establish links between specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establish links between 'num_neigh' in data set 'points'.
### Response:
def average_neighbor_distance(points, num_neigh):
"""!
@brief Returns average distance for establish links between specified number of nearest neighbors.
@param[in] points (list): Input data, list of points where each point represented by list.
@param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.
@return (double) Average distance for establish links between 'num_neigh' in data set 'points'.
"""
if num_neigh > len(points) - 1:
raise NameError('Impossible to calculate average distance to neighbors when number of object is less than number of neighbors.');
dist_matrix = [ [ 0.0 for i in range(len(points)) ] for j in range(len(points)) ];
for i in range(0, len(points), 1):
for j in range(i + 1, len(points), 1):
distance = euclidean_distance(points[i], points[j]);
dist_matrix[i][j] = distance;
dist_matrix[j][i] = distance;
dist_matrix[i] = sorted(dist_matrix[i]);
total_distance = 0;
for i in range(0, len(points), 1):
# start from 0 - first element is distance to itself.
for j in range(0, num_neigh, 1):
total_distance += dist_matrix[i][j + 1];
return ( total_distance / (num_neigh * len(points)) ); |
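A tiny worked example for the row above, run with the average_neighbor_distance definition above in scope and assuming euclidean_distance is the usual Euclidean metric (the helper is not shown in the excerpt): for points [[0, 0], [1, 0], [3, 0]] and num_neigh = 1 the nearest-neighbour distances are 1, 1 and 2, so the average is 4 / 3, about 1.33:
import math

def euclidean_distance(a, b):
    # assumed helper: plain Euclidean distance between two points
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

points = [[0.0, 0.0], [1.0, 0.0], [3.0, 0.0]]
print(average_neighbor_distance(points, 1))  # 1.333... = (1 + 1 + 2) / 3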
def set_hook_data(self, key, data):
"""Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
"""
if not isinstance(data, collections.Mapping):
raise ValueError("Hook (key: %s) data must be an instance of "
"collections.Mapping (a dictionary for "
"example)." % key)
if key in self.hook_data:
raise KeyError("Hook data for key %s already exists, each hook "
"must have a unique data_key.", key)
self.hook_data[key] = data | Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook. | Below is the instruction that describes the task:
### Input:
Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
### Response:
def set_hook_data(self, key, data):
"""Set hook data for the given key.
Args:
key(str): The key to store the hook data in.
data(:class:`collections.Mapping`): A dictionary of data to store,
as returned from a hook.
"""
if not isinstance(data, collections.Mapping):
raise ValueError("Hook (key: %s) data must be an instance of "
"collections.Mapping (a dictionary for "
"example)." % key)
if key in self.hook_data:
raise KeyError("Hook data for key %s already exists, each hook "
"must have a unique data_key.", key)
self.hook_data[key] = data |
def delete_record(self, record_id):
"""
Delete a record with record_id.
"""
self._delete(
urljoin(self.base_url, "informationobjects/{}".format(record_id)),
expected_response=204,
)
return {"status": "Deleted"} | Delete a record with record_id. | Below is the instruction that describes the task:
### Input:
Delete a record with record_id.
### Response:
def delete_record(self, record_id):
"""
Delete a record with record_id.
"""
self._delete(
urljoin(self.base_url, "informationobjects/{}".format(record_id)),
expected_response=204,
)
return {"status": "Deleted"} |
def get_by_id(self, reply_id):
'''
Get the reply by id.
'''
reply = MReply.get_by_uid(reply_id)
logger.info('get_reply: {0}'.format(reply_id))
self.render('misc/reply/show_reply.html',
reply=reply,
username=reply.user_name,
date=reply.date,
vote=reply.vote,
uid=reply.uid,
userinfo=self.userinfo,
kwd={}) | Get the reply by id. | Below is the instruction that describes the task:
### Input:
Get the reply by id.
### Response:
def get_by_id(self, reply_id):
'''
Get the reply by id.
'''
reply = MReply.get_by_uid(reply_id)
logger.info('get_reply: {0}'.format(reply_id))
self.render('misc/reply/show_reply.html',
reply=reply,
username=reply.user_name,
date=reply.date,
vote=reply.vote,
uid=reply.uid,
userinfo=self.userinfo,
kwd={}) |
def strict_dependencies(self, dep_context):
"""
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target
"""
strict_deps = self._cached_strict_dependencies_map.get(dep_context, None)
if strict_deps is None:
default_predicate = self._closure_dep_predicate({self}, **dep_context.target_closure_kwargs)
# TODO(#5977): this branch needs testing!
if not default_predicate:
def default_predicate(*args, **kwargs):
return True
def dep_predicate(source, dependency):
if not default_predicate(source, dependency):
return False
# Always expand aliases.
if type(source) in dep_context.alias_types:
return True
# Traverse other dependencies if they are exported.
if source._dep_is_exported(dependency):
return True
return False
dep_addresses = [d.address for d in self.dependencies
if default_predicate(self, d)
]
result = self._build_graph.transitive_subgraph_of_addresses_bfs(
addresses=dep_addresses,
dep_predicate=dep_predicate
)
strict_deps = OrderedSet()
for declared in result:
if type(declared) in dep_context.alias_types:
continue
if isinstance(declared, dep_context.types_with_closure):
strict_deps.update(declared.closure(
bfs=True,
**dep_context.target_closure_kwargs))
strict_deps.add(declared)
strict_deps = list(strict_deps)
self._cached_strict_dependencies_map[dep_context] = strict_deps
return strict_deps | :param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target | Below is the instruction that describes the task:
### Input:
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target
### Response:
def strict_dependencies(self, dep_context):
"""
:param dep_context: A DependencyContext with configuration for the request.
:return: targets that this target "strictly" depends on. This set of dependencies contains
only directly declared dependencies, with two exceptions:
1) aliases are expanded transitively
2) the strict_dependencies of targets exported targets exported by
strict_dependencies (transitively).
:rtype: list of Target
"""
strict_deps = self._cached_strict_dependencies_map.get(dep_context, None)
if strict_deps is None:
default_predicate = self._closure_dep_predicate({self}, **dep_context.target_closure_kwargs)
# TODO(#5977): this branch needs testing!
if not default_predicate:
def default_predicate(*args, **kwargs):
return True
def dep_predicate(source, dependency):
if not default_predicate(source, dependency):
return False
# Always expand aliases.
if type(source) in dep_context.alias_types:
return True
# Traverse other dependencies if they are exported.
if source._dep_is_exported(dependency):
return True
return False
dep_addresses = [d.address for d in self.dependencies
if default_predicate(self, d)
]
result = self._build_graph.transitive_subgraph_of_addresses_bfs(
addresses=dep_addresses,
dep_predicate=dep_predicate
)
strict_deps = OrderedSet()
for declared in result:
if type(declared) in dep_context.alias_types:
continue
if isinstance(declared, dep_context.types_with_closure):
strict_deps.update(declared.closure(
bfs=True,
**dep_context.target_closure_kwargs))
strict_deps.add(declared)
strict_deps = list(strict_deps)
self._cached_strict_dependencies_map[dep_context] = strict_deps
return strict_deps |
def replace_u_start_month(month):
"""Find the earliest legitimate month."""
month = month.lstrip('-')
if month == 'uu' or month == '0u':
return '01'
if month == 'u0':
return '10'
return month.replace('u', '0') | Find the earliest legitimate month. | Below is the instruction that describes the task:
### Input:
Find the earliest legitimate month.
### Response:
def replace_u_start_month(month):
"""Find the earliest legitimate month."""
month = month.lstrip('-')
if month == 'uu' or month == '0u':
return '01'
if month == 'u0':
return '10'
return month.replace('u', '0') |
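A few illustrative calls for replace_u_start_month as defined above (input strings invented for illustration):
print(replace_u_start_month('uu'))   # '01'
print(replace_u_start_month('0u'))   # '01'
print(replace_u_start_month('u0'))   # '10'
print(replace_u_start_month('-u3'))  # '03' (leading '-' stripped, remaining 'u' -> '0')
print(replace_u_start_month('1u'))   # '10'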
def extract_altitude(self):
'''
Extract altitude
'''
altitude_ref = {
0: 1,
1: -1}
fields = ['GPS GPSAltitude', 'EXIF GPS GPSAltitude']
refs = ['GPS GPSAltitudeRef', 'EXIF GPS GPSAltitudeRef']
altitude, _ = self._extract_alternative_fields(fields, 0, float)
ref = 0 if not any([True for x in refs if x in self.tags]) else [
self.tags[x].values for x in refs if x in self.tags][0][0]
return altitude * altitude_ref[ref] | Extract altitude | Below is the instruction that describes the task:
### Input:
Extract altitude
### Response:
def extract_altitude(self):
'''
Extract altitude
'''
altitude_ref = {
0: 1,
1: -1}
fields = ['GPS GPSAltitude', 'EXIF GPS GPSAltitude']
refs = ['GPS GPSAltitudeRef', 'EXIF GPS GPSAltitudeRef']
altitude, _ = self._extract_alternative_fields(fields, 0, float)
ref = 0 if not any([True for x in refs if x in self.tags]) else [
self.tags[x].values for x in refs if x in self.tags][0][0]
return altitude * altitude_ref[ref] |
def _set_microcode(self, v, load=False):
"""
Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """microcode must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
})
self.__microcode = t
if hasattr(self, '_set'):
self._set() | Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly. | Below is the instruction that describes the task:
### Input:
Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly.
### Response:
def _set_microcode(self, v, load=False):
"""
Setter method for microcode, mapped from YANG variable /firmware/peripheral_update/microcode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_microcode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_microcode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """microcode must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=microcode.microcode, is_container='container', presence=False, yang_name="microcode", rest_name="microcode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Microcode image'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
})
self.__microcode = t
if hasattr(self, '_set'):
self._set() |
def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 encoded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data)
elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8'))
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8'))
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
# needs to be have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1',
buffer=data.flatten().tostring())[()]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape,
dtype='S' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()])
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
# dimentions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],)
else:
length2 = length
new_shape = (shape[0]//length2,)
else:
if length is None:
length2 = shape[-1]
else:
length2 = length
new_shape = copy.deepcopy(shape)
new_shape[-1] //= length2
# The new array can be made as all zeros (nulls) with enough
# padding to hold everything (dtype='UL' where 'L' is the
# length). It will start out as a 1d array and be reshaped into
# the proper shape later (makes indexing easier).
new_data = np.zeros(shape=(np.prod(new_shape),),
dtype='S'+str(length2))
# With data flattened into a 1d array, we just need to take
# length sized chunks, convert them (if they are uint8 or 16,
# then decode to str first, if they are uint32, put them as an
# input buffer for an ndarray of type 'U').
data = data.flatten()
for i in range(0, new_data.shape[0]):
chunk = data[(i*length2):((i+1)*length2)]
if data.dtype.name == 'uint8':
new_data[i] = np.ndarray(shape=tuple(),
dtype=new_data.dtype,
buffer=chunk.tostring())[()]
else:
new_data[i] = np.bytes_( \
convert_to_str(chunk).encode('UTF-8'))
# Only thing is left is to reshape it.
return new_data.reshape(tuple(new_shape))
else:
# Couldn't figure out what it is, so nothing can be done but
# return it as is.
return data | Decodes data to Numpy UTF-8 encoded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_ | Below is the instruction that describes the task:
### Input:
Decodes data to Numpy UTF-8 encoded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
### Response:
def convert_to_numpy_bytes(data, length=None):
""" Decodes data to Numpy UTF-8 econded string (bytes\_).
Decodes `data` to a Numpy UTF-8 encoded string, which is
``numpy.bytes_``, or an array of them in which case it will be ASCII
encoded instead. If it can't be decoded, it is returned as
is. Unsigned integers, Python string types (``str``, ``bytes``), and
``numpy.unicode_`` (UTF-32) are supported.
For an array of unsigned integers, it may be desirable to make an
array with strings of some specified length as opposed to an array
of the same size with each element being a one element string. This
naturally arises when converting strings to unsigned integer types
in the first place, so it needs to be reversible. The `length`
parameter specifies how many to group together into a string
(desired string length). For 1d arrays, this is along its only
dimension. For higher dimensional arrays, it is done along each row
(across columns). So, for a 3x10x5 input array of uints and a
`length` of 5, the output array would be a 3x2x5 of 5 element
strings.
Parameters
----------
data : some type
Data decode into a Numpy UTF-8 encoded string/s.
length : int or None, optional
The number of consecutive elements (in the case of unsigned
integer `data`) to compose each string in the output array from.
``None`` indicates the full amount for a 1d array or the number
of columns (full length of row) for a higher dimension array.
Returns
-------
b : numpy.bytes\_ or numpy.ndarray of numpy.bytes\_ or data
If `data` can be decoded into a ``numpy.bytes_`` or a
``numpy.ndarray`` of them, the decoded version is returned.
Otherwise, `data` is returned unchanged.
See Also
--------
convert_to_str
convert_to_numpy_str
numpy.bytes\_
"""
# The method of conversion depends on its type.
if isinstance(data, np.bytes_) or (isinstance(data, np.ndarray) \
and data.dtype.char == 'S'):
# It is already an np.bytes_ or array of them, so nothing needs
# to be done.
return data
elif isinstance(data, (bytes, bytearray)):
# Easily converted through constructor.
return np.bytes_(data)
elif (sys.hexversion >= 0x03000000 and isinstance(data, str)) \
or (sys.hexversion < 0x03000000 \
and isinstance(data, unicode)):
return np.bytes_(data.encode('UTF-8'))
elif isinstance(data, (np.uint16, np.uint32)):
# They are single UTF-16 or UTF-32 scalars, and are easily
# converted to a UTF-8 string and then passed through the
# constructor.
return np.bytes_(convert_to_str(data).encode('UTF-8'))
elif isinstance(data, np.uint8):
# It is just the uint8 version of the character, so it just
# needs to have the dtype essentially changed by having its
# bytes read into ndarray.
return np.ndarray(shape=tuple(), dtype='S1',
buffer=data.flatten().tostring())[()]
elif isinstance(data, np.ndarray) and data.dtype.char == 'U':
# We just need to convert it elementwise.
new_data = np.zeros(shape=data.shape,
dtype='S' + str(data.dtype.itemsize))
for index, x in np.ndenumerate(data):
new_data[index] = np.bytes_(x.encode('UTF-8'))
return new_data
elif isinstance(data, np.ndarray) \
and data.dtype.name in ('uint8', 'uint16', 'uint32'):
# It is an ndarray of some uint type. How it is converted
# depends on its shape. If its shape is just (), then it is just
# a scalar wrapped in an array, which can be converted by
# recursing the scalar value back into this function.
shape = list(data.shape)
if len(shape) == 0:
return convert_to_numpy_bytes(data[()])
# As there are more than one element, it gets a bit more
# complicated. We need to take the subarrays of the specified
# length along columns (1D arrays will be treated as row arrays
# here), each of those converted to an str_ scalar (normal
# string) and stuffed into a new array.
#
# If the length was not given, it needs to be set to full. Then
# the shape of the new array needs to be calculated (divide the
# appropriate dimension, which depends on the number of
# dimensions).
if len(shape) == 1:
if length is None:
length2 = shape[0]
new_shape = (shape[0],)
else:
length2 = length
new_shape = (shape[0]//length2,)
else:
if length is None:
length2 = shape[-1]
else:
length2 = length
new_shape = copy.deepcopy(shape)
new_shape[-1] //= length2
# The new array can be made as all zeros (nulls) with enough
# padding to hold everything (dtype='SL' where 'L' is the
# length). It will start out as a 1d array and be reshaped into
# the proper shape later (makes indexing easier).
new_data = np.zeros(shape=(np.prod(new_shape),),
dtype='S'+str(length2))
# With data flattened into a 1d array, we just need to take
# length sized chunks, convert them (if they are uint8 or 16,
# then decode to str first, if they are uint32, put them as an
# input buffer for an ndarray of type 'U').
data = data.flatten()
for i in range(0, new_data.shape[0]):
chunk = data[(i*length2):((i+1)*length2)]
if data.dtype.name == 'uint8':
new_data[i] = np.ndarray(shape=tuple(),
dtype=new_data.dtype,
buffer=chunk.tostring())[()]
else:
new_data[i] = np.bytes_( \
convert_to_str(chunk).encode('UTF-8'))
# The only thing left is to reshape it.
return new_data.reshape(tuple(new_shape))
else:
# Couldn't figure out what it is, so nothing can be done but
# return it as is.
return data |
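A minimal sketch of the uint8 grouping behaviour described in the docstring above, written directly against NumPy rather than calling the function itself; the buffer contents and the length of 5 are purely illustrative:

import numpy as np

# Hypothetical flat uint8 buffer holding two 5-byte words back to back.
raw = np.frombuffer(b"helloworld", dtype=np.uint8)

# Group every 5 consecutive uint8 values into one fixed-length byte string,
# mirroring the `length` parameter discussed above.
length = 5
grouped = np.zeros(shape=(raw.size // length,), dtype='S' + str(length))
for i in range(grouped.shape[0]):
    chunk = raw[i * length:(i + 1) * length]
    grouped[i] = chunk.tobytes()

print(grouped)  # [b'hello' b'world']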
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate and return the frame
return (self.begin +
self.nop.join(six.binary_type(frame).split(self.prefix)) +
self.end) | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. | Below is the the instruction that describes the task:
### Input:
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
### Response:
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate and return the frame
return (self.begin +
self.nop.join(six.binary_type(frame).split(self.prefix)) +
self.end) |
def encode_args(args, extra=False):
"""
Encode a list of arguments
"""
if not args:
return ''
methodargs = ', '.join([encode(a) for a in args])
if extra:
methodargs += ', '
return methodargs | Encode a list of arguments | Below is the the instruction that describes the task:
### Input:
Encode a list of arguments
### Response:
def encode_args(args, extra=False):
"""
Encode a list of arguments
"""
if not args:
return ''
methodargs = ', '.join([encode(a) for a in args])
if extra:
methodargs += ', '
return methodargs |
def discretize(self, method, *args, **kwargs):
"""
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
"""
return method(self, *args, **kwargs).get_discrete_values() | Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this | Below is the the instruction that describes the task:
### Input:
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
### Response:
def discretize(self, method, *args, **kwargs):
"""
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretiztion
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
"""
return method(self, *args, **kwargs).get_discrete_values() |
def GetWSAActionInput(operation):
"""Find wsa:Action attribute, and return value or the default."""
attr = operation.input.action
if attr is not None:
return attr
portType = operation.getPortType()
targetNamespace = portType.getTargetNamespace()
ptName = portType.name
msgName = operation.input.name
if not msgName:
msgName = operation.name + 'Request'
if targetNamespace.endswith('/'):
return '%s%s/%s' %(targetNamespace, ptName, msgName)
return '%s/%s/%s' %(targetNamespace, ptName, msgName) | Find wsa:Action attribute, and return value or the default. | Below is the the instruction that describes the task:
### Input:
Find wsa:Action attribute, and return value or the default.
### Response:
def GetWSAActionInput(operation):
"""Find wsa:Action attribute, and return value or the default."""
attr = operation.input.action
if attr is not None:
return attr
portType = operation.getPortType()
targetNamespace = portType.getTargetNamespace()
ptName = portType.name
msgName = operation.input.name
if not msgName:
msgName = operation.name + 'Request'
if targetNamespace.endswith('/'):
return '%s%s/%s' %(targetNamespace, ptName, msgName)
return '%s/%s/%s' %(targetNamespace, ptName, msgName) |
def get_etag(file_path):
"""Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
"""
# (At least on Vista) os.path.exists returns False if a file name contains
# special characters, even if it is correctly UTF-8 encoded.
# So we convert to unicode. On the other hand, md5() needs a byte string.
if compat.is_bytes(file_path):
unicodeFilePath = to_unicode_safe(file_path)
else:
unicodeFilePath = file_path
file_path = file_path.encode("utf8")
if not os.path.isfile(unicodeFilePath):
return md5(file_path).hexdigest()
if sys.platform == "win32":
statresults = os.stat(unicodeFilePath)
return (
md5(file_path).hexdigest()
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
)
else:
statresults = os.stat(unicodeFilePath)
return (
str(statresults[stat.ST_INO])
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
) | Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize | Below is the the instruction that describes the task:
### Input:
Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
### Response:
def get_etag(file_path):
"""Return a strong Entity Tag for a (file)path.
http://www.webdav.org/specs/rfc4918.html#etag
Returns the following as entity tags::
Non-file - md5(pathname)
Win32 - md5(pathname)-lastmodifiedtime-filesize
Others - inode-lastmodifiedtime-filesize
"""
# (At least on Vista) os.path.exists returns False if a file name contains
# special characters, even if it is correctly UTF-8 encoded.
# So we convert to unicode. On the other hand, md5() needs a byte string.
if compat.is_bytes(file_path):
unicodeFilePath = to_unicode_safe(file_path)
else:
unicodeFilePath = file_path
file_path = file_path.encode("utf8")
if not os.path.isfile(unicodeFilePath):
return md5(file_path).hexdigest()
if sys.platform == "win32":
statresults = os.stat(unicodeFilePath)
return (
md5(file_path).hexdigest()
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
)
else:
statresults = os.stat(unicodeFilePath)
return (
str(statresults[stat.ST_INO])
+ "-"
+ str(statresults[stat.ST_MTIME])
+ "-"
+ str(statresults[stat.ST_SIZE])
) |
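A small sketch of how such a strong ETag can be assembled from the path hash, modification time and size using only the standard library; the helper name and the use of the script's own path are illustrative, not part of the original module:

import os
import stat
from hashlib import md5

def simple_etag(path):
    """Build an ETag-like token from the path hash, mtime and size (illustrative only)."""
    digest = md5(path.encode("utf8")).hexdigest()
    if not os.path.isfile(path):
        return digest
    st = os.stat(path)
    return "%s-%s-%s" % (digest, st[stat.ST_MTIME], st[stat.ST_SIZE])

print(simple_etag(__file__))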
def parse_hpo_gene(hpo_line):
"""Parse hpo gene information
Args:
hpo_line(str): An iterable with hpo phenotype lines
Yields:
hpo_info(dict)
"""
if not len(hpo_line) > 3:
return {}
hpo_line = hpo_line.rstrip().split('\t')
hpo_info = {}
hpo_info['hgnc_symbol'] = hpo_line[1]
hpo_info['description'] = hpo_line[2]
hpo_info['hpo_id'] = hpo_line[3]
return hpo_info | Parse hpo gene information
Args:
hpo_line(str): An iterable with hpo phenotype lines
Yields:
hpo_info(dict) | Below is the the instruction that describes the task:
### Input:
Parse hpo gene information
Args:
hpo_line(str): An iterable with hpo phenotype lines
Yields:
hpo_info(dict)
### Response:
def parse_hpo_gene(hpo_line):
"""Parse hpo gene information
Args:
hpo_line(str): An iterable with hpo phenotype lines
Yields:
hpo_info(dict)
"""
if not len(hpo_line) > 3:
return {}
hpo_line = hpo_line.rstrip().split('\t')
hpo_info = {}
hpo_info['hgnc_symbol'] = hpo_line[1]
hpo_info['description'] = hpo_line[2]
hpo_info['hpo_id'] = hpo_line[3]
return hpo_info |
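For illustration, a made-up tab-separated line in the layout the parser above expects, with the same field mapping done inline (the gene and phenotype values are invented):

# A made-up tab-separated line in the layout the parser above expects:
# <entrez id> <hgnc symbol> <description> <hpo id>
line = "2200\tFBN1\tAortic dilatation\tHP:0004942\n"

fields = line.rstrip().split("\t")
hpo_info = {
    "hgnc_symbol": fields[1],
    "description": fields[2],
    "hpo_id": fields[3],
}
print(hpo_info)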
def save_libsvm(X, y, path):
"""Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the LibSVM file to save data.
"""
dump_svmlight_file(X, y, path, zero_based=False) | Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the LibSVM file to save data.
### Input:
Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the LibSVM file to save data.
### Response:
def save_libsvm(X, y, path):
"""Save data as a LibSVM file.
Args:
X (numpy or scipy sparse matrix): Data matrix
y (numpy array): Target vector.
path (str): Path to the LibSVM file to save data.
"""
dump_svmlight_file(X, y, path, zero_based=False) |
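A hedged end-to-end sketch of the underlying scikit-learn call, assuming scikit-learn and NumPy are installed; the file name and the toy matrix are arbitrary:

import numpy as np
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 0.0]])
y = np.array([0, 1])

# Write, then read back to confirm the round trip (one-based feature indices).
dump_svmlight_file(X, y, "example.libsvm", zero_based=False)
X2, y2 = load_svmlight_file("example.libsvm", zero_based=False)
print(X2.toarray(), y2)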
def spawn(self, function, *args, **kwargs):
# type: (Callable[..., Any], *Any, **Any) -> Spawned
"""Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing
"""
assert self.state != STOPPED, "Can't spawn when process stopped"
spawned = Spawned(function, args, kwargs)
self._spawned.append(spawned)
self._spawn_count += 1
# Filter out things that are ready to avoid memory leaks
if self._spawn_count > SPAWN_CLEAR_COUNT:
self._clear_spawn_list()
return spawned | Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing | Below is the the instruction that describes the task:
### Input:
Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing
### Response:
def spawn(self, function, *args, **kwargs):
# type: (Callable[..., Any], *Any, **Any) -> Spawned
"""Runs the function in a worker thread, returning a Result object
Args:
function: Function to run
args: Positional arguments to run the function with
kwargs: Keyword arguments to run the function with
Returns:
Spawned: Something you can call wait(timeout) on to see when it's
finished executing
"""
assert self.state != STOPPED, "Can't spawn when process stopped"
spawned = Spawned(function, args, kwargs)
self._spawned.append(spawned)
self._spawn_count += 1
# Filter out things that are ready to avoid memory leaks
if self._spawn_count > SPAWN_CLEAR_COUNT:
self._clear_spawn_list()
return spawned |
def get_block(self, usage_id, for_parent=None):
"""
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
"""
def_id = self.id_reader.get_definition_id(usage_id)
try:
block_type = self.id_reader.get_block_type(def_id)
except NoSuchDefinition:
raise NoSuchUsage(repr(usage_id))
keys = ScopeIds(self.user_id, block_type, def_id, usage_id)
block = self.construct_xblock(block_type, keys, for_parent=for_parent)
return block | Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data. | Below is the the instruction that describes the task:
### Input:
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
### Response:
def get_block(self, usage_id, for_parent=None):
"""
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
"""
def_id = self.id_reader.get_definition_id(usage_id)
try:
block_type = self.id_reader.get_block_type(def_id)
except NoSuchDefinition:
raise NoSuchUsage(repr(usage_id))
keys = ScopeIds(self.user_id, block_type, def_id, usage_id)
block = self.construct_xblock(block_type, keys, for_parent=for_parent)
return block |
def before_log(logger, log_level):
"""Before call strategy that logs to some logger the attempt."""
def log_it(retry_state):
logger.log(log_level,
"Starting call to '%s', this is the %s time calling it.",
_utils.get_callback_name(retry_state.fn),
_utils.to_ordinal(retry_state.attempt_number))
return log_it | Before call strategy that logs to some logger the attempt. | Below is the the instruction that describes the task:
### Input:
Before call strategy that logs to some logger the attempt.
### Response:
def before_log(logger, log_level):
"""Before call strategy that logs to some logger the attempt."""
def log_it(retry_state):
logger.log(log_level,
"Starting call to '%s', this is the %s time calling it.",
_utils.get_callback_name(retry_state.fn),
_utils.to_ordinal(retry_state.attempt_number))
return log_it |
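A sketch of how a before-call hook like this is typically attached with tenacity's retry decorator, assuming the tenacity package is installed; the flaky function exists only to trigger the "Starting call" log lines:

import logging
from tenacity import retry, stop_after_attempt, before_log

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

@retry(stop=stop_after_attempt(3), before=before_log(logger, logging.DEBUG))
def flaky():
    # Invented failure just to exercise the retries and the before-call logging.
    raise RuntimeError("boom")

try:
    flaky()
except Exception:
    pass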
def async_comp_check(self, original, loc, tokens):
"""Check for Python 3.6 async comprehension."""
return self.check_py("36", "async comprehension", original, loc, tokens) | Check for Python 3.6 async comprehension. | Below is the the instruction that describes the task:
### Input:
Check for Python 3.6 async comprehension.
### Response:
def async_comp_check(self, original, loc, tokens):
"""Check for Python 3.6 async comprehension."""
return self.check_py("36", "async comprehension", original, loc, tokens) |
def on_packet(packet):
""" Callback function that is called everytime a data packet arrives from QTM """
print("Framenumber: {}".format(packet.framenumber))
header, markers = packet.get_3d_markers()
print("Component info: {}".format(header))
for marker in markers:
print("\t", marker) | Callback function that is called everytime a data packet arrives from QTM | Below is the the instruction that describes the task:
### Input:
Callback function that is called every time a data packet arrives from QTM
### Response:
def on_packet(packet):
""" Callback function that is called everytime a data packet arrives from QTM """
print("Framenumber: {}".format(packet.framenumber))
header, markers = packet.get_3d_markers()
print("Component info: {}".format(header))
for marker in markers:
print("\t", marker) |
def _propagate_up(self, handle, target_id, name=None):
"""
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
"""
if self.parent:
stream = self.router.stream_by_id(self.parent.context_id)
self._send_one(stream, handle, target_id, name) | In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute. | Below is the the instruction that describes the task:
### Input:
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
### Response:
def _propagate_up(self, handle, target_id, name=None):
"""
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
"""
if self.parent:
stream = self.router.stream_by_id(self.parent.context_id)
self._send_one(stream, handle, target_id, name) |
def _read(self):
'''
Read in from disk
'''
if msgpack is None:
log.error('Cache cannot be read from the disk: msgpack is missing')
elif not os.path.exists(self._path):
log.debug('Cache path does not exist for reading: %s', self._path)
else:
try:
with salt.utils.files.fopen(self._path, 'rb') as fp_:
cache = salt.utils.data.decode(msgpack.load(fp_, encoding=__salt_system_encoding__))
if "CacheDisk_cachetime" in cache: # new format
self._dict = cache["CacheDisk_data"]
self._key_cache_time = cache["CacheDisk_cachetime"]
else: # old format
self._dict = cache
timestamp = os.path.getmtime(self._path)
for key in self._dict:
self._key_cache_time[key] = timestamp
if log.isEnabledFor(logging.DEBUG):
log.debug('Disk cache retrieved: %s', cache)
except (IOError, OSError) as err:
log.error('Error while reading disk cache from %s: %s', self._path, err) | Read in from disk | Below is the the instruction that describes the task:
### Input:
Read in from disk
### Response:
def _read(self):
'''
Read in from disk
'''
if msgpack is None:
log.error('Cache cannot be read from the disk: msgpack is missing')
elif not os.path.exists(self._path):
log.debug('Cache path does not exist for reading: %s', self._path)
else:
try:
with salt.utils.files.fopen(self._path, 'rb') as fp_:
cache = salt.utils.data.decode(msgpack.load(fp_, encoding=__salt_system_encoding__))
if "CacheDisk_cachetime" in cache: # new format
self._dict = cache["CacheDisk_data"]
self._key_cache_time = cache["CacheDisk_cachetime"]
else: # old format
self._dict = cache
timestamp = os.path.getmtime(self._path)
for key in self._dict:
self._key_cache_time[key] = timestamp
if log.isEnabledFor(logging.DEBUG):
log.debug('Disk cache retrieved: %s', cache)
except (IOError, OSError) as err:
log.error('Error while reading disk cache from %s: %s', self._path, err) |
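A minimal sketch of the on-disk layout described in the comments above (the "new format" with separate data and cachetime maps), using the msgpack package directly; the keys and values are illustrative:

import time
import msgpack

# Illustrative payload in the "new format" shape described above.
cache = {
    "CacheDisk_data": {"minion1": {"os": "Linux"}},
    "CacheDisk_cachetime": {"minion1": time.time()},
}

blob = msgpack.packb(cache, use_bin_type=True)
restored = msgpack.unpackb(blob, raw=False)
print(restored["CacheDisk_data"])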
def bugreport(dest_file="default.log"):
"""
Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
# We have to check if device is available or not before executing this command
# as adb bugreport will wait-for-device infinitely and does not come out of
# loop
# Execute only if a device is available
if _isDeviceAvailable():
result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
return (result, "Success: Bug report saved to: " + dest_file)
else:
return (0, "Device Not Found") | Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution | Below is the the instruction that describes the task:
### Input:
Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution
### Response:
def bugreport(dest_file="default.log"):
"""
Prints dumpsys, dumpstate, and logcat data to the screen, for the purposes of bug reporting
:return: result of _exec_command() execution
"""
adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
# We have to check if device is available or not before executing this command
# as adb bugreport will wait-for-device infinitely and does not come out of
# loop
# Execute only if a device is available
if _isDeviceAvailable():
result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
return (result, "Success: Bug report saved to: " + dest_file)
else:
return (0, "Device Not Found") |
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if "TLOD" in rec.info and rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file) | Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field. | Below is the the instruction that describes the task:
### Input:
Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
### Response:
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/bcbio/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if "VQSLOD" in rec.info and rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if "TLOD" in rec.info and rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file) |
def value(self, val):
"""Set the color using length-N array of (from HSV)"""
hsv = self._hsv
hsv[:, 2] = _array_clip_val(val)
self.rgba = _hsv_to_rgb(hsv) | Set the color using a length-N array of values (from HSV) | Below is the instruction that describes the task:
### Input:
Set the color using a length-N array of values (from HSV)
### Response:
def value(self, val):
"""Set the color using length-N array of (from HSV)"""
hsv = self._hsv
hsv[:, 2] = _array_clip_val(val)
self.rgba = _hsv_to_rgb(hsv) |
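The same idea for a single colour with the standard library's colorsys, to show what replacing only the V channel does; the starting colour and the new value are arbitrary:

import colorsys

r, g, b = 0.2, 0.4, 0.6              # arbitrary starting colour
h, s, v = colorsys.rgb_to_hsv(r, g, b)

new_v = 0.9
new_v = min(max(new_v, 0.0), 1.0)    # clip the new value channel to [0, 1]
print(colorsys.hsv_to_rgb(h, s, new_v))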
def patch_request(self, id_or_uri, body, timeout=-1, custom_headers=None):
"""
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
"""
uri = self.build_uri(id_or_uri)
logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body))
custom_headers_copy = custom_headers.copy() if custom_headers else {}
if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers_copy:
custom_headers_copy['Content-Type'] = 'application/json-patch+json'
task, entity = self._connection.patch(uri, body, custom_headers=custom_headers_copy)
if not task:
return entity
return self._task_monitor.wait_for_task(task, timeout) | Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource. | Below is the the instruction that describes the task:
### Input:
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
### Response:
def patch_request(self, id_or_uri, body, timeout=-1, custom_headers=None):
"""
Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
body: Patch request body
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource.
"""
uri = self.build_uri(id_or_uri)
logger.debug('Patch resource (uri = %s, data = %s)' % (uri, body))
custom_headers_copy = custom_headers.copy() if custom_headers else {}
if self._connection._apiVersion >= 300 and 'Content-Type' not in custom_headers_copy:
custom_headers_copy['Content-Type'] = 'application/json-patch+json'
task, entity = self._connection.patch(uri, body, custom_headers=custom_headers_copy)
if not task:
return entity
return self._task_monitor.wait_for_task(task, timeout) |
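For illustration, the shape a typical single-operation body argument takes (JSON-Patch style); the path and value are invented, and the commented call shows how it would be passed to an already-configured client:

# One JSON-Patch operation per call, as the docstring above requires.
body = [
    {
        "op": "replace",
        "path": "/name",          # hypothetical field on the resource
        "value": "renamed-resource",
    }
]
# client.patch_request("<resource id or uri>", body)  # call shape, assuming a configured client
print(body)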
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
"""UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
response = self._send(http_method='PATCH',
location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('TeamSettingsDaysOff', response) | UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>` | Below is the the instruction that describes the task:
### Input:
UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
### Response:
def update_team_days_off(self, days_off_patch, team_context, iteration_id):
"""UpdateTeamDaysOff.
Set a team's days off for an iteration
:param :class:`<TeamSettingsDaysOffPatch> <azure.devops.v5_0.work.models.TeamSettingsDaysOffPatch>` days_off_patch: Team's days off patch containing a list of start and end dates
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: :class:`<TeamSettingsDaysOff> <azure.devops.v5_0.work.models.TeamSettingsDaysOff>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(days_off_patch, 'TeamSettingsDaysOffPatch')
response = self._send(http_method='PATCH',
location_id='2d4faa2e-9150-4cbf-a47a-932b1b4a0773',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('TeamSettingsDaysOff', response) |
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad)
return buf | Read LEB data from file
Argument:
Obj:block -- Block data is desired for. | Below is the the instruction that describes the task:
### Input:
Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
### Response:
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad)
return buf |
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data) | Email staff users | Below is the the instruction that describes the task:
### Input:
Email staff users
### Response:
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data) |
def OSXEnumerateRunningServicesFromClient(args):
"""Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
"""
del args # Unused.
osx_version = client_utils_osx.OSXVersion()
version_array = osx_version.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagement API unsupported on < 10.6. This client is %s" %
osx_version.VersionString())
launchd_list = GetRunningLaunchDaemons()
parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in parser.Parse():
response = CreateServiceProto(job)
yield response | Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6. | Below is the the instruction that describes the task:
### Input:
Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
### Response:
def OSXEnumerateRunningServicesFromClient(args):
"""Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
"""
del args # Unused.
osx_version = client_utils_osx.OSXVersion()
version_array = osx_version.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagement API unsupported on < 10.6. This client is %s" %
osx_version.VersionString())
launchd_list = GetRunningLaunchDaemons()
parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in parser.Parse():
response = CreateServiceProto(job)
yield response |
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret | Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size) | Below is the the instruction that describes the task:
### Input:
Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
### Response:
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret |
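The fixed-size caching trick above (a list indexed by hash(token) % size) shown in isolation; the encode stand-in replaces the real subtoken encoding:

CACHE_SIZE = 8
cache = [(None, None)] * CACHE_SIZE

def encode(token):
    # Stand-in for the real subtoken encoding.
    return [ord(c) for c in token]

def cached_encode(token):
    slot = hash(token) % CACHE_SIZE
    key, value = cache[slot]
    if key == token:
        return value              # hit: reuse the previous result
    value = encode(token)
    cache[slot] = (token, value)  # miss: overwrite whatever lived in this slot
    return value

print(cached_encode("hello"))
print(cached_encode("hello"))  # second call is served from the cache slot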
def LOGGER(filename):
"""creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")
"""
pwd = os.getcwd()
name = filename.replace(pwd, "$PWD")
try:
(first, name) = name.split("site-packages")
name += "... site"
except:
pass
loglevel = logging.CRITICAL
try:
level = grep("loglevel:", config_file(
"/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower()
if level.upper() == "DEBUG":
loglevel = logging.DEBUG
elif level.upper() == "INFO":
loglevel = logging.INFO
elif level.upper() == "WARNING":
loglevel = logging.WARNING
elif level.upper() == "ERROR":
loglevel = logging.ERROR
else:
loglevel = logging.CRITICAL
except:
# print "LOGLEVEL NOT FOUND"
loglevel = logging.DEBUG
log = logging.getLogger(name)
log.setLevel(loglevel)
formatter = logging.Formatter(
'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name))
# formatter = logging.Formatter(
# 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name))
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
return log | creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning") | Below is the the instruction that describes the task:
### Input:
creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")
### Response:
def LOGGER(filename):
"""creates a logger with the given name.
You can use it as follows::
log = cloudmesh.common.LOGGER(__file__)
log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")
"""
pwd = os.getcwd()
name = filename.replace(pwd, "$PWD")
try:
(first, name) = name.split("site-packages")
name += "... site"
except:
pass
loglevel = logging.CRITICAL
try:
level = grep("loglevel:", config_file(
"/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower()
if level.upper() == "DEBUG":
loglevel = logging.DEBUG
elif level.upper() == "INFO":
loglevel = logging.INFO
elif level.upper() == "WARNING":
loglevel = logging.WARNING
elif level.upper() == "ERROR":
loglevel = logging.ERROR
else:
loglevel = logging.CRITICAL
except:
# print "LOGLEVEL NOT FOUND"
loglevel = logging.DEBUG
log = logging.getLogger(name)
log.setLevel(loglevel)
formatter = logging.Formatter(
'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name))
# formatter = logging.Formatter(
# 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name))
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
return log |
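What using such a logger factory amounts to in practice, shown here with the standard library directly since the cloudmesh debug config file may not exist; the name string is illustrative:

import logging

name = "$PWD/example.py"  # illustrative display name
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name)))
log.addHandler(handler)

log.error("this is an error")
log.info("this is an info")
log.warning("this is a warning")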
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline) | Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes. | Below is the the instruction that describes the task:
### Input:
Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
### Response:
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline) |
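Not the same algorithm as above, but the standard library's difflib gives a quick feel for the kind of edit opcodes a text diff produces; the two strings are arbitrary:

from difflib import SequenceMatcher

text1 = "the quick brown fox"
text2 = "the quick red fox"

for tag, i1, i2, j1, j2 in SequenceMatcher(None, text1, text2).get_opcodes():
    print(tag, repr(text1[i1:i2]), "->", repr(text2[j1:j2]))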
def class_get_help(cls, inst=None):
"""Get the help string for this class in ReST format.
If `inst` is given, it's current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
cls_traits = cls.class_traits(config=True)
final_help = []
final_help.append(u'%s options' % cls.__name__)
final_help.append(len(final_help[0])*u'-')
for k,v in sorted(cls.class_traits(config=True).iteritems()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return '\n'.join(final_help) | Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults. | Below is the the instruction that describes the task:
### Input:
Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
### Response:
def class_get_help(cls, inst=None):
"""Get the help string for this class in ReST format.
If `inst` is given, its current trait values will be used in place of
class defaults.
"""
assert inst is None or isinstance(inst, cls)
cls_traits = cls.class_traits(config=True)
final_help = []
final_help.append(u'%s options' % cls.__name__)
final_help.append(len(final_help[0])*u'-')
for k,v in sorted(cls.class_traits(config=True).iteritems()):
help = cls.class_get_trait_help(v, inst)
final_help.append(help)
return '\n'.join(final_help) |
def _get_goid2dbids(associations):
"""Return gene2go data for user-specified taxids."""
go2ids = cx.defaultdict(set)
for ntd in associations:
go2ids[ntd.GO_ID].add(ntd.DB_ID)
return dict(go2ids) | Return gene2go data for user-specified taxids. | Below is the the instruction that describes the task:
### Input:
Return gene2go data for user-specified taxids.
### Response:
def _get_goid2dbids(associations):
"""Return gene2go data for user-specified taxids."""
go2ids = cx.defaultdict(set)
for ntd in associations:
go2ids[ntd.GO_ID].add(ntd.DB_ID)
return dict(go2ids) |
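The grouping idiom above in isolation, with made-up namedtuple records standing in for the gene2go association entries:

import collections

Assoc = collections.namedtuple("Assoc", "GO_ID DB_ID")  # stand-in for the real records
associations = [Assoc("GO:0008150", "geneA"), Assoc("GO:0008150", "geneB"),
                Assoc("GO:0003674", "geneA")]

go2ids = collections.defaultdict(set)
for ntd in associations:
    go2ids[ntd.GO_ID].add(ntd.DB_ID)
print(dict(go2ids))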
def ObjectEnum(ctx):
"""Object Enumeration.
Should export the whole list from the game for the best accuracy.
"""
return Enum(
ctx,
villager_male=83,
villager_female=293,
scout_cavalry=448,
eagle_warrior=751,
king=434,
flare=332,
relic=285,
turkey=833,
sheep=594,
deer=65,
boar=48,
iron_boar=810,
ostrich=1026,
javelina=822,
crocodile=1031,
rhinoceros=1139,
wolf=126,
jaguar=812,
hawk=96,
macaw=816,
shore_fish=69,
fish_1=455,
fish_2=456,
fish_4=458,
fish_3=457,
marlin_1=450,
marlin_2=451,
dolphin=452,
cactus=709,
berry_bush=59,
stone_pile=102,
gold_pile=66,
forest_tree=350,
forest_tree_2=411,
snow_pine_tree=413,
straggler_tree=349,
tc_1=109,
tc_2=618,
tc_3=619,
tc_4=620,
castle=70,
palisade_wall=72,
stone_wall=117,
stone_gate_1=64,
stone_gate_2=81,
stone_gate_3=88,
stone_gate_4=95,
palisade_gate_1=662,
palisade_gate_2=666,
palisade_gate_3=670,
palisade_gate_4=674,
fortified_wall=155,
cliff_1=264,
cliff_2=265,
cliff_3=266,
cliff_4=267,
cliff_5=268,
cliff_6=269,
cliff_7=270,
cliff_8=271,
cliff_9=272,
cliff_10=273,
outpost=598,
shipwreck=722,
map_revealer=837,
default=Pass
) | Object Enumeration.
Should export the whole list from the game for the best accuracy. | Below is the the instruction that describes the task:
### Input:
Object Enumeration.
Should export the whole list from the game for the best accuracy.
### Response:
def ObjectEnum(ctx):
"""Object Enumeration.
Should export the whole list from the game for the best accuracy.
"""
return Enum(
ctx,
villager_male=83,
villager_female=293,
scout_cavalry=448,
eagle_warrior=751,
king=434,
flare=332,
relic=285,
turkey=833,
sheep=594,
deer=65,
boar=48,
iron_boar=810,
ostrich=1026,
javelina=822,
crocodile=1031,
rhinoceros=1139,
wolf=126,
jaguar=812,
hawk=96,
macaw=816,
shore_fish=69,
fish_1=455,
fish_2=456,
fish_4=458,
fish_3=457,
marlin_1=450,
marlin_2=451,
dolphin=452,
cactus=709,
berry_bush=59,
stone_pile=102,
gold_pile=66,
forest_tree=350,
forest_tree_2=411,
snow_pine_tree=413,
straggler_tree=349,
tc_1=109,
tc_2=618,
tc_3=619,
tc_4=620,
castle=70,
palisade_wall=72,
stone_wall=117,
stone_gate_1=64,
stone_gate_2=81,
stone_gate_3=88,
stone_gate_4=95,
palisade_gate_1=662,
palisade_gate_2=666,
palisade_gate_3=670,
palisade_gate_4=674,
fortified_wall=155,
cliff_1=264,
cliff_2=265,
cliff_3=266,
cliff_4=267,
cliff_5=268,
cliff_6=269,
cliff_7=270,
cliff_8=271,
cliff_9=272,
cliff_10=273,
outpost=598,
shipwreck=722,
map_revealer=837,
default=Pass
) |
def _collect_fields(self):
""" Iterate over all Field objects within, including multi fields. """
for f in itervalues(self.properties.to_dict()):
yield f
# multi fields
if hasattr(f, 'fields'):
for inner_f in itervalues(f.fields.to_dict()):
yield inner_f
# nested and inner objects
if hasattr(f, '_collect_fields'):
for inner_f in f._collect_fields():
yield inner_f | Iterate over all Field objects within, including multi fields. | Below is the the instruction that describes the task:
### Input:
Iterate over all Field objects within, including multi fields.
### Response:
def _collect_fields(self):
""" Iterate over all Field objects within, including multi fields. """
for f in itervalues(self.properties.to_dict()):
yield f
# multi fields
if hasattr(f, 'fields'):
for inner_f in itervalues(f.fields.to_dict()):
yield inner_f
# nested and inner objects
if hasattr(f, '_collect_fields'):
for inner_f in f._collect_fields():
yield inner_f |
def packed_parallel_tsv_dataset(filenames=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
shuffle_buffer_size=10000,
eos_id=1):
"""Reads parallel tab-separated text file. One example per line."""
dataset = tf.data.TextLineDataset(filenames)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(shuffle_buffer_size)
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.to_int64(inputs_enc), [eos_id]], 0)
targets_enc = tf.concat([tf.to_int64(targets_enc), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(_parse_fn)
dataset = dataset.map(_encode_fn)
return pack_and_batch(dataset, batch_size, sequence_length) | Reads parallel tab-separated text file. One example per line. | Below is the instruction that describes the task:
### Input:
Reads parallel tab-separated text file. One example per line.
### Response:
def packed_parallel_tsv_dataset(filenames=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
shuffle_buffer_size=10000,
eos_id=1):
"""Reads parallel tab-separated text file. One example per line."""
dataset = tf.data.TextLineDataset(filenames)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(shuffle_buffer_size)
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.to_int64(inputs_enc), [eos_id]], 0)
targets_enc = tf.concat([tf.to_int64(targets_enc), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(_parse_fn)
dataset = dataset.map(_encode_fn)
return pack_and_batch(dataset, batch_size, sequence_length) |
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.lingeling and self.status == False:
return pysolvers.lingeling_core(self.lingeling, self.prev_assumps) | Get an unsatisfiable core if the formula was previously
unsatisfied. | Below is the instruction that describes the task:
### Input:
Get an unsatisfiable core if the formula was previously
unsatisfied.
### Response:
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.lingeling and self.status == False:
return pysolvers.lingeling_core(self.lingeling, self.prev_assumps) |
def decode_aes256_base64_auto(data, encryption_key):
"""Guesses AES cipher (EBC or CBD) from the length of the base64 encoded data."""
assert isinstance(data, bytes)
length = len(data)
if length == 0:
return b''
elif data[0] == b'!'[0]:
return decode_aes256_cbc_base64(data, encryption_key)
else:
return decode_aes256_ecb_base64(data, encryption_key) | Guesses AES cipher (ECB or CBC) from the length of the base64 encoded data. | Below is the instruction that describes the task:
### Input:
Guesses AES cipher (ECB or CBC) from the length of the base64 encoded data.
### Response:
def decode_aes256_base64_auto(data, encryption_key):
"""Guesses AES cipher (EBC or CBD) from the length of the base64 encoded data."""
assert isinstance(data, bytes)
length = len(data)
if length == 0:
return b''
elif data[0] == b'!'[0]:
return decode_aes256_cbc_base64(data, encryption_key)
else:
return decode_aes256_ecb_base64(data, encryption_key) |
def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
""" Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included be setting the value to ``True`` (Not as a single value list,
but as the only value.). Alternatively, an iterator can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config.
"""
iterables = {}
requested_iterables = config["iterables"]
for k, v in requested_iterables.items():
if k not in possible_iterables:
raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
logger.debug(f"k: {k}, v: {v}")
additional_iterable: List[Any] = []
enum_values = possible_iterables[k]
# Check for a string. This is wrong, and the user should be notified.
if isinstance(v, str):
raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
# Allow the possibility to skip
if v is False:
continue
# Allow the possibility to including all possible values in the enum.
elif v is True:
additional_iterable = list(enum_values)
else:
if enum_values is None:
# The enumeration values are none, which means that we want to take
# all of the values defined in the config.
additional_iterable = list(v)
else:
# Otherwise, only take the requested values.
for el in v:
additional_iterable.append(enum_values[el])
# Store for later
iterables[k] = additional_iterable
return iterables | Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (Not as a single value list,
but as the only value.). Alternatively, an iterator can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config. | Below is the instruction that describes the task:
### Input:
Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (Not as a single value list,
but as the only value.). Alternatively, an iterator can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config.
### Response:
def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
""" Determine iterable values to use to create objects for a given configuration.
All values of an iterable can be included by setting the value to ``True`` (Not as a single value list,
but as the only value.). Alternatively, an iterator can be disabled by setting the value to ``False``.
Args:
config: The dict-like configuration from ruamel.yaml which should be overridden.
possible_iterables: Key value pairs of names of enumerations and their values.
Returns:
dict: Iterables values that were requested in the config.
"""
iterables = {}
requested_iterables = config["iterables"]
for k, v in requested_iterables.items():
if k not in possible_iterables:
raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
logger.debug(f"k: {k}, v: {v}")
additional_iterable: List[Any] = []
enum_values = possible_iterables[k]
# Check for a string. This is wrong, and the user should be notified.
if isinstance(v, str):
raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
# Allow the possibility to skip
if v is False:
continue
# Allow the possibility to including all possible values in the enum.
elif v is True:
additional_iterable = list(enum_values)
else:
if enum_values is None:
# The enumeration values are none, which means that we want to take
# all of the values defined in the config.
additional_iterable = list(v)
else:
# Otherwise, only take the requested values.
for el in v:
additional_iterable.append(enum_values[el])
# Store for later
iterables[k] = additional_iterable
return iterables |
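A minimal usage sketch for the function above (the ``Fruit`` enum and the config dict are hypothetical, invented only for illustration, and a plain ``dict`` stands in for the YAML ``DictLike``; the function also expects a module-level ``logger``, so one is created here):
import enum
import logging

logger = logging.getLogger(__name__)  # the function above calls logger.debug(...)

class Fruit(enum.Enum):  # hypothetical enum, only for this example
    apple = 1
    pear = 2

config = {"iterables": {"fruit": True,        # True -> take every Fruit value
                        "label": ["a", "b"],  # explicit values, no enum mapping (None below)
                        "skipped": False}}    # False -> dropped from the result
possible_iterables = {"fruit": Fruit, "label": None, "skipped": Fruit}

selected = determine_selection_of_iterable_values_from_config(config, possible_iterables)
# selected == {"fruit": [Fruit.apple, Fruit.pear], "label": ["a", "b"]}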
def store(self):
"""
Store and return packages for upgrading
"""
data = repo_data(self.PACKAGES_TXT, "slack", self.flag)
black = BlackList().packages(pkgs=data[0], repo="slack")
for name, loc, comp, uncomp in zip(data[0], data[1], data[2], data[3]):
status(0.0003)
repo_pkg_name = split_package(name)[0]
if (not os.path.isfile(self.meta.pkg_path + name[:-4]) and
repo_pkg_name not in black and
repo_pkg_name not in self.skip):
self.dwn_links.append("{0}{1}/{2}".format(mirrors("", ""),
loc, name))
self.comp_sum.append(comp)
self.uncomp_sum.append(uncomp)
self.upgrade_all.append(name)
self.count_upg += 1
if not find_package(repo_pkg_name + self.meta.sp,
self.meta.pkg_path):
self.count_added += 1
self.count_upg -= 1
return self.count_upg | Store and return packages for upgrading | Below is the instruction that describes the task:
### Input:
Store and return packages for upgrading
### Response:
def store(self):
"""
Store and return packages for upgrading
"""
data = repo_data(self.PACKAGES_TXT, "slack", self.flag)
black = BlackList().packages(pkgs=data[0], repo="slack")
for name, loc, comp, uncomp in zip(data[0], data[1], data[2], data[3]):
status(0.0003)
repo_pkg_name = split_package(name)[0]
if (not os.path.isfile(self.meta.pkg_path + name[:-4]) and
repo_pkg_name not in black and
repo_pkg_name not in self.skip):
self.dwn_links.append("{0}{1}/{2}".format(mirrors("", ""),
loc, name))
self.comp_sum.append(comp)
self.uncomp_sum.append(uncomp)
self.upgrade_all.append(name)
self.count_upg += 1
if not find_package(repo_pkg_name + self.meta.sp,
self.meta.pkg_path):
self.count_added += 1
self.count_upg -= 1
return self.count_upg |
def _gorg(a):
"""Return the farthest origin of a generic class (internal helper)."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a | Return the farthest origin of a generic class (internal helper). | Below is the instruction that describes the task:
### Input:
Return the farthest origin of a generic class (internal helper).
### Response:
def _gorg(a):
"""Return the farthest origin of a generic class (internal helper)."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a |
def get_import_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the import data url
"""
return get_data_url(deployment_name,
endpoint_type='http-import',
app_url=app_url,
token_manager=token_manager) | return the import data url | Below is the instruction that describes the task:
### Input:
return the import data url
### Response:
def get_import_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the import data url
"""
return get_data_url(deployment_name,
endpoint_type='http-import',
app_url=app_url,
token_manager=token_manager) |
def patch(self, resource_endpoint, data={}):
"""Don't use it."""
url = self._create_request_url(resource_endpoint)
return req.patch(url, headers=self.auth_header, json=data) | Don't use it. | Below is the instruction that describes the task:
### Input:
Don't use it.
### Response:
def patch(self, resource_endpoint, data={}):
"""Don't use it."""
url = self._create_request_url(resource_endpoint)
return req.patch(url, headers=self.auth_header, json=data) |
def get_session_list(self, account):
"""
Get the session list of a customer service account
For details, please refer to
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the session list of the customer service account
"""
res = self._get(
'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
params={'kf_account': account},
result_processor=lambda x: x['sessionlist']
)
return res | Get the session list of a customer service account
For details, please refer to
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the session list of the customer service account | Below is the instruction that describes the task:
### Input:
Get the session list of a customer service account
For details, please refer to
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the session list of the customer service account
### Response:
def get_session_list(self, account):
"""
Get the session list of a customer service account
For details, please refer to
http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html
:param account: full customer service account
:return: the session list of the customer service account
"""
res = self._get(
'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
params={'kf_account': account},
result_processor=lambda x: x['sessionlist']
)
return res |
def transfer(self, volume, source, dest, **kwargs):
"""
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
kwargs['mode'] = kwargs.get('mode', 'transfer')
touch_tip = kwargs.get('touch_tip', False)
if touch_tip is True:
touch_tip = -1
kwargs['touch_tip'] = touch_tip
tip_options = {
'once': 1,
'never': 0,
'always': float('inf')
}
tip_option = kwargs.get('new_tip', 'once')
tips = tip_options.get(tip_option)
if tips is None:
raise ValueError('Unknown "new_tip" option: {}'.format(tip_option))
plan = self._create_transfer_plan(volume, source, dest, **kwargs)
self._run_transfer_plan(tips, plan, **kwargs)
return self | Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP | Below is the instruction that describes the task:
### Input:
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
### Response:
def transfer(self, volume, source, dest, **kwargs):
"""
Transfer will move a volume of liquid from a source location(s)
to a dest location(s). It is a higher-level command, incorporating
other :any:`Pipette` commands, like :any:`aspirate` and
:any:`dispense`, designed to make protocol writing easier at the
cost of specificity.
Parameters
----------
volumes : number, list, or tuple
The amount of volume to remove from each `sources` :any:`Placeable`
and add to each `targets` :any:`Placeable`. If `volumes` is a list,
each volume will be used for the sources/targets at the
matching index. If `volumes` is a tuple with two elements,
like `(20, 100)`, then a list of volumes will be generated with
a linear gradient between the two volumes in the tuple.
source : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, from where
liquid will be :any:`aspirate`ed from.
dest : Placeable or list
Single :any:`Placeable` or list of :any:`Placeable`s, where
liquid will be :any:`dispense`ed to.
new_tip : str
The number of clean tips this transfer command will use. If
'never', no tips will be picked up nor dropped. If 'once', a
single tip will be used for all commands. If 'always', a new tip
will be used for each transfer. Default is 'once'.
trash : boolean
If `False` (default behavior) tips will be returned to their
tip rack. If `True` and a trash container has been attached
to this `Pipette`, then the tip will be sent to the trash
container.
touch_tip : boolean
If `True`, a :any:`touch_tip` will occur following each
:any:`aspirate` and :any:`dispense`. If set to `False` (default),
no :any:`touch_tip` will occur.
blow_out : boolean
If `True`, a :any:`blow_out` will occur following each
:any:`dispense`, but only if the pipette has no liquid left in it.
If set to `False` (default), no :any:`blow_out` will occur.
mix_before : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will precede each :any:`aspirate` during the transfer and dispense.
The tuple's values are interpreted as (repetitions, volume).
mix_after : tuple
Specify the number of repetitions and volume to mix, and a :any:`mix`
will follow each :any:`dispense` during the transfer or
consolidate. The tuple's values are interpreted as
(repetitions, volume).
carryover : boolean
If `True` (default), any `volumes` that exceed the maximum volume
of this `Pipette` will be split into multiple smaller volumes.
repeat : boolean
(Only applicable to :any:`distribute` and :any:`consolidate`) If
`True` (default), sequential :any:`aspirate` volumes will be
combined into one tip for the purpose of saving time. If `False`,
all volumes will be transferred separately.
gradient : lambda
Function for calculating the curve used for gradient volumes.
When `volumes` is a tuple of length 2, its values are used
to create a list of gradient volumes. The default curve for
this gradient is linear (lambda x: x), however a method can
be passed with the `gradient` keyword argument to create a
custom curve.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
...
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '5') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
>>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
kwargs['mode'] = kwargs.get('mode', 'transfer')
touch_tip = kwargs.get('touch_tip', False)
if touch_tip is True:
touch_tip = -1
kwargs['touch_tip'] = touch_tip
tip_options = {
'once': 1,
'never': 0,
'always': float('inf')
}
tip_option = kwargs.get('new_tip', 'once')
tips = tip_options.get(tip_option)
if tips is None:
raise ValueError('Unknown "new_tip" option: {}'.format(tip_option))
plan = self._create_transfer_plan(volume, source, dest, **kwargs)
self._run_transfer_plan(tips, plan, **kwargs)
return self |
def Nasv(macs,T):
'''
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = macs*1.e-27
Nasv = s*vtherm*Na
return Nasv | Returns
-------
Na*<sigma v>
for MACS [mb] at T [K]. | Below is the instruction that describes the task:
### Input:
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
### Response:
def Nasv(macs,T):
'''
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K].
'''
Na = avogadro_constant
k = boltzmann_constant
vtherm=(2.*k*T/mass_H_atom)**0.5
s = macs*1.e-27
Nasv = s*vtherm*Na
return Nasv |
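The computation above converts the MACS from millibarn to cm^2 (the 1e-27 factor) and multiplies by the thermal velocity of a hydrogen-mass particle. A quick numeric restatement with explicit CGS constants (the constant values below are standard CGS figures given here as assumptions, since the module-level avogadro_constant, boltzmann_constant and mass_H_atom are not shown in the record):
from math import sqrt

Na = 6.02214076e23     # Avogadro constant [1/mol]
k = 1.380649e-16       # Boltzmann constant [erg/K]
m_H = 1.6735575e-24    # hydrogen atom mass [g]

macs, T = 100.0, 3.0e8                  # 100 mb at T = 3e8 K (kT ~ 26 keV), illustrative values
vtherm = sqrt(2.0 * k * T / m_H)        # thermal velocity [cm/s]
nasv = macs * 1.0e-27 * vtherm * Na     # mb -> cm^2, then Na * sigma * v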
def _is_valid_input(self, inpt, metadata, array):
"""The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value.
"""
# pylint: disable=too-many-branches,no-self-use
# Please redesign, and move to utility module
syntax = metadata.get_syntax()
# First check if this is a required data element
if metadata.is_required and not inpt:
return False
valid = True # Innocent until proven guilty
# Recursively run through all the elements of an array
if array:
if len(inpt) < metadata.get_minimum_elements():
valid = False
elif len(inpt) > metadata.get_maximum_elements():
valid = False
else:
for element in inpt:
valid = (valid and self._is_valid_input(element, metadata, False))
# Run through all the possible syntax types
elif syntax == 'ID':
valid = self._is_valid_id(inpt)
elif syntax == 'TYPE':
valid = self._is_valid_type(inpt)
elif syntax == 'BOOLEAN':
valid = self._is_valid_boolean(inpt)
elif syntax == 'STRING':
valid = self._is_valid_string(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
elif syntax == 'DATETIME':
valid = self._is_valid_date_time(inpt, metadata)
elif syntax == 'DURATION':
valid = self._is_valid_duration(inpt, metadata)
elif syntax == 'CARDINAL':
valid = self._is_valid_cardinal(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
else:
raise errors.OperationFailed('no validation function available for ' + syntax)
return valid | The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value. | Below is the instruction that describes the task:
### Input:
The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value.
### Response:
def _is_valid_input(self, inpt, metadata, array):
"""The _is_valid_input method takes three arguments:
the user input to be checked, the associated osid.Metadata object
containing validation requirements and a boolean value indicating
whether this is an array value.
"""
# pylint: disable=too-many-branches,no-self-use
# Please redesign, and move to utility module
syntax = metadata.get_syntax()
# First check if this is a required data element
if metadata.is_required and not inpt:
return False
valid = True # Innocent until proven guilty
# Recursively run through all the elements of an array
if array:
if len(inpt) < metadata.get_minimum_elements():
valid = False
elif len(inpt) > metadata.get_maximum_elements():
valid = False
else:
for element in inpt:
valid = (valid and self._is_valid_input(element, metadata, False))
# Run through all the possible syntax types
elif syntax == 'ID':
valid = self._is_valid_id(inpt)
elif syntax == 'TYPE':
valid = self._is_valid_type(inpt)
elif syntax == 'BOOLEAN':
valid = self._is_valid_boolean(inpt)
elif syntax == 'STRING':
valid = self._is_valid_string(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
elif syntax == 'DATETIME':
valid = self._is_valid_date_time(inpt, metadata)
elif syntax == 'DURATION':
valid = self._is_valid_duration(inpt, metadata)
elif syntax == 'CARDINAL':
valid = self._is_valid_cardinal(inpt, metadata)
elif syntax == 'INTEGER':
valid = self._is_valid_integer(inpt, metadata)
elif syntax == 'DECIMAL':
valid = self._is_valid_decimal(inpt, metadata)
else:
raise errors.OperationFailed('no validation function available for ' + syntax)
return valid |
def set_parameter_scale(self, name, par, scale):
"""Update the scale of a parameter while keeping its value constant."""
name = self.roi.get_source_by_name(name).name
idx = self.like.par_index(name, par)
current_bounds = list(self.like.model[idx].getBounds())
current_scale = self.like.model[idx].getScale()
current_value = self.like[idx].getValue()
self.like[idx].setScale(scale)
self.like[idx].setValue(current_value * current_scale / scale)
self.like[idx].setBounds(current_bounds[0] * current_scale / scale,
current_bounds[1] * current_scale / scale)
self._sync_params(name) | Update the scale of a parameter while keeping its value constant. | Below is the instruction that describes the task:
### Input:
Update the scale of a parameter while keeping its value constant.
### Response:
def set_parameter_scale(self, name, par, scale):
"""Update the scale of a parameter while keeping its value constant."""
name = self.roi.get_source_by_name(name).name
idx = self.like.par_index(name, par)
current_bounds = list(self.like.model[idx].getBounds())
current_scale = self.like.model[idx].getScale()
current_value = self.like[idx].getValue()
self.like[idx].setScale(scale)
self.like[idx].setValue(current_value * current_scale / scale)
self.like[idx].setBounds(current_bounds[0] * current_scale / scale,
current_bounds[1] * current_scale / scale)
self._sync_params(name) |
def allclose_variable(a, b, limits, rtols=None, atols=None):
'''Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True
'''
l = float(len(a))
if rtols is None and atols is None:
raise Exception('Either absolute errors or relative errors must be supplied.')
elif rtols is None:
rtols = [0 for i in atols]
elif atols is None:
atols = [0 for i in rtols]
for atol, rtol, lim in zip(atols, rtols, limits):
matches = np.count_nonzero(np.isclose(a, b, rtol=rtol, atol=atol))
if 1-matches/l > lim:
return False
return True | Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True | Below is the instruction that describes the task:
### Input:
Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True
### Response:
def allclose_variable(a, b, limits, rtols=None, atols=None):
'''Returns True if two arrays are element-wise equal within several
different tolerances. Tolerance values are always positive, usually
very small. Based on numpy's allclose function.
Only atols or rtols needs to be specified; both are used if given.
Parameters
----------
a, b : array_like
Input arrays to compare.
limits : array_like
Fractions of elements allowed to not match to within each tolerance.
rtols : array_like
The relative tolerance parameters.
atols : float
The absolute tolerance parameters.
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerances; False otherwise.
Examples
--------
10 random similar variables, all of them matching to within 1E-5, allowing
up to half to match up to 1E-6.
>>> x = [2.7244322249597719e-08, 3.0105683900110473e-10, 2.7244124924802327e-08, 3.0105259397637556e-10, 2.7243929226310193e-08, 3.0104990272770901e-10, 2.7243666849384451e-08, 3.0104101821236015e-10, 2.7243433745917367e-08, 3.0103707421519949e-10]
>>> y = [2.7244328304561904e-08, 3.0105753470546008e-10, 2.724412872417824e-08, 3.0105303055834564e-10, 2.7243914341030203e-08, 3.0104819238021998e-10, 2.7243684057561379e-08, 3.0104299541023674e-10, 2.7243436694839306e-08, 3.010374130526363e-10]
>>> allclose_variable(x, y, limits=[.0, .5], rtols=[1E-5, 1E-6])
True
'''
l = float(len(a))
if rtols is None and atols is None:
raise Exception('Either absolute errors or relative errors must be supplied.')
elif rtols is None:
rtols = [0 for i in atols]
elif atols is None:
atols = [0 for i in rtols]
for atol, rtol, lim in zip(atols, rtols, limits):
matches = np.count_nonzero(np.isclose(a, b, rtol=rtol, atol=atol))
if 1-matches/l > lim:
return False
return True |
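A second, shorter sketch of how limits and atols interact (made-up numbers; numpy is imported here both to build the arrays and because the function body relies on np being in scope):
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([1.0, 2.0, 3.0, 4.5])   # one element off by 0.5
# one strict tolerance that 25% of elements may miss, one loose tolerance nothing may miss
allclose_variable(a, b, limits=[0.25, 0.0], atols=[1e-9, 1.0])   # True
allclose_variable(a, b, limits=[0.0, 0.0], atols=[1e-9, 1.0])    # False: 25% fail the 1e-9 check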
def create_connections(self, connection_map):
'''Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
'''
agents = self.get_agents(addr=False)
rets = []
for a in agents:
if a.addr in connection_map:
r = a.add_connections(connection_map[a.addr])
rets.append(r)
return rets | Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made. | Below is the instruction that describes the task:
### Input:
Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
### Response:
def create_connections(self, connection_map):
'''Create agent connections from a given connection map.
:param dict connection_map:
A map of connections to be created. Dictionary where keys are
agent addresses and values are lists of (addr, attitude)-tuples
suitable for
:meth:`~creamas.core.agent.CreativeAgent.add_connections`.
Only connections for agents in this environment are made.
'''
agents = self.get_agents(addr=False)
rets = []
for a in agents:
if a.addr in connection_map:
r = a.add_connections(connection_map[a.addr])
rets.append(r)
return rets |
def delete(self, table_name):
"""Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed
"""
dataset = Dataset(self, table_name)
deleted = dataset.delete()
if deleted:
return deleted
raise CartoException('''The table `{}` doesn't exist'''.format(table_name)) | Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed | Below is the instruction that describes the task:
### Input:
Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed
### Response:
def delete(self, table_name):
"""Delete a table in user's CARTO account.
Args:
table_name (str): Name of table to delete
Returns:
bool: `True` if table is removed
"""
dataset = Dataset(self, table_name)
deleted = dataset.delete()
if deleted:
return deleted
raise CartoException('''The table `{}` doesn't exist'''.format(table_name)) |
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val)) | Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10 | Below is the instruction that describes the task:
### Input:
Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
### Response:
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
"""
Returns the success value in the :class:`Result` or computes a default
from the error value.
Args:
op: The function to computes default with.
Returns:
The success value in the :class:`Result` if it is
a :meth:`Result.Ok` value, otherwise ``op(E)``.
Examples:
>>> Ok(1).unwrap_or_else(lambda e: e * 10)
1
>>> Err(1).unwrap_or_else(lambda e: e * 10)
10
"""
return cast(T, self._val) if self._is_ok else op(cast(E, self._val)) |
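One more illustrative call in the style of the doctest above (assuming the same Ok/Err constructors from the surrounding library); it shows that the callback receives the error value, so the fallback type can differ from the stored one:
Ok("hit").unwrap_or_else(len)     # "hit" (callback ignored on success)
Err("miss").unwrap_or_else(len)   # 4 (len applied to the error value)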
def cyvcf_add_filter(rec, name):
"""Add a FILTER value to a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
if name not in filters:
filters.append(name)
rec.FILTER = filters
return rec | Add a FILTER value to a cyvcf2 record | Below is the instruction that describes the task:
### Input:
Add a FILTER value to a cyvcf2 record
### Response:
def cyvcf_add_filter(rec, name):
"""Add a FILTER value to a cyvcf2 record
"""
if rec.FILTER:
filters = rec.FILTER.split(";")
else:
filters = []
if name not in filters:
filters.append(name)
rec.FILTER = filters
return rec |
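A minimal sketch of the FILTER handling with a stand-in object (a real cyvcf2 Variant would normally be passed; SimpleNamespace is used here only to show how an existing FILTER string is split, deduplicated and re-assigned as a list):
from types import SimpleNamespace

rec = SimpleNamespace(FILTER="LowQual;q10")   # stand-in for a cyvcf2 record
cyvcf_add_filter(rec, "SB")
# rec.FILTER == ["LowQual", "q10", "SB"]

rec2 = SimpleNamespace(FILTER=None)           # record with no filters yet
cyvcf_add_filter(rec2, "q10")
# rec2.FILTER == ["q10"]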
def configure_node(self, node):
"""Slaves need to know if they are collocated and what files have moved."""
node.slaveinput['cov_master_host'] = socket.gethostname()
node.slaveinput['cov_master_topdir'] = self.topdir
node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots] | Slaves need to know if they are collocated and what files have moved. | Below is the instruction that describes the task:
### Input:
Slaves need to know if they are collocated and what files have moved.
### Response:
def configure_node(self, node):
"""Slaves need to know if they are collocated and what files have moved."""
node.slaveinput['cov_master_host'] = socket.gethostname()
node.slaveinput['cov_master_topdir'] = self.topdir
node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots] |
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
"""Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
"""
samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
return samples_to_time(samples, sr=sr) | Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01]) | Below is the instruction that describes the task:
### Input:
Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
### Response:
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
"""Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
"""
samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
return samples_to_time(samples, sr=sr) |
def parse(argv=None):
"""
Parse some arguments using the parser.
"""
if argv is None:
argv = sys.argv[1:]
# Evade http://bugs.python.org/issue9253
if not argv or argv[0] not in {"run", "transform"}:
argv = ["run"] + argv
arguments = _clean(_parser.parse_args(argv))
return arguments | Parse some arguments using the parser. | Below is the instruction that describes the task:
### Input:
Parse some arguments using the parser.
### Response:
def parse(argv=None):
"""
Parse some arguments using the parser.
"""
if argv is None:
argv = sys.argv[1:]
# Evade http://bugs.python.org/issue9253
if not argv or argv[0] not in {"run", "transform"}:
argv = ["run"] + argv
arguments = _clean(_parser.parse_args(argv))
return arguments |
def _get_ignore_from_manifest_lines(lines):
"""Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
"""
ignore = []
ignore_regexps = []
for line in lines:
try:
cmd, rest = line.split(None, 1)
except ValueError:
# no whitespace, so not interesting
continue
for part in rest.split():
# distutils enforces these warnings on Windows only
if part.startswith('/'):
warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if part.endswith('/'):
warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if cmd == 'exclude':
# An exclude of 'dirname/*css' can match 'dirname/foo.css'
# but not 'dirname/subdir/bar.css'. We need a regular
# expression for that, since fnmatch doesn't pay attention to
# directory separators.
for pat in rest.split():
if '*' in pat or '?' in pat or '[!' in pat:
ignore_regexps.append(_glob_to_regexp(pat))
else:
# No need for special handling.
ignore.append(pat)
elif cmd == 'global-exclude':
ignore.extend(rest.split())
elif cmd == 'recursive-exclude':
try:
dirname, patterns = rest.split(None, 1)
except ValueError:
# Wrong MANIFEST.in line.
warning("You have a wrong line in MANIFEST.in: %r\n"
"'recursive-exclude' expects <dir> <pattern1> "
"<pattern2> ..." % line)
continue
# Strip path separator for clarity.
dirname = dirname.rstrip(os.path.sep)
for pattern in patterns.split():
if pattern.startswith('*'):
ignore.append(dirname + os.path.sep + pattern)
else:
# 'recursive-exclude plone metadata.xml' should
# exclude plone/metadata.xml and
# plone/*/metadata.xml, where * can be any number
# of sub directories. We could use a regexp, but
# two ignores seems easier.
ignore.append(dirname + os.path.sep + pattern)
ignore.append(
dirname + os.path.sep + '*' + os.path.sep + pattern)
elif cmd == 'prune':
# rest is considered to be a directory name. It should
# not contain a path separator, as it actually has no
# effect in that case, but that could differ per python
# version. We strip it here to avoid double separators.
# XXX: mg: I'm not 100% sure the above is correct, AFAICS
# all pythons from 2.6 complain if the path has a leading or
# trailing slash -- on Windows, that is.
rest = rest.rstrip('/\\')
ignore.append(rest)
ignore.append(rest + os.path.sep + '*')
return ignore, ignore_regexps | Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore. | Below is the instruction that describes the task:
### Input:
Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
### Response:
def _get_ignore_from_manifest_lines(lines):
"""Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
"""
ignore = []
ignore_regexps = []
for line in lines:
try:
cmd, rest = line.split(None, 1)
except ValueError:
# no whitespace, so not interesting
continue
for part in rest.split():
# distutils enforces these warnings on Windows only
if part.startswith('/'):
warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if part.endswith('/'):
warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if cmd == 'exclude':
# An exclude of 'dirname/*css' can match 'dirname/foo.css'
# but not 'dirname/subdir/bar.css'. We need a regular
# expression for that, since fnmatch doesn't pay attention to
# directory separators.
for pat in rest.split():
if '*' in pat or '?' in pat or '[!' in pat:
ignore_regexps.append(_glob_to_regexp(pat))
else:
# No need for special handling.
ignore.append(pat)
elif cmd == 'global-exclude':
ignore.extend(rest.split())
elif cmd == 'recursive-exclude':
try:
dirname, patterns = rest.split(None, 1)
except ValueError:
# Wrong MANIFEST.in line.
warning("You have a wrong line in MANIFEST.in: %r\n"
"'recursive-exclude' expects <dir> <pattern1> "
"<pattern2> ..." % line)
continue
# Strip path separator for clarity.
dirname = dirname.rstrip(os.path.sep)
for pattern in patterns.split():
if pattern.startswith('*'):
ignore.append(dirname + os.path.sep + pattern)
else:
# 'recursive-exclude plone metadata.xml' should
# exclude plone/metadata.xml and
# plone/*/metadata.xml, where * can be any number
# of sub directories. We could use a regexp, but
# two ignores seems easier.
ignore.append(dirname + os.path.sep + pattern)
ignore.append(
dirname + os.path.sep + '*' + os.path.sep + pattern)
elif cmd == 'prune':
# rest is considered to be a directory name. It should
# not contain a path separator, as it actually has no
# effect in that case, but that could differ per python
# version. We strip it here to avoid double separators.
# XXX: mg: I'm not 100% sure the above is correct, AFAICS
# all pythons from 2.6 complain if the path has a leading or
# trailing slash -- on Windows, that is.
rest = rest.rstrip('/\\')
ignore.append(rest)
ignore.append(rest + os.path.sep + '*')
return ignore, ignore_regexps |
def get_schema_input_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
version = ET.SubElement(input, "version")
version.text = kwargs.pop('version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_schema_input_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
version = ET.SubElement(input, "version")
version.text = kwargs.pop('version')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def from_array(array):
"""
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import ChatPhoto
from pytgbot.api_types.receivable.updates import Message
data = {}
data['id'] = int(array.get('id'))
data['type'] = u(array.get('type'))
data['title'] = u(array.get('title')) if array.get('title') is not None else None
data['username'] = u(array.get('username')) if array.get('username') is not None else None
data['first_name'] = u(array.get('first_name')) if array.get('first_name') is not None else None
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['all_members_are_administrators'] = bool(array.get('all_members_are_administrators')) if array.get('all_members_are_administrators') is not None else None
data['photo'] = ChatPhoto.from_array(array.get('photo')) if array.get('photo') is not None else None
data['description'] = u(array.get('description')) if array.get('description') is not None else None
data['invite_link'] = u(array.get('invite_link')) if array.get('invite_link') is not None else None
data['pinned_message'] = Message.from_array(array.get('pinned_message')) if array.get('pinned_message') is not None else None
data['sticker_set_name'] = u(array.get('sticker_set_name')) if array.get('sticker_set_name') is not None else None
data['can_set_sticker_set'] = bool(array.get('can_set_sticker_set')) if array.get('can_set_sticker_set') is not None else None
data['_raw'] = array
return Chat(**data) | Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat | Below is the the instruction that describes the task:
### Input:
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
### Response:
def from_array(array):
"""
Deserialize a new Chat from a given dictionary.
:return: new Chat instance.
:rtype: Chat
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import ChatPhoto
from pytgbot.api_types.receivable.updates import Message
data = {}
data['id'] = int(array.get('id'))
data['type'] = u(array.get('type'))
data['title'] = u(array.get('title')) if array.get('title') is not None else None
data['username'] = u(array.get('username')) if array.get('username') is not None else None
data['first_name'] = u(array.get('first_name')) if array.get('first_name') is not None else None
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['all_members_are_administrators'] = bool(array.get('all_members_are_administrators')) if array.get('all_members_are_administrators') is not None else None
data['photo'] = ChatPhoto.from_array(array.get('photo')) if array.get('photo') is not None else None
data['description'] = u(array.get('description')) if array.get('description') is not None else None
data['invite_link'] = u(array.get('invite_link')) if array.get('invite_link') is not None else None
data['pinned_message'] = Message.from_array(array.get('pinned_message')) if array.get('pinned_message') is not None else None
data['sticker_set_name'] = u(array.get('sticker_set_name')) if array.get('sticker_set_name') is not None else None
data['can_set_sticker_set'] = bool(array.get('can_set_sticker_set')) if array.get('can_set_sticker_set') is not None else None
data['_raw'] = array
return Chat(**data) |
def call_async(self, func: Callable, *args, **kwargs):
"""
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
"""
return asyncio_extras.call_async(self.loop, func, *args, **kwargs) | Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call | Below is the the instruction that describes the task:
### Input:
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
### Response:
def call_async(self, func: Callable, *args, **kwargs):
"""
Call the given callable in the event loop thread.
This method lets you call asynchronous code from a worker thread.
Do not use it from within the event loop thread.
If the callable returns an awaitable, it is resolved before returning to the caller.
:param func: a regular function or a coroutine function
:param args: positional arguments to call the callable with
:param kwargs: keyword arguments to call the callable with
:return: the return value of the call
"""
return asyncio_extras.call_async(self.loop, func, *args, **kwargs) |
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True | Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date): | Below is the the instruction that describes the task:
### Input:
Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
### Response:
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True |
def consultar_cep(cep, ambiente=PRODUCAO):
"""Retorna o endereço correspondente ao número de CEP informado.
Arguments:
cep {str} -- CEP a ser consultado.
Keyword Arguments:
ambiente {int} -- Indica qual será o webservice utilizado na consulta de CEP. Valor default é PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- Quando ambiente selecionado não existe (esperado: PRODUCAO ou HOMOLOGACAO)
ExcecaoPyCEPCorreios -- Quando ocorre qualquer erro na consulta do CEP.
Returns:
dict -- Dados do endereço do CEP consultado.
"""
if ambiente not in URL:
raise KeyError('Ambiente inválido! Valor deve ser 1 para produção e 2 '
'para homologação')
try:
with warnings.catch_warnings():
# Desabilitamos o warning
warnings.simplefilter('ignore', InsecureRequestWarning)
warnings.simplefilter('ignore', ImportWarning)
client = zeep.Client(URL[ambiente])
endereco = client.service.consultaCEP(formatar_cep(cep))
return {
'bairro': endereco.bairro,
'cep': endereco.cep,
'cidade': endereco.cidade,
'end': endereco.end,
'uf': endereco.uf,
'complemento2': endereco.complemento2,
'unidadesPostagem': endereco.unidadesPostagem,
}
except zeep.exceptions.Fault as e:
raise excecoes.ExcecaoPyCEPCorreios(message=e.message) | Retorna o endereço correspondente ao número de CEP informado.
Arguments:
cep {str} -- CEP a ser consultado.
Keyword Arguments:
ambiente {int} -- Indica qual será o webservice utilizado na consulta de CEP. Valor default é PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- Quando ambiente selecionado não existe (esperado: PRODUCAO ou HOMOLOGACAO)
ExcecaoPyCEPCorreios -- Quando ocorre qualquer erro na consulta do CEP.
Returns:
dict -- Dados do endereço do CEP consultado. | Below is the the instruction that describes the task:
### Input:
Retorna o endereço correspondente ao número de CEP informado.
Arguments:
cep {str} -- CEP a ser consultado.
Keyword Arguments:
ambiente {int} -- Indica qual será o webservice utilizado na consulta de CEP. Valor default é PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- Quando ambiente selecionado não existe (esperado: PRODUCAO ou HOMOLOGACAO)
ExcecaoPyCEPCorreios -- Quando ocorre qualquer erro na consulta do CEP.
Returns:
dict -- Dados do endereço do CEP consultado.
### Response:
def consultar_cep(cep, ambiente=PRODUCAO):
"""Retorna o endereço correspondente ao número de CEP informado.
Arguments:
cep {str} -- CEP a ser consultado.
Keyword Arguments:
ambiente {int} -- Indica qual será o webservice utilizado na consulta de CEP. Valor default é PRODUCAO (default: {PRODUCAO})
Raises:
KeyError -- Quando ambiente selecionado não existe (esperado: PRODUCAO ou HOMOLOGACAO)
ExcecaoPyCEPCorreios -- Quando ocorre qualquer erro na consulta do CEP.
Returns:
dict -- Dados do endereço do CEP consultado.
"""
if ambiente not in URL:
raise KeyError('Ambiente inválido! Valor deve ser 1 para produção e 2 '
'para homologação')
try:
with warnings.catch_warnings():
# Desabilitamos o warning
warnings.simplefilter('ignore', InsecureRequestWarning)
warnings.simplefilter('ignore', ImportWarning)
client = zeep.Client(URL[ambiente])
endereco = client.service.consultaCEP(formatar_cep(cep))
return {
'bairro': endereco.bairro,
'cep': endereco.cep,
'cidade': endereco.cidade,
'end': endereco.end,
'uf': endereco.uf,
'complemento2': endereco.complemento2,
'unidadesPostagem': endereco.unidadesPostagem,
}
except zeep.exceptions.Fault as e:
raise excecoes.ExcecaoPyCEPCorreios(message=e.message) |
def get_bpf_pointer(tcpdump_lines):
"""Create a BPF Pointer for TCPDump filter"""
if conf.use_pypy:
return _legacy_bpf_pointer(tcpdump_lines)
# Allocate BPF instructions
size = int(tcpdump_lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
tcpdump_lines = tcpdump_lines[1:]
i = 0
for line in tcpdump_lines:
values = [int(v) for v in line.split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
i += 1
# Create the BPF program
return bpf_program(size, bip) | Create a BPF Pointer for TCPDump filter | Below is the the instruction that describes the task:
### Input:
Create a BPF Pointer for TCPDump filter
### Response:
def get_bpf_pointer(tcpdump_lines):
"""Create a BPF Pointer for TCPDump filter"""
if conf.use_pypy:
return _legacy_bpf_pointer(tcpdump_lines)
# Allocate BPF instructions
size = int(tcpdump_lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
tcpdump_lines = tcpdump_lines[1:]
i = 0
for line in tcpdump_lines:
values = [int(v) for v in line.split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
i += 1
# Create the BPF program
return bpf_program(size, bip) |
def _read_ftdna_famfinder(file):
""" Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
comment="#",
na_values="-",
names=["rsid", "chrom", "pos", "allele1", "allele2"],
index_col=0,
dtype={"chrom": object},
)
# create genotype column from allele columns
df["genotype"] = df["allele1"] + df["allele2"]
# delete allele columns
# http://stackoverflow.com/a/13485766
del df["allele1"]
del df["allele2"]
return sort_snps(df), "FTDNA" | Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source | Below is the the instruction that describes the task:
### Input:
Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
### Response:
def _read_ftdna_famfinder(file):
""" Read and parse Family Tree DNA (FTDNA) "famfinder" file.
https://www.familytreedna.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
comment="#",
na_values="-",
names=["rsid", "chrom", "pos", "allele1", "allele2"],
index_col=0,
dtype={"chrom": object},
)
# create genotype column from allele columns
df["genotype"] = df["allele1"] + df["allele2"]
# delete allele columns
# http://stackoverflow.com/a/13485766
del df["allele1"]
del df["allele2"]
return sort_snps(df), "FTDNA" |
def process_data(self, data):
"""Convert an unknown data input into a geojson dictionary."""
if isinstance(data, dict):
self.embed = True
return data
elif isinstance(data, str):
if data.lower().startswith(('http:', 'ftp:', 'https:')):
if not self.embed:
self.embed_link = data
return requests.get(data).json()
elif data.lstrip()[0] in '[{': # This is a GeoJSON inline string
self.embed = True
return json.loads(data)
else: # This is a filename
if not self.embed:
self.embed_link = data
with open(data) as f:
return json.loads(f.read())
elif hasattr(data, '__geo_interface__'):
self.embed = True
if hasattr(data, 'to_crs'):
data = data.to_crs(epsg='4326')
return json.loads(json.dumps(data.__geo_interface__))
else:
raise ValueError('Cannot render objects with any missing geometries'
': {!r}'.format(data)) | Convert an unknown data input into a geojson dictionary. | Below is the the instruction that describes the task:
### Input:
Convert an unknown data input into a geojson dictionary.
### Response:
def process_data(self, data):
"""Convert an unknown data input into a geojson dictionary."""
if isinstance(data, dict):
self.embed = True
return data
elif isinstance(data, str):
if data.lower().startswith(('http:', 'ftp:', 'https:')):
if not self.embed:
self.embed_link = data
return requests.get(data).json()
elif data.lstrip()[0] in '[{': # This is a GeoJSON inline string
self.embed = True
return json.loads(data)
else: # This is a filename
if not self.embed:
self.embed_link = data
with open(data) as f:
return json.loads(f.read())
elif hasattr(data, '__geo_interface__'):
self.embed = True
if hasattr(data, 'to_crs'):
data = data.to_crs(epsg='4326')
return json.loads(json.dumps(data.__geo_interface__))
else:
raise ValueError('Cannot render objects with any missing geometries'
': {!r}'.format(data)) |
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz | Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization. | Below is the the instruction that describes the task:
### Input:
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
### Response:
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz |
def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted. | Below is the the instruction that describes the task:
### Input:
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
### Response:
def delete_archive(self, archive_id):
"""
Deletes an OpenTok archive.
You can only delete an archive which has a status of "available" or "uploaded". Deleting an
archive removes its record from the list of archives. For an "available" archive, it also
removes the archive file, making it unavailable for download.
:param String archive_id: The archive ID of the archive to be deleted.
"""
response = requests.delete(self.endpoints.archive_url(archive_id), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout)
if response.status_code < 300:
pass
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) |
def main():
""" Called by PyBridge.start()
"""
#: If we set the TMP env variable the dev reloader will save file
#: and load changes in this directory instead of overwriting the
#: ones installed with the app.
os.environ['TMP'] = os.path.join(sys.path[0], '../tmp')
from enamlnative.android.app import AndroidApplication
app = AndroidApplication(
debug=True, #: Makes a lot of lag!
dev='server',
load_view=load_view,
)
app.start() | Called by PyBridge.start() | Below is the the instruction that describes the task:
### Input:
Called by PyBridge.start()
### Response:
def main():
""" Called by PyBridge.start()
"""
#: If we set the TMP env variable the dev reloader will save file
#: and load changes in this directory instead of overwriting the
#: ones installed with the app.
os.environ['TMP'] = os.path.join(sys.path[0], '../tmp')
from enamlnative.android.app import AndroidApplication
app = AndroidApplication(
debug=True, #: Makes a lot of lag!
dev='server',
load_view=load_view,
)
app.start() |
def sos(self, year):
"""Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS.
"""
try:
sos_text = self._year_info_pq(year, 'SOS').text()
except ValueError:
return None
m = re.search(r'SOS\s*:\s*(\S+)', sos_text)
if m:
return float(m.group(1))
else:
return None | Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS. | Below is the the instruction that describes the task:
### Input:
Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS.
### Response:
def sos(self, year):
"""Returns the SOS (Strength of Schedule) for a team in a year, based
on SRS.
:year: The year for the season in question.
:returns: A float of SOS.
"""
try:
sos_text = self._year_info_pq(year, 'SOS').text()
except ValueError:
return None
m = re.search(r'SOS\s*:\s*(\S+)', sos_text)
if m:
return float(m.group(1))
else:
return None |
def l2traceroute_result_output_l2_hop_results_l2_hop_egress_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute_result = ET.Element("l2traceroute_result")
config = l2traceroute_result
output = ET.SubElement(l2traceroute_result, "output")
l2_hop_results = ET.SubElement(output, "l2-hop-results")
l2_hop = ET.SubElement(l2_hop_results, "l2-hop")
egress = ET.SubElement(l2_hop, "egress")
interface_type = ET.SubElement(egress, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def l2traceroute_result_output_l2_hop_results_l2_hop_egress_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
l2traceroute_result = ET.Element("l2traceroute_result")
config = l2traceroute_result
output = ET.SubElement(l2traceroute_result, "output")
l2_hop_results = ET.SubElement(output, "l2-hop-results")
l2_hop = ET.SubElement(l2_hop_results, "l2-hop")
egress = ET.SubElement(l2_hop, "egress")
interface_type = ET.SubElement(egress, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _build_url(self, api_call):
"""Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str).
"""
if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):
if '/' not in api_call:
return "{0}/{1}/index.json".format(self.site_url, api_call)
return "{0}/{1}.json".format(self.site_url, api_call) | Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str). | Below is the the instruction that describes the task:
### Input:
Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str).
### Response:
def _build_url(self, api_call):
"""Build request url.
Parameters:
api_call (str): Base API Call.
Returns:
Complete url (str).
"""
if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):
if '/' not in api_call:
return "{0}/{1}/index.json".format(self.site_url, api_call)
return "{0}/{1}.json".format(self.site_url, api_call) |
def text_remove_empty_lines(text):
"""
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
"""
lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines) | Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace | Below is the the instruction that describes the task:
### Input:
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
### Response:
def text_remove_empty_lines(text):
"""
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
"""
lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines) |
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh) | Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file. | Below is the the instruction that describes the task:
### Input:
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
### Response:
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh) |
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
file = open( sys.argv[1], "w\n" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008, 2011 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n") | main program body | Below is the the instruction that describes the task:
### Input:
main program body
### Response:
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
file = open( sys.argv[1], "w\n" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008, 2011 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write( "static const char* const the_names[] = {\n" )
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write( "static const unsigned long the_values[] = {\n" )
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write( """
#include <stdlib.h>
#include <stdio.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
if ( value != reference )
{
result = 1;
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
return result;
}
""" )
write( "#endif /* TEST */\n" )
write("\n/* END */\n") |
def is_valid_with_config(self, config):
"""
Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool
"""
validate_values(config, [("schema", dict), ("path", str)])
validate_values(config["schema"], [("properties", dict), ("geometry", str)])
if config["schema"]["geometry"] not in [
"Geometry", "Point", "MultiPoint", "Line", "MultiLine",
"Polygon", "MultiPolygon"
]:
raise TypeError("invalid geometry type")
return True | Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool | Below is the the instruction that describes the task:
### Input:
Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool
### Response:
def is_valid_with_config(self, config):
"""
Check if output format is valid with other process parameters.
Parameters
----------
config : dictionary
output configuration parameters
Returns
-------
is_valid : bool
"""
validate_values(config, [("schema", dict), ("path", str)])
validate_values(config["schema"], [("properties", dict), ("geometry", str)])
if config["schema"]["geometry"] not in [
"Geometry", "Point", "MultiPoint", "Line", "MultiLine",
"Polygon", "MultiPolygon"
]:
raise TypeError("invalid geometry type")
return True |
def CORS(func=None):
"""
CORS support
"""
def w(r=None):
from uliweb import request, response
if request.method == 'OPTIONS':
response = Response(status=204)
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Max-Age'] = 24*3600
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
response.headers['Content-Length'] = 0
return response
elif request.method in ('GET', 'POST'):
if isinstance(r, Response):
response = r
response.headers['Access-Control-Allow-Credentials'] = 'true'
if 'Origin' in request.headers:
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
if callable(func):
@wraps(func)
def f(*arg, **kwargs):
if request.method == 'OPTIONS':
return w()
ret = func(*arg, **kwargs)
w(ret)
return ret
return f
else:
w() | CORS support | Below is the the instruction that describes the task:
### Input:
CORS support
### Response:
def CORS(func=None):
"""
CORS support
"""
def w(r=None):
from uliweb import request, response
if request.method == 'OPTIONS':
response = Response(status=204)
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Max-Age'] = 24*3600
response.headers['Content-Type'] = 'text/plain; charset=utf-8'
response.headers['Content-Length'] = 0
return response
elif request.method in ('GET', 'POST'):
if isinstance(r, Response):
response = r
response.headers['Access-Control-Allow-Credentials'] = 'true'
if 'Origin' in request.headers:
response.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'
response.headers['Access-Control-Expose-Headers'] = 'Content-Length,Content-Range'
if callable(func):
@wraps(func)
def f(*arg, **kwargs):
if request.method == 'OPTIONS':
return w()
ret = func(*arg, **kwargs)
w(ret)
return ret
return f
else:
w() |
def is_valid_email(self):
"""A bool value that indicates whether the address is a valid
email address.
Note that the check is done be matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases...
"""
return bool(self.address and Email.re_email.match(self.address)) | A bool value that indicates whether the address is a valid
email address.
Note that the check is done be matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases... | Below is the the instruction that describes the task:
### Input:
A bool value that indicates whether the address is a valid
email address.
Note that the check is done be matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases...
### Response:
def is_valid_email(self):
"""A bool value that indicates whether the address is a valid
email address.
Note that the check is done be matching to the regular expression
at Email.re_email which is very basic and far from covering end-cases...
"""
return bool(self.address and Email.re_email.match(self.address)) |
def download_kitchen(split=False):
"""Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
"""
mesh = _download_and_read('kitchen.vtk')
if not split:
return mesh
extents = {
'door' : (27, 27, 14, 18, 0, 11),
'window1' : (0, 0, 9, 18, 6, 12),
'window2' : (5, 12, 23, 23, 6, 12),
'klower1' : (17, 17, 0, 11, 0, 6),
'klower2' : (19, 19, 0, 11, 0, 6),
'klower3' : (17, 19, 0, 0, 0, 6),
'klower4' : (17, 19, 11, 11, 0, 6),
'klower5' : (17, 19, 0, 11, 0, 0),
'klower6' : (17, 19, 0, 7, 6, 6),
'klower7' : (17, 19, 9, 11, 6, 6),
'hood1' : (17, 17, 0, 11, 11, 16),
'hood2' : (19, 19, 0, 11, 11, 16),
'hood3' : (17, 19, 0, 0, 11, 16),
'hood4' : (17, 19, 11, 11, 11, 16),
'hood5' : (17, 19, 0, 11, 16, 16),
'cookingPlate' : (17, 19, 7, 9, 6, 6),
'furniture' : (17, 19, 7, 9, 11, 11),
}
kitchen = vtki.MultiBlock()
for key, extent in extents.items():
alg = vtk.vtkStructuredGridGeometryFilter()
alg.SetInputDataObject(mesh)
alg.SetExtent(extent)
alg.Update()
result = vtki.filters._get_output(alg)
kitchen[key] = result
return kitchen | Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen. | Below is the the instruction that describes the task:
### Input:
Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
### Response:
def download_kitchen(split=False):
"""Download structured grid of kitchen with velocity field. Use the
``split`` argument to extract all of the furniture in the kitchen.
"""
mesh = _download_and_read('kitchen.vtk')
if not split:
return mesh
extents = {
'door' : (27, 27, 14, 18, 0, 11),
'window1' : (0, 0, 9, 18, 6, 12),
'window2' : (5, 12, 23, 23, 6, 12),
'klower1' : (17, 17, 0, 11, 0, 6),
'klower2' : (19, 19, 0, 11, 0, 6),
'klower3' : (17, 19, 0, 0, 0, 6),
'klower4' : (17, 19, 11, 11, 0, 6),
'klower5' : (17, 19, 0, 11, 0, 0),
'klower6' : (17, 19, 0, 7, 6, 6),
'klower7' : (17, 19, 9, 11, 6, 6),
'hood1' : (17, 17, 0, 11, 11, 16),
'hood2' : (19, 19, 0, 11, 11, 16),
'hood3' : (17, 19, 0, 0, 11, 16),
'hood4' : (17, 19, 11, 11, 11, 16),
'hood5' : (17, 19, 0, 11, 16, 16),
'cookingPlate' : (17, 19, 7, 9, 6, 6),
'furniture' : (17, 19, 7, 9, 11, 11),
}
kitchen = vtki.MultiBlock()
for key, extent in extents.items():
alg = vtk.vtkStructuredGridGeometryFilter()
alg.SetInputDataObject(mesh)
alg.SetExtent(extent)
alg.Update()
result = vtki.filters._get_output(alg)
kitchen[key] = result
return kitchen |
def makeDirectoryFromAbsolutePath(absDirPath):
""" Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails
"""
assert os.path.isabs(absDirPath)
try:
os.makedirs(absDirPath)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
return absDirPath | Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails | Below is the the instruction that describes the task:
### Input:
Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails
### Response:
def makeDirectoryFromAbsolutePath(absDirPath):
""" Makes directory for the given directory path with default permissions.
If the directory already exists, it is treated as success.
absDirPath: absolute path of the directory to create.
Returns: absDirPath arg
Exceptions: OSError if directory creation fails
"""
assert os.path.isabs(absDirPath)
try:
os.makedirs(absDirPath)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
return absDirPath |
def connect(self, fun):
""" Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
"""
# Get and check name
name = fun.__name__
if not name.startswith('on_'):
raise ValueError('When connecting a function based on its name, '
'the name should start with "on_"')
eventname = name[3:]
# Get emitter
try:
emitter = self.events[eventname]
except KeyError:
raise ValueError(
'Event "%s" not available on this canvas.' %
eventname)
# Connect
emitter.connect(fun) | Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function. | Below is the the instruction that describes the task:
### Input:
Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
### Response:
def connect(self, fun):
""" Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
"""
# Get and check name
name = fun.__name__
if not name.startswith('on_'):
raise ValueError('When connecting a function based on its name, '
'the name should start with "on_"')
eventname = name[3:]
# Get emitter
try:
emitter = self.events[eventname]
except KeyError:
raise ValueError(
'Event "%s" not available on this canvas.' %
eventname)
# Connect
emitter.connect(fun) |
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys | perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid | Below is the the instruction that describes the task:
### Input:
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
### Response:
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):
'''
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments:
xolds: array or list - x values of data. Needs to be sorted in ascending order
yolds: array of list - y values of data. Has to have the same length as xolds
low: float - min value of the new x grid. By default equals to xolds[0]
high: float - max value of the new x grid. By default equals to xolds[-1]
n: int - number of points in new x grid
decay_steps: float - EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
- y values with counts less than this value will be set to NaN
Returns:
tuple sum_ys, count_ys where
xs - array with new x grid
ys - array of EMA of y at each point of the new x grid
count_ys - array of EMA of y counts at each point of the new x grid
'''
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])
assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])
assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
if luoi >= len(xolds):
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys |