code | docstring | text
---|---|---|
def generate_and_register_handle(self, prefix, location, checksum=None, additional_URLs=None, **extratypes):
'''
Register a new Handle with a unique random name (random UUID).
:param prefix: The prefix of the handle to be registered. The method
will generate a suffix.
:param location: The URL of the data entity to be referenced.
:param checksum: Optional. The checksum string.
:param extratypes: Optional. Additional key value pairs as dict.
:param additional_URLs: Optional. A list of URLs (as strings) to be
added to the handle record as 10320/LOC entry.
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
:return: The new handle name.
'''
LOGGER.debug('generate_and_register_handle...')
handle = self.generate_PID_name(prefix)
handle = self.register_handle(
handle,
location,
checksum,
additional_URLs,
overwrite=True,
**extratypes
)
return handle | Register a new Handle with a unique random name (random UUID).
:param prefix: The prefix of the handle to be registered. The method
will generate a suffix.
:param location: The URL of the data entity to be referenced.
:param checksum: Optional. The checksum string.
:param extratypes: Optional. Additional key value pairs as dict.
:param additional_URLs: Optional. A list of URLs (as strings) to be
added to the handle record as 10320/LOC entry.
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
:return: The new handle name. | Below is the instruction that describes the task:
### Input:
Register a new Handle with a unique random name (random UUID).
:param prefix: The prefix of the handle to be registered. The method
will generate a suffix.
:param location: The URL of the data entity to be referenced.
:param checksum: Optional. The checksum string.
:param extratypes: Optional. Additional key value pairs as dict.
:param additional_URLs: Optional. A list of URLs (as strings) to be
added to the handle record as 10320/LOC entry.
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
:return: The new handle name.
### Response:
def generate_and_register_handle(self, prefix, location, checksum=None, additional_URLs=None, **extratypes):
'''
Register a new Handle with a unique random name (random UUID).
:param prefix: The prefix of the handle to be registered. The method
will generate a suffix.
:param location: The URL of the data entity to be referenced.
:param checksum: Optional. The checksum string.
:param extratypes: Optional. Additional key value pairs as dict.
:param additional_URLs: Optional. A list of URLs (as strings) to be
added to the handle record as 10320/LOC entry.
:raises: :exc:`~b2handle.handleexceptions.HandleAuthenticationError`
:return: The new handle name.
'''
LOGGER.debug('generate_and_register_handle...')
handle = self.generate_PID_name(prefix)
handle = self.register_handle(
handle,
location,
checksum,
additional_URLs,
overwrite=True,
**extratypes
)
return handle |
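For context, a minimal usage sketch of the generate_and_register_handle method above is shown below. It assumes b2handle's EUDATHandleClient authenticated from a credentials JSON file; the prefix, location, checksum, and the extra 'TYPE' entry are placeholders.
```python
# Hedged sketch: assumes b2handle's EUDATHandleClient and a credentials file;
# prefix, location, checksum and the extra 'TYPE' entry are placeholders.
from b2handle.clientcredentials import PIDClientCredentials
from b2handle.handleclient import EUDATHandleClient

credentials = PIDClientCredentials.load_from_JSON('credentials.json')
client = EUDATHandleClient.instantiate_with_credentials(credentials)

new_handle = client.generate_and_register_handle(
    prefix='21.T12345',
    location='https://example.org/dataset/42',
    checksum='md5:900150983cd24fb0d6963f7d28e17f72',
    TYPE='dataset',   # arbitrary extra key/value stored in the handle record
)
print(new_handle)     # e.g. '21.T12345/<random-uuid>'
```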
def init_common_services(self, with_cloud_account=True, zone_name=None):
"""
Initialize common services.
When 'zone_name' is defined, " at $zone_name" is added to service names
:param bool with_cloud_account:
:param str zone_name:
:return: either tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services
"""
zone_names = ZoneConstants(zone_name)
type_to_app = lambda t: self.organization.applications[system_application_types.get(t, t)]
wf_service = self.organization.service(name=zone_names.DEFAULT_WORKFLOW_SERVICE,
application=type_to_app(WORKFLOW_SERVICE_TYPE),
environment=self)
key_service = self.organization.service(name=zone_names.DEFAULT_CREDENTIAL_SERVICE,
application=type_to_app(COBALT_SECURE_STORE_TYPE),
environment=self)
assert wf_service.running()
assert key_service.running()
if not with_cloud_account:
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
return wf_service, key_service
cloud_account_service = self.organization.instance(name=zone_names.DEFAULT_CLOUD_ACCOUNT_SERVICE,
application=type_to_app(CLOUD_ACCOUNT_TYPE),
environment=self,
parameters=PROVIDER_CONFIG,
destroyInterval=0)
# Immediately adding to env causes CA not to drop the destroy interval (known issue 6132), so add the service as an
# instance with destroyInterval set to 'never'
assert cloud_account_service.running()
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
env.add_service(cloud_account_service, force=True)
return wf_service, key_service, cloud_account_service | Initialize common services.
When 'zone_name' is defined, " at $zone_name" is added to service names
:param bool with_cloud_account:
:param str zone_name:
:return: either tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services | Below is the instruction that describes the task:
### Input:
Initialize common services.
When 'zone_name' is defined, " at $zone_name" is added to service names
:param bool with_cloud_account:
:param str zone_name:
:return: either tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services
### Response:
def init_common_services(self, with_cloud_account=True, zone_name=None):
"""
Initialize common services.
When 'zone_name' is defined, " at $zone_name" is added to service names
:param bool with_cloud_account:
:param str zone_name:
:return: either tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services
"""
zone_names = ZoneConstants(zone_name)
type_to_app = lambda t: self.organization.applications[system_application_types.get(t, t)]
wf_service = self.organization.service(name=zone_names.DEFAULT_WORKFLOW_SERVICE,
application=type_to_app(WORKFLOW_SERVICE_TYPE),
environment=self)
key_service = self.organization.service(name=zone_names.DEFAULT_CREDENTIAL_SERVICE,
application=type_to_app(COBALT_SECURE_STORE_TYPE),
environment=self)
assert wf_service.running()
assert key_service.running()
if not with_cloud_account:
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
return wf_service, key_service
cloud_account_service = self.organization.instance(name=zone_names.DEFAULT_CLOUD_ACCOUNT_SERVICE,
application=type_to_app(CLOUD_ACCOUNT_TYPE),
environment=self,
parameters=PROVIDER_CONFIG,
destroyInterval=0)
# Immediately adding to env causes CA not to drop the destroy interval (known issue 6132), so add the service as an
# instance with destroyInterval set to 'never'
assert cloud_account_service.running()
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
env.add_service(cloud_account_service, force=True)
return wf_service, key_service, cloud_account_service |
def deprecated(replacement=None, version=None):
"""A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>>
"""
def outer(oldfun):
def inner(*args, **kwargs):
msg = "%s is deprecated" % oldfun.__name__
if version is not None:
msg += "will be removed in version %s;" % version
if replacement is not None:
msg += "; use %s instead" % (replacement)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if callable(replacement):
return replacement(*args, **kwargs)
else:
return oldfun(*args, **kwargs)
return inner
return outer | A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>> | Below is the instruction that describes the task:
### Input:
A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>>
### Response:
def deprecated(replacement=None, version=None):
"""A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>>
"""
def outer(oldfun):
def inner(*args, **kwargs):
msg = "%s is deprecated" % oldfun.__name__
if version is not None:
msg += "will be removed in version %s;" % version
if replacement is not None:
msg += "; use %s instead" % (replacement)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if callable(replacement):
return replacement(*args, **kwargs)
else:
return oldfun(*args, **kwargs)
return inner
return outer |
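A short usage example of the decorator defined above (the old_api/new_api names are invented for illustration): the deprecated call emits a DeprecationWarning and, because a replacement is given, delegates to it.
```python
import warnings

def new_api(x):
    return x * 2

@deprecated(replacement=new_api, version='2.0')
def old_api(x):
    return x

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(old_api(21))            # 42 -- the call is forwarded to new_api
    print(caught[0].category)     # <class 'DeprecationWarning'>
```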
def has_in_repos(self, repo):
"""
:calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(repo, github.Repository.Repository), repo
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/repos/" + repo._identity
)
return status == 204 | :calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool | Below is the instruction that describes the task:
### Input:
:calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool
### Response:
def has_in_repos(self, repo):
"""
:calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(repo, github.Repository.Repository), repo
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/repos/" + repo._identity
)
return status == 204 |
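A hedged example of calling this PyGithub method from user code; the token, organization login, team id, and repository name are placeholders.
```python
from github import Github

gh = Github("ghp_placeholder_token")
team = gh.get_organization("example-org").get_team(1234567)   # numeric team id
repo = gh.get_repo("example-org/example-repo")

if team.has_in_repos(repo):
    print("Team already has access to", repo.full_name)
```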
def parse_time(block_time):
"""Take a string representation of time from the blockchain, and parse it
into a datetime object.
"""
return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc) | Take a string representation of time from the blockchain, and parse it
into a datetime object. | Below is the instruction that describes the task:
### Input:
Take a string representation of time from the blockchain, and parse it
into a datetime object.
### Response:
def parse_time(block_time):
"""Take a string representation of time from the blockchain, and parse it
into a datetime object.
"""
return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc) |
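Since timeFormat is not shown in the snippet, here is a self-contained sketch assuming the "%Y-%m-%dT%H:%M:%S" format used by Graphene-based blockchains.
```python
from datetime import datetime, timezone

timeFormat = '%Y-%m-%dT%H:%M:%S'   # assumed; not defined in the snippet above

def parse_time(block_time):
    """Parse a blockchain timestamp string into an aware UTC datetime."""
    return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc)

print(parse_time('2019-04-30T12:34:56'))   # 2019-04-30 12:34:56+00:00
```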
def url(ctx):
"""Prints the notebook url for this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon notebook url
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
response = PolyaxonClient().project.get_project(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.has_notebook:
click.echo(get_notebook_url(user, project_name))
else:
Printer.print_warning(
'This project `{}` does not have a running notebook.'.format(project_name))
click.echo('You can start a notebook with this command: polyaxon notebook start --help') | Prints the notebook url for this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon notebook url
``` | Below is the instruction that describes the task:
### Input:
Prints the notebook url for this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon notebook url
```
### Response:
def url(ctx):
"""Prints the notebook url for this project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon notebook url
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
try:
response = PolyaxonClient().project.get_project(user, project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.has_notebook:
click.echo(get_notebook_url(user, project_name))
else:
Printer.print_warning(
'This project `{}` does not have a running notebook.'.format(project_name))
click.echo('You can start a notebook with this command: polyaxon notebook start --help') |
def joliet_vd_factory(joliet, sys_ident, vol_ident, set_size, seqnum,
log_block_size, vol_set_ident, pub_ident_str,
preparer_ident_str, app_ident_str, copyright_file,
abstract_file, bibli_file, vol_expire_date, app_use, xa):
# type: (int, bytes, bytes, int, int, int, bytes, bytes, bytes, bytes, bytes, bytes, bytes, float, bytes, bool) -> PrimaryOrSupplementaryVD
'''
An internal function to create a Joliet Volume Descriptor.
Parameters:
joliet - The joliet version to use, one of 1, 2, or 3.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
Returns:
The newly created Joliet Volume Descriptor.
'''
if joliet == 1:
escape_sequence = b'%/@'
elif joliet == 2:
escape_sequence = b'%/C'
elif joliet == 3:
escape_sequence = b'%/E'
else:
raise pycdlibexception.PyCdlibInvalidInput('Invalid Joliet level; must be 1, 2, or 3')
svd = PrimaryOrSupplementaryVD(VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
svd.new(0, sys_ident, vol_ident, set_size, seqnum, log_block_size,
vol_set_ident, pub_ident_str, preparer_ident_str, app_ident_str,
copyright_file, abstract_file,
bibli_file, vol_expire_date, app_use, xa, 1, escape_sequence)
return svd | An internal function to create a Joliet Volume Descriptor.
Parameters:
joliet - The joliet version to use, one of 1, 2, or 3.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
Returns:
The newly created Joliet Volume Descriptor. | Below is the instruction that describes the task:
### Input:
An internal function to create a Joliet Volume Descriptor.
Parameters:
joliet - The joliet version to use, one of 1, 2, or 3.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
Returns:
The newly created Joliet Volume Descriptor.
### Response:
def joliet_vd_factory(joliet, sys_ident, vol_ident, set_size, seqnum,
log_block_size, vol_set_ident, pub_ident_str,
preparer_ident_str, app_ident_str, copyright_file,
abstract_file, bibli_file, vol_expire_date, app_use, xa):
# type: (int, bytes, bytes, int, int, int, bytes, bytes, bytes, bytes, bytes, bytes, bytes, float, bytes, bool) -> PrimaryOrSupplementaryVD
'''
An internal function to create a Joliet Volume Descriptor.
Parameters:
joliet - The joliet version to use, one of 1, 2, or 3.
sys_ident - The system identification string to use on the new ISO.
vol_ident - The volume identification string to use on the new ISO.
set_size - The size of the set of ISOs this ISO is a part of.
seqnum - The sequence number of the set of this ISO.
log_block_size - The logical block size to use for the ISO. While ISO9660
technically supports sizes other than 2048 (the default),
this almost certainly doesn't work.
vol_set_ident - The volume set identification string to use on the new ISO.
pub_ident_str - The publisher identification string to use on the new ISO.
preparer_ident_str - The preparer identification string to use on the new ISO.
app_ident_str - The application identification string to use on the new ISO.
copyright_file - The name of a file at the root of the ISO to use as the
copyright file.
abstract_file - The name of a file at the root of the ISO to use as the
abstract file.
bibli_file - The name of a file at the root of the ISO to use as the
bibliographic file.
vol_expire_date - The date that this ISO will expire at.
app_use - Arbitrary data that the application can stuff into the primary
volume descriptor of this ISO.
xa - Whether to add the ISO9660 Extended Attribute extensions to this
ISO. The default is False.
Returns:
The newly created Joliet Volume Descriptor.
'''
if joliet == 1:
escape_sequence = b'%/@'
elif joliet == 2:
escape_sequence = b'%/C'
elif joliet == 3:
escape_sequence = b'%/E'
else:
raise pycdlibexception.PyCdlibInvalidInput('Invalid Joliet level; must be 1, 2, or 3')
svd = PrimaryOrSupplementaryVD(VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
svd.new(0, sys_ident, vol_ident, set_size, seqnum, log_block_size,
vol_set_ident, pub_ident_str, preparer_ident_str, app_ident_str,
copyright_file, abstract_file,
bibli_file, vol_expire_date, app_use, xa, 1, escape_sequence)
return svd |
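This factory is internal to pycdlib; callers would normally trigger it indirectly through the public API, roughly as sketched below (file names and identifiers are placeholders).
```python
import io
import pycdlib

iso = pycdlib.PyCdlib()
iso.new(joliet=3, sys_ident='LINUX', vol_ident='EXAMPLE')   # Joliet SVD created internally
iso.add_fp(io.BytesIO(b'hello\n'), 6, '/HELLO.;1', joliet_path='/hello.txt')
iso.write('example.iso')
iso.close()
```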
def INIT_TLS_SESSION(self):
"""
XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key.
"""
self.cur_session = tlsSession(connection_end="server")
self.cur_session.server_certs = [self.mycert]
self.cur_session.server_key = self.mykey
if isinstance(self.mykey, PrivKeyRSA):
self.cur_session.server_rsa_key = self.mykey
# elif isinstance(self.mykey, PrivKeyECDSA):
# self.cur_session.server_ecdsa_key = self.mykey
raise self.WAITING_CLIENTFLIGHT1() | XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key. | Below is the instruction that describes the task:
### Input:
XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key.
### Response:
def INIT_TLS_SESSION(self):
"""
XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key.
"""
self.cur_session = tlsSession(connection_end="server")
self.cur_session.server_certs = [self.mycert]
self.cur_session.server_key = self.mykey
if isinstance(self.mykey, PrivKeyRSA):
self.cur_session.server_rsa_key = self.mykey
# elif isinstance(self.mykey, PrivKeyECDSA):
# self.cur_session.server_ecdsa_key = self.mykey
raise self.WAITING_CLIENTFLIGHT1() |
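This state belongs to scapy's TLS server automaton; a typical way to exercise it is sketched below (certificate and key paths are placeholders).
```python
from scapy.layers.tls.automaton_srv import TLSServerAutomaton

srv = TLSServerAutomaton(mycert='server_cert.pem',
                         mykey='server_key.pem')
srv.run()   # serves TLS handshakes, passing through INIT_TLS_SESSION first
```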
def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
"""Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters passed into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
if not isinstance(script, str): # is None:
raise Exception("script should be string")
if hyper_parameters is None:
hyper_parameters = {}
if saved_result_keys is None:
saved_result_keys = []
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
kwargs.update({'hyper_parameters': hyper_parameters})
kwargs.update({'saved_result_keys': saved_result_keys})
_script = open(script, 'rb').read()
kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
self.db.Task.insert_one(kwargs)
logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script)) | Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters passed into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)]) | Below is the the instruction that describes the task:
### Input:
Uploads a task to the database; a timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters passed into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
### Response:
def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):
"""Uploads a task to the database, timestamp will be added automatically.
Parameters
-----------
task_name : str
The task name.
script : str
File name of the python script.
hyper_parameters : dictionary
The hyper parameters passed into the script.
saved_result_keys : list of str
The keys of the task results to keep in the database when the task finishes.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
-----------
Uploads a task
>>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')
Finds and runs the latest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.DESCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", -1)])
Finds and runs the oldest task
>>> db.run_top_task(sess=sess, sort=[("time", pymongo.ASCENDING)])
>>> db.run_top_task(sess=sess, sort=[("time", 1)])
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
if not isinstance(script, str): # is None:
raise Exception("script should be string")
if hyper_parameters is None:
hyper_parameters = {}
if saved_result_keys is None:
saved_result_keys = []
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
kwargs.update({'hyper_parameters': hyper_parameters})
kwargs.update({'saved_result_keys': saved_result_keys})
_script = open(script, 'rb').read()
kwargs.update({'status': 'pending', 'script': _script, 'result': {}})
self.db.Task.insert_one(kwargs)
logging.info("[Database] Saved Task - task_name: {} script: {}".format(task_name, script)) |
def open_reader(self, file_name, reopen=False, endpoint=None, start=None, length=None, **kwargs):
"""
Open a volume file for read. A file-like object will be returned which can be used to read contents from
volume files.
:param str file_name: name of the file
:param bool reopen: whether we need to open an existing read session
:param str endpoint: tunnel service URL
:param start: start position
:param length: length limit
:param compress_option: the compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:Example:
>>> with partition.open_reader('file') as reader:
>>> [print(line) for line in reader]
"""
tunnel = self._create_volume_tunnel(endpoint=endpoint)
download_id = self._download_id if not reopen else None
download_session = tunnel.create_download_session(volume=self.volume.name, partition_spec=self.name,
file_name=file_name, download_id=download_id, **kwargs)
self._download_id = download_session.id
open_args = {}
if start is not None:
open_args['start'] = start
if length is not None:
open_args['length'] = length
return download_session.open(**open_args) | Open a volume file for read. A file-like object will be returned which can be used to read contents from
volume files.
:param str file_name: name of the file
:param bool reopen: whether we need to open an existing read session
:param str endpoint: tunnel service URL
:param start: start position
:param length: length limit
:param compress_option: the compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:Example:
>>> with partition.open_reader('file') as reader:
>>> [print(line) for line in reader] | Below is the instruction that describes the task:
### Input:
Open a volume file for read. A file-like object will be returned which can be used to read contents from
volume files.
:param str file_name: name of the file
:param bool reopen: whether we need to open an existing read session
:param str endpoint: tunnel service URL
:param start: start position
:param length: length limit
:param compress_option: the compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:Example:
>>> with partition.open_reader('file') as reader:
>>> [print(line) for line in reader]
### Response:
def open_reader(self, file_name, reopen=False, endpoint=None, start=None, length=None, **kwargs):
"""
Open a volume file for read. A file-like object will be returned which can be used to read contents from
volume files.
:param str file_name: name of the file
:param bool reopen: whether we need to open an existing read session
:param str endpoint: tunnel service URL
:param start: start position
:param length: length limit
:param compress_option: the compression algorithm, level and strategy
:type compress_option: :class:`odps.tunnel.CompressOption`
:Example:
>>> with partition.open_reader('file') as reader:
>>> [print(line) for line in reader]
"""
tunnel = self._create_volume_tunnel(endpoint=endpoint)
download_id = self._download_id if not reopen else None
download_session = tunnel.create_download_session(volume=self.volume.name, partition_spec=self.name,
file_name=file_name, download_id=download_id, **kwargs)
self._download_id = download_session.id
open_args = {}
if start is not None:
open_args['start'] = start
if length is not None:
open_args['length'] = length
return download_session.open(**open_args) |
def prepare_attachments(attachment):
"""
Converts an incoming attachment into a dictionary.
"""
if isinstance(attachment, tuple):
result = {"Name": attachment[0], "Content": attachment[1], "ContentType": attachment[2]}
if len(attachment) == 4:
result["ContentID"] = attachment[3]
elif isinstance(attachment, MIMEBase):
payload = attachment.get_payload()
content_type = attachment.get_content_type()
# Special case for message/rfc822
# Even though the RFC implies such attachments are not base64-encoded,
# Postmark requires all attachments to be encoded in this way
if content_type == "message/rfc822" and not isinstance(payload, str):
payload = b64encode(payload[0].get_payload(decode=True)).decode()
result = {
"Name": attachment.get_filename() or "attachment.txt",
"Content": payload,
"ContentType": content_type,
}
content_id = attachment.get("Content-ID")
if content_id:
if content_id.startswith("<") and content_id.endswith(">"):
content_id = content_id[1:-1]
if (attachment.get("Content-Disposition") or "").startswith("inline"):
content_id = "cid:%s" % content_id
result["ContentID"] = content_id
elif isinstance(attachment, str):
content_type = guess_content_type(attachment)
filename = os.path.basename(attachment)
with open(attachment, "rb") as fd:
data = fd.read()
result = {"Name": filename, "Content": b64encode(data).decode("utf-8"), "ContentType": content_type}
else:
result = attachment
return result | Converts an incoming attachment into a dictionary. | Below is the instruction that describes the task:
### Input:
Converts an incoming attachment into a dictionary.
### Response:
def prepare_attachments(attachment):
"""
Converts an incoming attachment into a dictionary.
"""
if isinstance(attachment, tuple):
result = {"Name": attachment[0], "Content": attachment[1], "ContentType": attachment[2]}
if len(attachment) == 4:
result["ContentID"] = attachment[3]
elif isinstance(attachment, MIMEBase):
payload = attachment.get_payload()
content_type = attachment.get_content_type()
# Special case for message/rfc822
# Even though the RFC implies such attachments are not base64-encoded,
# Postmark requires all attachments to be encoded in this way
if content_type == "message/rfc822" and not isinstance(payload, str):
payload = b64encode(payload[0].get_payload(decode=True)).decode()
result = {
"Name": attachment.get_filename() or "attachment.txt",
"Content": payload,
"ContentType": content_type,
}
content_id = attachment.get("Content-ID")
if content_id:
if content_id.startswith("<") and content_id.endswith(">"):
content_id = content_id[1:-1]
if (attachment.get("Content-Disposition") or "").startswith("inline"):
content_id = "cid:%s" % content_id
result["ContentID"] = content_id
elif isinstance(attachment, str):
content_type = guess_content_type(attachment)
filename = os.path.basename(attachment)
with open(attachment, "rb") as fd:
data = fd.read()
result = {"Name": filename, "Content": b64encode(data).decode("utf-8"), "ContentType": content_type}
else:
result = attachment
return result |
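Two illustrative calls to the helper above (the attachment data is made up): a 4-tuple gains a ContentID, and a ready-made dict passes through unchanged.
```python
from base64 import b64encode

inline_png = ('pixel.png', b64encode(b'\x89PNG...').decode(), 'image/png', 'pixel@example')
print(prepare_attachments(inline_png))
# {'Name': 'pixel.png', 'Content': 'iVBO...', 'ContentType': 'image/png', 'ContentID': 'pixel@example'}

plain = {'Name': 'notes.txt', 'Content': 'aGk=', 'ContentType': 'text/plain'}
print(prepare_attachments(plain) is plain)   # True -- dicts are returned as-is
```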
def to_dict(self, model_run):
"""Create a Json-like dictionary for a model run object. Extends the
basic object with run state, arguments, and optional prediction results
or error descriptions.
Parameters
----------
model_run : PredictionHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary.
"""
# Get the basic Json object from the super class
json_obj = super(DefaultModelRunManager, self).to_dict(model_run)
# Add run state
json_obj['state'] = ModelRunState.to_dict(model_run.state)
# Add run scheduling Timestamps
json_obj['schedule'] = model_run.schedule
# Add experiment information
json_obj['experiment'] = model_run.experiment_id
# Add model information
json_obj['model'] = model_run.model_id
# Transform dictionary of attributes into list of key-value pairs.
json_obj['arguments'] = attribute.attributes_to_dict(model_run.arguments)
# Include attachments
json_obj['attachments'] = [
attachment.to_dict()
for attachment in model_run.attachments.values()
]
return json_obj | Create a Json-like dictionary for a model run object. Extends the
basic object with run state, arguments, and optional prediction results
or error descriptions.
Parameters
----------
model_run : PredictionHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary. | Below is the instruction that describes the task:
### Input:
Create a Json-like dictionary for a model run object. Extends the
basic object with run state, arguments, and optional prediction results
or error descriptions.
Parameters
----------
model_run : PredictionHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary.
### Response:
def to_dict(self, model_run):
"""Create a Json-like dictionary for a model run object. Extends the
basic object with run state, arguments, and optional prediction results
or error descriptions.
Parameters
----------
model_run : PredictionHandle
Returns
-------
(JSON)
Json-like object, i.e., dictionary.
"""
# Get the basic Json object from the super class
json_obj = super(DefaultModelRunManager, self).to_dict(model_run)
# Add run state
json_obj['state'] = ModelRunState.to_dict(model_run.state)
# Add run scheduling Timestamps
json_obj['schedule'] = model_run.schedule
# Add experiment information
json_obj['experiment'] = model_run.experiment_id
# Add model information
json_obj['model'] = model_run.model_id
# Transform dictionary of attributes into list of key-value pairs.
json_obj['arguments'] = attribute.attributes_to_dict(model_run.arguments)
# Include attachments
json_obj['attachments'] = [
attachment.to_dict()
for attachment in model_run.attachments.values()
]
return json_obj |
def permutations(mesh,
function=lambda x: x.identifier,
displacement_max=1e-8,
count=1000,
subdivisions=2,
cutoff=3600):
"""
Permutate a mesh, record the maximum it deviates from the original mesh
and the resulting value of an identifier function.
Parameters
----------
mesh: Trimesh object
function: function which takes a single mesh as an argument
and returns an (n,) float vector
subdivisions: the maximum number of times to subdivide the mesh
count: int, number of times to permutate each subdivision step
Returns
-----------
identifiers: numpy array of identifiers
"""
identifiers = []
start = time.time()
# do subdivisions
divided = [mesh.copy()]
for j in range(subdivisions - 1):
divided.append(divided[-1].copy().subdivide())
for i, displacement in enumerate(np.linspace(0.0,
displacement_max / mesh.scale,
count)):
# get one of the subdivided meshes
current = np.random.choice(divided).copy()
if i > (count / 10):
# run first bunch without tessellation permutation
current = current.permutate.tessellation()
# after the first few displace it a lot
transformed = trimesh.permutate.transform(current)
# noisy = trimesh.permutate.noise(transformed, displacement)
identifier = function(transformed)
identifiers.append(identifier)
if (time.time() - start) > cutoff:
print('bailing for time:{} count:{}'.format(time.time() - start,
i))
return np.array(identifiers)
return np.array(identifiers) | Permutate a mesh, record the maximum it deviates from the original mesh
and the resulting value of an identifier function.
Parameters
----------
mesh: Trimesh object
function: function which takes a single mesh as an argument
and returns an (n,) float vector
subdivisions: the maximum number of times to subdivide the mesh
count: int, number of times to permutate each subdivision step
Returns
-----------
identifiers: numpy array of identifiers | Below is the instruction that describes the task:
### Input:
Permutate a mesh, record the maximum it deviates from the original mesh
and the resulting value of an identifier function.
Parameters
----------
mesh: Trimesh object
function: function which takes a single mesh as an argument
and returns an (n,) float vector
subdivisions: the maximum number of times to subdivide the mesh
count: int, number of times to permutate each subdivision step
Returns
-----------
identifiers: numpy array of identifiers
### Response:
def permutations(mesh,
function=lambda x: x.identifier,
displacement_max=1e-8,
count=1000,
subdivisions=2,
cutoff=3600):
"""
Permutate a mesh, record the maximum it deviates from the original mesh
and the resulting value of an identifier function.
Parameters
----------
mesh: Trimesh object
function: function which takes a single mesh as an argument
and returns an (n,) float vector
subdivisions: the maximum number of times to subdivide the mesh
count: int, number of times to permutate each subdivision step
Returns
-----------
identifiers: numpy array of identifiers
"""
identifiers = []
start = time.time()
# do subdivisions
divided = [mesh.copy()]
for j in range(subdivisions - 1):
divided.append(divided[-1].copy().subdivide())
for i, displacement in enumerate(np.linspace(0.0,
displacement_max / mesh.scale,
count)):
# get one of the subdivided meshes
current = np.random.choice(divided).copy()
if i > (count / 10):
# run first bunch without tessellation permutation
current = current.permutate.tessellation()
# after the first few displace it a lot
transformed = trimesh.permutate.transform(current)
# noisy = trimesh.permutate.noise(transformed, displacement)
identifier = function(transformed)
identifiers.append(identifier)
if (time.time() - start) > cutoff:
print('bailing for time:{} count:{}'.format(time.time() - start,
i))
return np.array(identifiers)
return np.array(identifiers) |
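A hypothetical driver for the helper above; it relies on trimesh's creation.icosphere and the default identifier function, and the small counts are only meant to keep the run quick.
```python
import numpy as np
import trimesh

mesh = trimesh.creation.icosphere(subdivisions=2)
ids = permutations(mesh, count=25, subdivisions=2, cutoff=60)
# One identifier vector per permutation; a small spread suggests the
# identifier is stable under remeshing and rigid transforms.
print(ids.shape, np.ptp(ids, axis=0))
```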
def transform_flask_from_import(node):
'''Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt
'''
new_names = []
# node.names is a list of 2-tuples. Each tuple consists of (name, as_name).
# So, the import would be represented as:
#
# from flask.ext import wtf as ftw, admin
#
# node.names = [('wtf', 'ftw'), ('admin', None)]
for (name, as_name) in node.names:
actual_module_name = 'flask_{}'.format(name)
new_names.append((actual_module_name, as_name or name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node | Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt | Below is the instruction that describes the task:
### Input:
Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt
### Response:
def transform_flask_from_import(node):
'''Translates a flask.ext from-style import into a non-magical import.
Translates:
from flask.ext import wtf, bcrypt as fcrypt
Into:
import flask_wtf as wtf, flask_bcrypt as fcrypt
'''
new_names = []
# node.names is a list of 2-tuples. Each tuple consists of (name, as_name).
# So, the import would be represented as:
#
# from flask.ext import wtf as ftw, admin
#
# node.names = [('wtf', 'ftw'), ('admin', None)]
for (name, as_name) in node.names:
actual_module_name = 'flask_{}'.format(name)
new_names.append((actual_module_name, as_name or name))
new_node = nodes.Import()
copy_node_info(node, new_node)
new_node.names = new_names
mark_transformed(new_node)
return new_node |
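A transform like this is normally registered with astroid's manager so that pylint sees the rewritten import; the predicate helper below is illustrative.
```python
import astroid
from astroid import nodes

def _looks_like_flask_ext(node):
    # Only rewrite "from flask.ext import ..." statements.
    return node.modname == 'flask.ext'

def register(linter):   # conventional pylint-plugin entry point
    astroid.MANAGER.register_transform(
        nodes.ImportFrom, transform_flask_from_import, _looks_like_flask_ext)
```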
def getChangeSets(self):
"""Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
"""
changeset_tag = ("rtc_cm:com.ibm.team.filesystem.workitems."
"change_set.com.ibm.team.scm.ChangeSet")
return (self.rtc_obj
._get_paged_resources("ChangeSet",
workitem_id=self.identifier,
customized_attr=changeset_tag,
page_size="10")) | Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list | Below is the instruction that describes the task:
### Input:
Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
### Response:
def getChangeSets(self):
"""Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
"""
changeset_tag = ("rtc_cm:com.ibm.team.filesystem.workitems."
"change_set.com.ibm.team.scm.ChangeSet")
return (self.rtc_obj
._get_paged_resources("ChangeSet",
workitem_id=self.identifier,
customized_attr=changeset_tag,
page_size="10")) |
def _get_size(self):
"""
Callable that returns the current `Size`, required by Vt100_Output.
"""
if self._chan is None:
return Size(rows=20, columns=79)
else:
width, height, pixwidth, pixheight = self._chan.get_terminal_size()
return Size(rows=height, columns=width) | Callable that returns the current `Size`, required by Vt100_Output. | Below is the the instruction that describes the task:
### Input:
Callable that returns the current `Size`, required by Vt100_Output.
### Response:
def _get_size(self):
"""
Callable that returns the current `Size`, required by Vt100_Output.
"""
if self._chan is None:
return Size(rows=20, columns=79)
else:
width, height, pixwidth, pixheight = self._chan.get_terminal_size()
return Size(rows=height, columns=width) |
def pre_call(self, ctxt, pre_mod, post_mod, action):
"""
A modifier hook function. This is called in priority order prior
to invoking the ``Action`` for the step. This allows a
modifier to alter the context, or to take over subsequent
action invocation.
:param ctxt: The context object.
:param pre_mod: A list of the modifiers preceding this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param post_mod: A list of the modifiers following this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param action: The action that will be performed.
:returns: A ``None`` return value indicates that the modifier
is taking no action. A non-``None`` return value
should consist of a ``StepResult`` object; this will
suspend further ``pre_call()`` processing and
proceed to the ``post_call()`` processing. This
implementation returns a ``StepResult`` with state
``SKIPPED`` if the condition does not evaluate to
``True``.
"""
# Check the condition
if not self.condition(ctxt):
return steps.StepResult(state=steps.SKIPPED)
return None | A modifier hook function. This is called in priority order prior
to invoking the ``Action`` for the step. This allows a
modifier to alter the context, or to take over subsequent
action invocation.
:param ctxt: The context object.
:param pre_mod: A list of the modifiers preceding this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param post_mod: A list of the modifiers following this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param action: The action that will be performed.
:returns: A ``None`` return value indicates that the modifier
is taking no action. A non-``None`` return value
should consist of a ``StepResult`` object; this will
suspend further ``pre_call()`` processing and
proceed to the ``post_call()`` processing. This
implementation returns a ``StepResult`` with state
``SKIPPED`` if the condition does not evaluate to
``True``. | Below is the instruction that describes the task:
### Input:
A modifier hook function. This is called in priority order prior
to invoking the ``Action`` for the step. This allows a
modifier to alter the context, or to take over subsequent
action invocation.
:param ctxt: The context object.
:param pre_mod: A list of the modifiers preceding this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param post_mod: A list of the modifiers following this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param action: The action that will be performed.
:returns: A ``None`` return value indicates that the modifier
is taking no action. A non-``None`` return value
should consist of a ``StepResult`` object; this will
suspend further ``pre_call()`` processing and
proceed to the ``post_call()`` processing. This
implementation returns a ``StepResult`` with state
``SKIPPED`` if the condition does not evaluate to
``True``.
### Response:
def pre_call(self, ctxt, pre_mod, post_mod, action):
"""
A modifier hook function. This is called in priority order prior
to invoking the ``Action`` for the step. This allows a
modifier to alter the context, or to take over subsequent
action invocation.
:param ctxt: The context object.
:param pre_mod: A list of the modifiers preceding this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param post_mod: A list of the modifiers following this
modifier in the list of modifiers that is
applicable to the action. This list is in
priority order.
:param action: The action that will be performed.
:returns: A ``None`` return value indicates that the modifier
is taking no action. A non-``None`` return value
should consist of a ``StepResult`` object; this will
suspend further ``pre_call()`` processing and
proceed to the ``post_call()`` processing. This
implementation returns a ``StepResult`` with state
``SKIPPED`` if the condition does not evaluate to
``True``.
"""
# Check the condition
if not self.condition(ctxt):
return steps.StepResult(state=steps.SKIPPED)
return None |
def add_automation_link(testcase):
"""Appends link to automation script to the test description."""
automation_link = (
'<a href="{}">Test Source</a>'.format(testcase["automation_script"])
if testcase.get("automation_script")
else ""
)
testcase["description"] = "{}<br/>{}".format(testcase.get("description") or "", automation_link) | Appends link to automation script to the test description. | Below is the the instruction that describes the task:
### Input:
Appends link to automation script to the test description.
### Response:
def add_automation_link(testcase):
"""Appends link to automation script to the test description."""
automation_link = (
'<a href="{}">Test Source</a>'.format(testcase["automation_script"])
if testcase.get("automation_script")
else ""
)
testcase["description"] = "{}<br/>{}".format(testcase.get("description") or "", automation_link) |
def get_relationships_for_destination_on_date(self, destination_id, from_, to):
"""Gets a ``RelationshipList`` corresponding to the given peer ``Id`` with a starting effective date in the given range inclusive.
arg: destination_id (osid.id.Id): a peer ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.relationship.RelationshipList) - the relationships
raise: InvalidArgument - ``from is greater than to``
raise: NullArgument - ``destination_id, from`` ,or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_destination_on_date
relationship_list = []
for relationship in self.get_relationships_for_destination():
if overlap(from_, to, relationship.start_date, relationship.end_date):
relationship_list.append(relationship)
return objects.RelationshipList(relationship_list, runtime=self._runtime) | Gets a ``RelationshipList`` corresponding to the given peer ``Id`` with a starting effective date in the given range inclusive.
arg: destination_id (osid.id.Id): a peer ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.relationship.RelationshipList) - the relationships
raise: InvalidArgument - ``from is greater than to``
raise: NullArgument - ``destination_id, from`` ,or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets a ``RelationshipList`` corresponding to the given peer ``Id`` with a starting effective date in the given range inclusive.
arg: destination_id (osid.id.Id): a peer ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.relationship.RelationshipList) - the relationships
raise: InvalidArgument - ``from is greater than to``
raise: NullArgument - ``destination_id, from`` ,or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_relationships_for_destination_on_date(self, destination_id, from_, to):
"""Gets a ``RelationshipList`` corresponding to the given peer ``Id`` with a starting effective date in the given range inclusive.
arg: destination_id (osid.id.Id): a peer ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.relationship.RelationshipList) - the relationships
raise: InvalidArgument - ``from is greater than to``
raise: NullArgument - ``destination_id, from`` ,or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_destination_on_date
relationship_list = []
for relationship in self.get_relationships_for_destination():
if overlap(from_, to, relationship.start_date, relationship.end_date):
relationship_list.append(relationship)
return objects.RelationshipList(relationship_list, runtime=self._runtime) |
def _expand_batch(cls, batch):
"""Deserializes a Batch's header, and the header of its Transactions.
"""
cls._parse_header(BatchHeader, batch)
if 'transactions' in batch:
batch['transactions'] = [
cls._expand_transaction(t) for t in batch['transactions']]
return batch | Deserializes a Batch's header, and the header of its Transactions. | Below is the instruction that describes the task:
### Input:
Deserializes a Batch's header, and the header of its Transactions.
### Response:
def _expand_batch(cls, batch):
"""Deserializes a Batch's header, and the header of its Transactions.
"""
cls._parse_header(BatchHeader, batch)
if 'transactions' in batch:
batch['transactions'] = [
cls._expand_transaction(t) for t in batch['transactions']]
return batch |
def apply(self, im):
"""
Apply an n-dimensional displacement by shifting an image or volume.
Parameters
----------
im : ndarray
The image or volume to shift
"""
from scipy.ndimage.interpolation import shift
return shift(im, map(lambda x: -x, self.delta), mode='nearest') | Apply an n-dimensional displacement by shifting an image or volume.
Parameters
----------
im : ndarray
The image or volume to shift | Below is the instruction that describes the task:
### Input:
Apply an n-dimensional displacement by shifting an image or volume.
Parameters
----------
im : ndarray
The image or volume to shift
### Response:
def apply(self, im):
"""
Apply an n-dimensional displacement by shifting an image or volume.
Parameters
----------
im : ndarray
The image or volume to shift
"""
from scipy.ndimage.interpolation import shift
return shift(im, map(lambda x: -x, self.delta), mode='nearest') |
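A standalone illustration of the underlying SciPy call: the method above shifts by the negated displacement, which for a 1-D signal looks like this (the displacement value is made up).
```python
import numpy as np
from scipy.ndimage import shift

signal = np.array([0., 1., 2., 3., 4.])
delta = [2]   # hypothetical displacement found by registration
print(shift(signal, [-d for d in delta], mode='nearest'))
# approximately [2. 3. 4. 4. 4.] -- the boundary is filled with the nearest value
```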
def delete(self, uri, logon_required=True):
"""
Perform the HTTP DELETE method against the resource identified by a
URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g.
"/api/session/{session-id}".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, for the logoff operation, it
does not make sense to first log on.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if logon_required:
self.logon()
url = self.base_url + uri
self._log_http_request('DELETE', url, headers=self.headers)
stats = self.time_stats_keeper.get_stats('delete ' + uri)
stats.begin()
req = self._session or requests
req_timeout = (self.retry_timeout_config.connect_timeout,
self.retry_timeout_config.read_timeout)
try:
result = req.delete(url, headers=self.headers, verify=False,
timeout=req_timeout)
except requests.exceptions.RequestException as exc:
_handle_request_exc(exc, self.retry_timeout_config)
finally:
stats.end()
self._log_http_response('DELETE', url,
status=result.status_code,
headers=result.headers,
content=result.content)
if result.status_code in (200, 204):
return
elif result.status_code == 403:
result_object = _result_object(result)
reason = result_object.get('reason', None)
if reason == 5:
# API session token expired: re-logon and retry
self._do_logon()
self.delete(uri, logon_required)
return
else:
msg = result_object.get('message', None)
raise ServerAuthError("HTTP authentication failed: {}".
format(msg), HTTPError(result_object))
else:
result_object = _result_object(result)
raise HTTPError(result_object) | Perform the HTTP DELETE method against the resource identified by a
URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g.
"/api/session/{session-id}".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, for the logoff operation, it
does not make sense to first log on.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
          :exc:`~zhmcclient.ConnectionError` | Below is the instruction that describes the task:
### Input:
Perform the HTTP DELETE method against the resource identified by a
URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g.
"/api/session/{session-id}".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, for the logoff operation, it
does not make sense to first log on.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def delete(self, uri, logon_required=True):
"""
Perform the HTTP DELETE method against the resource identified by a
URI.
A set of standard HTTP headers is automatically part of the request.
If the HMC session token is expired, this method re-logs on and retries
the operation.
Parameters:
uri (:term:`string`):
Relative URI path of the resource, e.g.
"/api/session/{session-id}".
This URI is relative to the base URL of the session (see
the :attr:`~zhmcclient.Session.base_url` property).
Must not be `None`.
logon_required (bool):
Boolean indicating whether the operation requires that the session
is logged on to the HMC. For example, for the logoff operation, it
does not make sense to first log on.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ClientAuthError`
:exc:`~zhmcclient.ServerAuthError`
:exc:`~zhmcclient.ConnectionError`
"""
if logon_required:
self.logon()
url = self.base_url + uri
self._log_http_request('DELETE', url, headers=self.headers)
stats = self.time_stats_keeper.get_stats('delete ' + uri)
stats.begin()
req = self._session or requests
req_timeout = (self.retry_timeout_config.connect_timeout,
self.retry_timeout_config.read_timeout)
try:
result = req.delete(url, headers=self.headers, verify=False,
timeout=req_timeout)
except requests.exceptions.RequestException as exc:
_handle_request_exc(exc, self.retry_timeout_config)
finally:
stats.end()
self._log_http_response('DELETE', url,
status=result.status_code,
headers=result.headers,
content=result.content)
if result.status_code in (200, 204):
return
elif result.status_code == 403:
result_object = _result_object(result)
reason = result_object.get('reason', None)
if reason == 5:
# API session token expired: re-logon and retry
self._do_logon()
self.delete(uri, logon_required)
return
else:
msg = result_object.get('message', None)
raise ServerAuthError("HTTP authentication failed: {}".
format(msg), HTTPError(result_object))
else:
result_object = _result_object(result)
raise HTTPError(result_object) |
def ks_unif_durbin_recurrence_rational(samples, statistic):
"""
Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
"""
t = statistic * samples
# Python 3: int()s can be skipped.
ft1 = int(floor(t)) + 1
fmt1 = int(floor(-t)) + 1
fdt1 = int(floor(2 * t)) + 1
qs = [Fraction(i ** i, factorial(i)) for i in range(ft1)]
qs.extend(Fraction(i ** i, factorial(i)) - 2 * t *
sum((t + j) ** (j - 1) / factorial(j) *
(i - t - j) ** (i - j) / factorial(i - j)
for j in range(i + fmt1))
for i in range(ft1, fdt1))
qs.extend(-sum((-1) ** j * (2 * t - j) ** j / factorial(j) * qs[i - j]
for j in range(1, fdt1))
for i in range(fdt1, samples + 1))
return qs[samples] * factorial(samples) / samples ** samples | Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
    the result is also a Fraction. See: doi:10.18637/jss.v026.i02. | Below is the instruction that describes the task:
### Input:
Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
### Response:
def ks_unif_durbin_recurrence_rational(samples, statistic):
"""
Calculates the probability that the statistic is less than the given value,
using Durbin's recurrence and employing the standard fractions module.
This is a (hopefully) exact reference implementation, likely too slow for
practical usage. The statistic should be given as a Fraction instance and
the result is also a Fraction. See: doi:10.18637/jss.v026.i02.
"""
t = statistic * samples
# Python 3: int()s can be skipped.
ft1 = int(floor(t)) + 1
fmt1 = int(floor(-t)) + 1
fdt1 = int(floor(2 * t)) + 1
qs = [Fraction(i ** i, factorial(i)) for i in range(ft1)]
qs.extend(Fraction(i ** i, factorial(i)) - 2 * t *
sum((t + j) ** (j - 1) / factorial(j) *
(i - t - j) ** (i - j) / factorial(i - j)
for j in range(i + fmt1))
for i in range(ft1, fdt1))
qs.extend(-sum((-1) ** j * (2 * t - j) ** j / factorial(j) * qs[i - j]
for j in range(1, fdt1))
for i in range(fdt1, samples + 1))
return qs[samples] * factorial(samples) / samples ** samples |
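A hedged check of the routine above, assuming the function and its imports (Fraction, factorial, floor) are available in the same module; the sample arguments are arbitrary.

from fractions import Fraction

# Exact P(D_n < statistic) for n = 5 samples at statistic = 1/2.
p = ks_unif_durbin_recurrence_rational(5, Fraction(1, 2))
print(p)          # an exact Fraction
print(float(p))   # should lie in [0, 1]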
def to_ip(self):
"""Return of copy of the data inside a TDIP container
"""
if 'chargeability' in self.data.columns:
tdip = reda.TDIP(data=self.data)
else:
raise Exception('Missing column "chargeability"')
return tdip | Return of copy of the data inside a TDIP container | Below is the the instruction that describes the task:
### Input:
Return of copy of the data inside a TDIP container
### Response:
def to_ip(self):
"""Return of copy of the data inside a TDIP container
"""
if 'chargeability' in self.data.columns:
tdip = reda.TDIP(data=self.data)
else:
raise Exception('Missing column "chargeability"')
return tdip |
def format(self, record):
"""
:param logging.LogRecord record:
"""
super(HtmlFormatter, self).format(record)
if record.funcName:
record.funcName = escape_html(str(record.funcName))
if record.name:
record.name = escape_html(str(record.name))
if record.msg:
record.msg = escape_html(record.getMessage())
if self.use_emoji:
if record.levelno == logging.DEBUG:
record.levelname += ' ' + EMOJI.WHITE_CIRCLE
elif record.levelno == logging.INFO:
record.levelname += ' ' + EMOJI.BLUE_CIRCLE
else:
record.levelname += ' ' + EMOJI.RED_CIRCLE
if hasattr(self, '_style'):
return self._style.format(record)
else:
# py2.7 branch
            return self._fmt % record.__dict__ | :param logging.LogRecord record: | Below is the instruction that describes the task:
### Input:
:param logging.LogRecord record:
### Response:
def format(self, record):
"""
:param logging.LogRecord record:
"""
super(HtmlFormatter, self).format(record)
if record.funcName:
record.funcName = escape_html(str(record.funcName))
if record.name:
record.name = escape_html(str(record.name))
if record.msg:
record.msg = escape_html(record.getMessage())
if self.use_emoji:
if record.levelno == logging.DEBUG:
record.levelname += ' ' + EMOJI.WHITE_CIRCLE
elif record.levelno == logging.INFO:
record.levelname += ' ' + EMOJI.BLUE_CIRCLE
else:
record.levelname += ' ' + EMOJI.RED_CIRCLE
if hasattr(self, '_style'):
return self._style.format(record)
else:
# py2.7 branch
return self._fmt % record.__dict__ |
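A self-contained variant of the same escaping idea using only the standard library; it is not the original package's formatter, just a sketch of escaping the rendered message before HTML output.

import html
import logging

class SimpleHtmlFormatter(logging.Formatter):
    def format(self, record):
        # render and escape the message once, then let the base class format it
        record.msg = html.escape(record.getMessage())
        record.args = ()
        return super(SimpleHtmlFormatter, self).format(record)

handler = logging.StreamHandler()
handler.setFormatter(SimpleHtmlFormatter("<b>%(levelname)s</b> %(message)s"))
logging.getLogger("demo").addHandler(handler)
logging.getLogger("demo").warning("<script>alert(1)</script>")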
def heartbeat(self):
"""Periodically send heartbeats."""
while self._manager.is_active and not self._stop_event.is_set():
self._manager.heartbeat()
_LOGGER.debug("Sent heartbeat.")
self._stop_event.wait(timeout=self._period)
        _LOGGER.info("%s exiting.", _HEARTBEAT_WORKER_NAME) | Periodically send heartbeats. | Below is the instruction that describes the task:
### Input:
Periodically send heartbeats.
### Response:
def heartbeat(self):
"""Periodically send heartbeats."""
while self._manager.is_active and not self._stop_event.is_set():
self._manager.heartbeat()
_LOGGER.debug("Sent heartbeat.")
self._stop_event.wait(timeout=self._period)
_LOGGER.info("%s exiting.", _HEARTBEAT_WORKER_NAME) |
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0) | Returns a summary dict.
Returns:
            dict | Below is the instruction that describes the task:
### Input:
Returns a summary dict.
Returns:
dict
### Response:
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0) |
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix) | Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
      text: Source text. | Below is the instruction that describes the task:
### Input:
Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
### Response:
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix) |
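For context, patch_addContext is an internal step of the diff-match-patch port; a hedged sketch of the public patch workflow, assuming the standard diff_match_patch package is installed.

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
patches = dmp.patch_make("the quick brown fox", "the quick red fox")
print(dmp.patch_toText(patches))                   # serialized patches with surrounding context
new_text, applied = dmp.patch_apply(patches, "the quick brown fox jumps")
print(new_text, applied)                           # patched text and per-patch success flags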
def run(self, steps=0, force=False, ipyclient=None,
show_cluster=0, **kwargs):
"""
Run assembly steps of an ipyrad analysis. Enter steps as a string,
e.g., "1", "123", "12345". This step checks for an existing
ipcluster instance otherwise it raises an exception. The ipyparallel
connection is made using information from the _ipcluster dict of the
Assembly class object.
"""
## check that mindepth params are compatible, fix and report warning.
self._compatible_params_check()
## wrap everything in a try statement to ensure that we save the
## Assembly object if it is interrupted at any point, and also
## to ensure proper cleanup of the ipyclient.
inst = None
try:
## use an existing ipcluster instance
if not ipyclient:
args = self._ipcluster.items() + [("spacer", self._spacer)]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print a message about the cluster status
## if MPI setup then we are going to wait until all engines are
## ready so that we can print how many cores started on each
## host machine exactly.
if (self._cli) or show_cluster:
ip.cluster_info(ipyclient=ipyclient, spacer=self._spacer)
## get the list of steps to run
if isinstance(steps, int):
steps = str(steps)
steps = sorted(list(steps))
## print an Assembly name header if inside API
if not self._cli:
print("Assembly: {}".format(self.name))
## store ipyclient engine pids to the Assembly so we can
## hard-interrupt them later if assembly is interrupted.
## Only stores pids of engines that aren't busy at this moment,
## otherwise it would block here while waiting to find their pids.
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
#ipyclient[:].apply(os.getpid).get_dict()
## has many fixed arguments right now, but we may add these to
## hackerz_only, or they may be accessed in the API.
if '1' in steps:
self._step1func(force, ipyclient)
self.save()
ipyclient.purge_everything()
if '2' in steps:
self._step2func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '3' in steps:
self._step3func(samples=None, noreverse=0, force=force,
maxindels=8, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '4' in steps:
self._step4func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '5' in steps:
self._step5func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '6' in steps:
self._step6func(samples=None, noreverse=0, randomseed=12345,
force=force, ipyclient=ipyclient, **kwargs)
self.save()
ipyclient.purge_everything()
if '7' in steps:
self._step7func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
print("\n Keyboard Interrupt by user")
LOGGER.info("assembly interrupted by user.")
except IPyradWarningExit as inst:
LOGGER.error("IPyradWarningExit: %s", inst)
print("\n Encountered an error (see details in ./ipyrad_log.txt)"+\
"\n Error summary is below -------------------------------"+\
"\n{}".format(inst))
except Exception as inst:
LOGGER.error(inst)
print("\n Encountered an unexpected error (see ./ipyrad_log.txt)"+\
"\n Error message is below -------------------------------"+\
"\n{}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self.save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
try:
ipyclient.abort()
time.sleep(1)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
LOGGER.info('interrupted engine {} w/ SIGINT to {}'\
.format(engine_id, pid))
time.sleep(1)
except ipp.NoEnginesRegistered:
pass
## if CLI, stop jobs and shutdown. Don't use _cli here
## because you can have a CLI object but use the --ipcluster
## flag, in which case we don't want to kill ipcluster.
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
LOGGER.info(" shutting down engines")
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
LOGGER.info(" finished shutdown")
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## if exception is close and save, print and ignore
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2))
LOGGER.error("shutdown warning: %s", inst2) | Run assembly steps of an ipyrad analysis. Enter steps as a string,
e.g., "1", "123", "12345". This step checks for an existing
ipcluster instance otherwise it raises an exception. The ipyparallel
connection is made using information from the _ipcluster dict of the
        Assembly class object. | Below is the instruction that describes the task:
### Input:
Run assembly steps of an ipyrad analysis. Enter steps as a string,
e.g., "1", "123", "12345". This step checks for an existing
ipcluster instance otherwise it raises an exception. The ipyparallel
connection is made using information from the _ipcluster dict of the
Assembly class object.
### Response:
def run(self, steps=0, force=False, ipyclient=None,
show_cluster=0, **kwargs):
"""
Run assembly steps of an ipyrad analysis. Enter steps as a string,
e.g., "1", "123", "12345". This step checks for an existing
ipcluster instance otherwise it raises an exception. The ipyparallel
connection is made using information from the _ipcluster dict of the
Assembly class object.
"""
## check that mindepth params are compatible, fix and report warning.
self._compatible_params_check()
## wrap everything in a try statement to ensure that we save the
## Assembly object if it is interrupted at any point, and also
## to ensure proper cleanup of the ipyclient.
inst = None
try:
## use an existing ipcluster instance
if not ipyclient:
args = self._ipcluster.items() + [("spacer", self._spacer)]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print a message about the cluster status
## if MPI setup then we are going to wait until all engines are
## ready so that we can print how many cores started on each
## host machine exactly.
if (self._cli) or show_cluster:
ip.cluster_info(ipyclient=ipyclient, spacer=self._spacer)
## get the list of steps to run
if isinstance(steps, int):
steps = str(steps)
steps = sorted(list(steps))
## print an Assembly name header if inside API
if not self._cli:
print("Assembly: {}".format(self.name))
## store ipyclient engine pids to the Assembly so we can
## hard-interrupt them later if assembly is interrupted.
## Only stores pids of engines that aren't busy at this moment,
## otherwise it would block here while waiting to find their pids.
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
#ipyclient[:].apply(os.getpid).get_dict()
## has many fixed arguments right now, but we may add these to
## hackerz_only, or they may be accessed in the API.
if '1' in steps:
self._step1func(force, ipyclient)
self.save()
ipyclient.purge_everything()
if '2' in steps:
self._step2func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '3' in steps:
self._step3func(samples=None, noreverse=0, force=force,
maxindels=8, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '4' in steps:
self._step4func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '5' in steps:
self._step5func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '6' in steps:
self._step6func(samples=None, noreverse=0, randomseed=12345,
force=force, ipyclient=ipyclient, **kwargs)
self.save()
ipyclient.purge_everything()
if '7' in steps:
self._step7func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
print("\n Keyboard Interrupt by user")
LOGGER.info("assembly interrupted by user.")
except IPyradWarningExit as inst:
LOGGER.error("IPyradWarningExit: %s", inst)
print("\n Encountered an error (see details in ./ipyrad_log.txt)"+\
"\n Error summary is below -------------------------------"+\
"\n{}".format(inst))
except Exception as inst:
LOGGER.error(inst)
print("\n Encountered an unexpected error (see ./ipyrad_log.txt)"+\
"\n Error message is below -------------------------------"+\
"\n{}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self.save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
try:
ipyclient.abort()
time.sleep(1)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
LOGGER.info('interrupted engine {} w/ SIGINT to {}'\
.format(engine_id, pid))
time.sleep(1)
except ipp.NoEnginesRegistered:
pass
## if CLI, stop jobs and shutdown. Don't use _cli here
## because you can have a CLI object but use the --ipcluster
## flag, in which case we don't want to kill ipcluster.
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
LOGGER.info(" shutting down engines")
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
LOGGER.info(" finished shutdown")
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## if exception is close and save, print and ignore
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2))
LOGGER.error("shutdown warning: %s", inst2) |
def get_node_config(self, jid, node=None):
"""
Request the configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The configuration of the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised.
"""
iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.GET)
iq.payload = pubsub_xso.OwnerRequest(
pubsub_xso.OwnerConfigure(node=node)
)
response = yield from self.client.send(iq)
return response.payload.data | Request the configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The configuration of the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
        raised. | Below is the instruction that describes the task:
### Input:
Request the configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The configuration of the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised.
### Response:
def get_node_config(self, jid, node=None):
"""
Request the configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The configuration of the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised.
"""
iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.GET)
iq.payload = pubsub_xso.OwnerRequest(
pubsub_xso.OwnerConfigure(node=node)
)
response = yield from self.client.send(iq)
return response.payload.data |
def get(self):
"""
Get a JSON-ready representation of this BCCSettings.
:returns: This BCCSettings, ready for use in a request body.
:rtype: dict
"""
bcc_settings = {}
if self.enable is not None:
bcc_settings["enable"] = self.enable
if self.email is not None:
bcc_settings["email"] = self.email.get()
return bcc_settings | Get a JSON-ready representation of this BCCSettings.
:returns: This BCCSettings, ready for use in a request body.
        :rtype: dict | Below is the instruction that describes the task:
### Input:
Get a JSON-ready representation of this BCCSettings.
:returns: This BCCSettings, ready for use in a request body.
:rtype: dict
### Response:
def get(self):
"""
Get a JSON-ready representation of this BCCSettings.
:returns: This BCCSettings, ready for use in a request body.
:rtype: dict
"""
bcc_settings = {}
if self.enable is not None:
bcc_settings["enable"] = self.enable
if self.email is not None:
bcc_settings["email"] = self.email.get()
return bcc_settings |
def bitmap2RRlist(bitmap):
"""
Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
integer list.
"""
# RFC 4034, 4.1.2. The Type Bit Maps Field
RRlist = []
while bitmap:
if len(bitmap) < 2:
warning("bitmap too short (%i)" % len(bitmap))
return
#window_block = ord(bitmap[0]) # window number
window_block = (bitmap[0]) # window number
        offset = 256*window_block # offset of the Resource Record
#bitmap_len = ord(bitmap[0]) # length of the bitmap in bytes
bitmap_len = (bitmap[1]) # length of the bitmap in bytes
if bitmap_len <= 0 or bitmap_len > 32:
            warning("bitmap length is not valid (%i)" % bitmap_len)
return
tmp_bitmap = bitmap[2:2+bitmap_len]
# Let's compare each bit of tmp_bitmap and compute the real RR value
for b in range(len(tmp_bitmap)):
v = 128
for i in range(8):
#if ord(tmp_bitmap[b]) & v:
if (tmp_bitmap[b]) & v:
# each of the RR is encoded as a bit
RRlist += [ offset + b*8 + i ]
v = v >> 1
# Next block if any
bitmap = bitmap[2+bitmap_len:]
return RRlist | Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
    integer list. | Below is the instruction that describes the task:
### Input:
Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
integer list.
### Response:
def bitmap2RRlist(bitmap):
"""
Decode the 'Type Bit Maps' field of the NSEC Resource Record into an
integer list.
"""
# RFC 4034, 4.1.2. The Type Bit Maps Field
RRlist = []
while bitmap:
if len(bitmap) < 2:
warning("bitmap too short (%i)" % len(bitmap))
return
#window_block = ord(bitmap[0]) # window number
window_block = (bitmap[0]) # window number
        offset = 256*window_block # offset of the Resource Record
#bitmap_len = ord(bitmap[0]) # length of the bitmap in bytes
bitmap_len = (bitmap[1]) # length of the bitmap in bytes
if bitmap_len <= 0 or bitmap_len > 32:
            warning("bitmap length is not valid (%i)" % bitmap_len)
return
tmp_bitmap = bitmap[2:2+bitmap_len]
# Let's compare each bit of tmp_bitmap and compute the real RR value
for b in range(len(tmp_bitmap)):
v = 128
for i in range(8):
#if ord(tmp_bitmap[b]) & v:
if (tmp_bitmap[b]) & v:
# each of the RR is encoded as a bit
RRlist += [ offset + b*8 + i ]
v = v >> 1
# Next block if any
bitmap = bitmap[2+bitmap_len:]
return RRlist |
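A worked example for the decoder above: window 0 with the 2-byte bitmap 0x62 0x00 sets bits 1, 2 and 6, i.e. RR types A (1), NS (2) and SOA (6). The import path is an assumption about where the helper lives.

from scapy.layers.dns import bitmap2RRlist   # assumed import path

# 0x62 = 0110 0010 -> bits 1, 2 and 6 of window 0
print(bitmap2RRlist(b"\x00\x02\x62\x00"))    # [1, 2, 6]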
def save():
'''save is a view to save data. We might want to adjust this to allow for
updating saved data, but given single file is just one post for now
'''
if request.method == 'POST':
exp_id = session.get('exp_id')
app.logger.debug('Saving data for %s' %exp_id)
fields = get_post_fields(request)
result_file = app.save_data(session=session, content=fields, exp_id=exp_id)
experiments = app.finish_experiment(session, exp_id)
app.logger.info('Finished %s, %s remaining.' % (exp_id, len(experiments)))
# Note, this doesn't seem to be enough to trigger ajax success
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
return json.dumps({'success':False}), 403, {'ContentType':'application/json'} | save is a view to save data. We might want to adjust this to allow for
    updating saved data, but given single file is just one post for now | Below is the instruction that describes the task:
### Input:
save is a view to save data. We might want to adjust this to allow for
updating saved data, but given single file is just one post for now
### Response:
def save():
'''save is a view to save data. We might want to adjust this to allow for
updating saved data, but given single file is just one post for now
'''
if request.method == 'POST':
exp_id = session.get('exp_id')
app.logger.debug('Saving data for %s' %exp_id)
fields = get_post_fields(request)
result_file = app.save_data(session=session, content=fields, exp_id=exp_id)
experiments = app.finish_experiment(session, exp_id)
app.logger.info('Finished %s, %s remaining.' % (exp_id, len(experiments)))
# Note, this doesn't seem to be enough to trigger ajax success
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
return json.dumps({'success':False}), 403, {'ContentType':'application/json'} |
def _is_second_run():
"""Returns `True` when we know that `fuck` called second time."""
tracker_path = _get_not_configured_usage_tracker_path()
if not tracker_path.exists():
return False
current_pid = _get_shell_pid()
with tracker_path.open('r') as tracker:
try:
info = json.load(tracker)
except ValueError:
return False
if not (isinstance(info, dict) and info.get('pid') == current_pid):
return False
return (_get_previous_command() == 'fuck' or
            time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT) | Returns `True` when we know that `fuck` called second time. | Below is the instruction that describes the task:
### Input:
Returns `True` when we know that `fuck` called second time.
### Response:
def _is_second_run():
"""Returns `True` when we know that `fuck` called second time."""
tracker_path = _get_not_configured_usage_tracker_path()
if not tracker_path.exists():
return False
current_pid = _get_shell_pid()
with tracker_path.open('r') as tracker:
try:
info = json.load(tracker)
except ValueError:
return False
if not (isinstance(info, dict) and info.get('pid') == current_pid):
return False
return (_get_previous_command() == 'fuck' or
time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT) |
def normalize(data):
"""Normalize the data to be in the [0, 1] range.
:param data:
:return: normalized data
"""
out_data = data.copy()
for i, sample in enumerate(out_data):
out_data[i] /= sum(out_data[i])
return out_data | Normalize the data to be in the [0, 1] range.
:param data:
    :return: normalized data | Below is the instruction that describes the task:
### Input:
Normalize the data to be in the [0, 1] range.
:param data:
:return: normalized data
### Response:
def normalize(data):
"""Normalize the data to be in the [0, 1] range.
:param data:
:return: normalized data
"""
out_data = data.copy()
for i, sample in enumerate(out_data):
out_data[i] /= sum(out_data[i])
return out_data |
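A small check of the row-normalization above, assuming the function is in scope; use float input, since the in-place division would fail on an integer array.

import numpy as np

data = np.array([[1.0, 1.0],
                 [2.0, 6.0]])
print(normalize(data))   # [[0.5, 0.5], [0.25, 0.75]] -- each row sums to 1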
def xpath(self, xpath, **kwargs):
"""
Perform an XPath query on the current node.
:param string xpath: XPath query.
:param dict kwargs: Optional keyword arguments that are passed through
to the underlying XML library implementation.
:return: results of the query as a list of :class:`Node` objects, or
a list of base type objects if the XPath query does not reference
node objects.
"""
result = self.adapter.xpath_on_node(self.impl_node, xpath, **kwargs)
if isinstance(result, (list, tuple)):
return [self._maybe_wrap_node(r) for r in result]
else:
return self._maybe_wrap_node(result) | Perform an XPath query on the current node.
:param string xpath: XPath query.
:param dict kwargs: Optional keyword arguments that are passed through
to the underlying XML library implementation.
:return: results of the query as a list of :class:`Node` objects, or
a list of base type objects if the XPath query does not reference
            node objects. | Below is the instruction that describes the task:
### Input:
Perform an XPath query on the current node.
:param string xpath: XPath query.
:param dict kwargs: Optional keyword arguments that are passed through
to the underlying XML library implementation.
:return: results of the query as a list of :class:`Node` objects, or
a list of base type objects if the XPath query does not reference
node objects.
### Response:
def xpath(self, xpath, **kwargs):
"""
Perform an XPath query on the current node.
:param string xpath: XPath query.
:param dict kwargs: Optional keyword arguments that are passed through
to the underlying XML library implementation.
:return: results of the query as a list of :class:`Node` objects, or
a list of base type objects if the XPath query does not reference
node objects.
"""
result = self.adapter.xpath_on_node(self.impl_node, xpath, **kwargs)
if isinstance(result, (list, tuple)):
return [self._maybe_wrap_node(r) for r in result]
else:
return self._maybe_wrap_node(result) |
def Cx(mt, x):
""" Return the Cx """
    return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) | Return the Cx | Below is the instruction that describes the task:
### Input:
Return the Cx
### Response:
def Cx(mt, x):
""" Return the Cx """
return ((1 / (1 + mt.i)) ** (x + 1)) * mt.dx[x] * ((1 + mt.i) ** 0.5) |
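The commutation value above only needs mt.i (the interest rate) and mt.dx (the deaths column), so a toy table is enough to exercise it; the numbers are made up.

class ToyTable(object):
    i = 0.03
    dx = [100, 90, 80, 70]    # made-up deaths column

print(Cx(ToyTable(), 2))      # 80 * (1 / 1.03) ** 3 * 1.03 ** 0.5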
def parse_mbox(filepath):
"""Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
    Each one of these contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict`
"""
mbox = _MBox(filepath, create=False)
for msg in mbox:
message = message_to_dict(msg)
yield message | Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
    Each one of these contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
        dictionary of type `requests.structures.CaseInsensitiveDict` | Below is the instruction that describes the task:
### Input:
Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
Each one of this contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict`
### Response:
def parse_mbox(filepath):
"""Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
    Each one of these contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict`
"""
mbox = _MBox(filepath, create=False)
for msg in mbox:
message = message_to_dict(msg)
yield message |
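_MBox and message_to_dict are internal helpers that are not shown here; a rough equivalent of what the generator yields can be sketched with only the standard library's mailbox module (the file name is a placeholder).

import mailbox

def iter_mbox(filepath):
    # yield one dict of headers per message, similar in spirit to parse_mbox
    for msg in mailbox.mbox(filepath, create=False):
        yield dict(msg.items())

# for message in iter_mbox("archive.mbox"):
#     print(message.get("Subject"))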
def setup_sfr_reach_obs(sfr_out_file,seg_reach=None,ins_file=None,model=None,
include_path=False):
"""setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Parameters
----------
    sfr_out_file : str
the existing SFR output file
seg_reach : dict, list or pandas.DataFrame
a dict, or list of SFR [segment,reach] pairs identifying observation locations.
If dict the key value in the dict is the base observation name.
If None, all reaches are used as individual observations. Default is None - THIS MAY SET UP A LOT OF OBS!
model : flopy.mbase
a flopy model. If passed, the observation names will have the datetime of the
observation appended to them. If None, the observation names will have the
stress period appended to them. Default is None.
include_path : bool
flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
process in separate directory for where python is running.
Returns
-------
df : pd.DataFrame
dataframe of obsnme, obsval and obgnme if inschek run was successful. Else None
Note
----
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
"""
if seg_reach is None:
warnings.warn("Obs will be set up for every reach", PyemuWarning)
seg_reach = 'all'
elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray):
if np.ndim(seg_reach) == 1:
seg_reach = [seg_reach]
assert np.shape(
            seg_reach)[1] == 2, "variable seg_reach expected shape (n,2), received {0}".format(np.shape(seg_reach))
seg_reach = pd.DataFrame(seg_reach, columns=['segment', 'reach'])
seg_reach.index = seg_reach.apply(lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1)
elif isinstance(seg_reach, dict):
seg_reach = pd.DataFrame.from_dict(seg_reach, orient='index', columns=['segment', 'reach'])
else:
assert isinstance(
seg_reach, pd.DataFrame), "'selection needs to be pandas Dataframe. Type {} passed.".format(type(seg_reach))
assert np.all([sr in seg_reach.columns for sr in ['segment', 'reach']]
), "Either 'segment' or 'reach' not in selection columns"
sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach)
kpers = list(sfr_dict.keys())
kpers.sort()
if isinstance(seg_reach, str) and seg_reach == 'all':
seg_reach = sfr_dict[kpers[0]][['segment', 'reach']]
seg_reach.index = seg_reach.apply(lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
diff = seg_reach.loc[seg_reach.apply(lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach))
not in sfr_dict[list(sfr_dict.keys())[0]].index, axis=1)]
if len(diff) > 0:
for ob in diff.itertuples():
warnings.warn("segs,reach pair listed with onames {0} was not found: {1}".
format(ob.Index, "({},{})".format(ob.segment, ob.reach)), PyemuWarning)
seg_reach = seg_reach.drop(diff.index)
seg_reach['obs_base'] = seg_reach.index
df_key = pd.DataFrame({"obs_base": keys, "segment": 0, 'reach': values})
df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True)
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth,"sfr_reach_obs.config")
else:
config_file = "sfr_reach_obs.config"
print("writing 'sfr_reach_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = '.'
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_reach_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (pd.to_datetime(model.start_datetime) + pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit='d')).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1)
df.loc[:, "flout_obsnme"] = df.apply(lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1)
if ins_file is None:
ins_file = sfr_out_file + ".reach_processed.ins"
with open(ins_file, 'w') as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == '':
pth = '.'
bd = os.getcwd()
os.chdir(pth)
try:
#df = _try_run_inschek(os.path.split(ins_file)[-1],os.path.split(sfr_out_file+".processed")[-1])
df = try_process_ins_file(os.path.split(ins_file)[-1], os.path.split(sfr_out_file+".reach_processed")[-1])
except Exception as e:
pass
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: "flaqx" if x.startswith("fa") else "flout")
return df | setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Parameters
----------
    sfr_out_file : str
the existing SFR output file
seg_reach : dict, list or pandas.DataFrame
a dict, or list of SFR [segment,reach] pairs identifying observation locations.
If dict the key value in the dict is the base observation name.
If None, all reaches are used as individual observations. Default is None - THIS MAY SET UP A LOT OF OBS!
model : flopy.mbase
a flopy model. If passed, the observation names will have the datetime of the
observation appended to them. If None, the observation names will have the
stress period appended to them. Default is None.
include_path : bool
flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
process in separate directory for where python is running.
Returns
-------
df : pd.DataFrame
dataframe of obsnme, obsval and obgnme if inschek run was successful. Else None
Note
----
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run | Below is the the instruction that describes the task:
### Input:
setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Parameters
----------
    sfr_out_file : str
the existing SFR output file
seg_reach : dict, list or pandas.DataFrame
a dict, or list of SFR [segment,reach] pairs identifying observation locations.
If dict the key value in the dict is the base observation name.
If None, all reaches are used as individual observations. Default is None - THIS MAY SET UP A LOT OF OBS!
model : flopy.mbase
a flopy model. If passed, the observation names will have the datetime of the
observation appended to them. If None, the observation names will have the
stress period appended to them. Default is None.
include_path : bool
flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
process in separate directory for where python is running.
Returns
-------
df : pd.DataFrame
dataframe of obsnme, obsval and obgnme if inschek run was successful. Else None
Note
----
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
### Response:
def setup_sfr_reach_obs(sfr_out_file,seg_reach=None,ins_file=None,model=None,
include_path=False):
"""setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Parameters
----------
    sfr_out_file : str
the existing SFR output file
seg_reach : dict, list or pandas.DataFrame
a dict, or list of SFR [segment,reach] pairs identifying observation locations.
If dict the key value in the dict is the base observation name.
If None, all reaches are used as individual observations. Default is None - THIS MAY SET UP A LOT OF OBS!
model : flopy.mbase
a flopy model. If passed, the observation names will have the datetime of the
observation appended to them. If None, the observation names will have the
stress period appended to them. Default is None.
include_path : bool
flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
process in separate directory for where python is running.
Returns
-------
df : pd.DataFrame
dataframe of obsnme, obsval and obgnme if inschek run was successful. Else None
Note
----
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
"""
if seg_reach is None:
warnings.warn("Obs will be set up for every reach", PyemuWarning)
seg_reach = 'all'
elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray):
if np.ndim(seg_reach) == 1:
seg_reach = [seg_reach]
assert np.shape(
            seg_reach)[1] == 2, "variable seg_reach expected shape (n,2), received {0}".format(np.shape(seg_reach))
seg_reach = pd.DataFrame(seg_reach, columns=['segment', 'reach'])
seg_reach.index = seg_reach.apply(lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1)
elif isinstance(seg_reach, dict):
seg_reach = pd.DataFrame.from_dict(seg_reach, orient='index', columns=['segment', 'reach'])
else:
assert isinstance(
seg_reach, pd.DataFrame), "'selection needs to be pandas Dataframe. Type {} passed.".format(type(seg_reach))
assert np.all([sr in seg_reach.columns for sr in ['segment', 'reach']]
), "Either 'segment' or 'reach' not in selection columns"
sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach)
kpers = list(sfr_dict.keys())
kpers.sort()
if isinstance(seg_reach, str) and seg_reach == 'all':
seg_reach = sfr_dict[kpers[0]][['segment', 'reach']]
seg_reach.index = seg_reach.apply(lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
diff = seg_reach.loc[seg_reach.apply(lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach))
not in sfr_dict[list(sfr_dict.keys())[0]].index, axis=1)]
if len(diff) > 0:
for ob in diff.itertuples():
warnings.warn("segs,reach pair listed with onames {0} was not found: {1}".
format(ob.Index, "({},{})".format(ob.segment, ob.reach)), PyemuWarning)
seg_reach = seg_reach.drop(diff.index)
seg_reach['obs_base'] = seg_reach.index
df_key = pd.DataFrame({"obs_base": keys, "segment": 0, 'reach': values})
df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True)
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth,"sfr_reach_obs.config")
else:
config_file = "sfr_reach_obs.config"
print("writing 'sfr_reach_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = '.'
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_reach_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (pd.to_datetime(model.start_datetime) + pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit='d')).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1)
df.loc[:, "flout_obsnme"] = df.apply(lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1)
if ins_file is None:
ins_file = sfr_out_file + ".reach_processed.ins"
with open(ins_file, 'w') as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == '':
pth = '.'
bd = os.getcwd()
os.chdir(pth)
try:
#df = _try_run_inschek(os.path.split(ins_file)[-1],os.path.split(sfr_out_file+".processed")[-1])
df = try_process_ins_file(os.path.split(ins_file)[-1], os.path.split(sfr_out_file+".reach_processed")[-1])
except Exception as e:
pass
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: "flaqx" if x.startswith("fa") else "flout")
return df |
def encode_multipart(data, files):
"""Encode multipart.
:arg dict data: Data to be encoded
:arg dict files: Files to be encoded
:returns: Encoded binary string
:raises: :class:`UrlfetchException`
"""
body = BytesIO()
boundary = choose_boundary()
part_boundary = b('--%s\r\n' % boundary)
writer = codecs.lookup('utf-8')[3]
if isinstance(data, dict):
for name, values in data.items():
if not isinstance(values, (list, tuple, set)):
# behave like urllib.urlencode(dict, 1)
values = (values, )
for value in values:
body.write(part_boundary)
writer(body).write('Content-Disposition: form-data; '
'name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(value, int):
value = str(value)
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
for fieldname, f in files.items():
if isinstance(f, tuple):
filename, f = f
elif hasattr(f, 'name'):
filename = basename(f.name)
else:
filename = None
            raise UrlfetchException("file must have a filename")
if hasattr(f, 'read'):
value = f.read()
elif isinstance(f, basestring):
value = f
else:
value = str(f)
body.write(part_boundary)
if filename:
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b'Content-Type: application/octet-stream\r\n\r\n')
else:
writer(body).write('Content-Disposition: form-data; name="%s"'
                               '\r\n' % fieldname)
body.write(b'Content-Type: text/plain\r\n\r\n')
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
body.write(b('--' + boundary + '--\r\n'))
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body.getvalue() | Encode multipart.
:arg dict data: Data to be encoded
:arg dict files: Files to be encoded
:returns: Encoded binary string
    :raises: :class:`UrlfetchException` | Below is the instruction that describes the task:
### Input:
Encode multipart.
:arg dict data: Data to be encoded
:arg dict files: Files to be encoded
:returns: Encoded binary string
:raises: :class:`UrlfetchException`
### Response:
def encode_multipart(data, files):
"""Encode multipart.
:arg dict data: Data to be encoded
:arg dict files: Files to be encoded
:returns: Encoded binary string
:raises: :class:`UrlfetchException`
"""
body = BytesIO()
boundary = choose_boundary()
part_boundary = b('--%s\r\n' % boundary)
writer = codecs.lookup('utf-8')[3]
if isinstance(data, dict):
for name, values in data.items():
if not isinstance(values, (list, tuple, set)):
# behave like urllib.urlencode(dict, 1)
values = (values, )
for value in values:
body.write(part_boundary)
writer(body).write('Content-Disposition: form-data; '
'name="%s"\r\n' % name)
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(value, int):
value = str(value)
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
for fieldname, f in files.items():
if isinstance(f, tuple):
filename, f = f
elif hasattr(f, 'name'):
filename = basename(f.name)
else:
filename = None
            raise UrlfetchException("file must have a filename")
if hasattr(f, 'read'):
value = f.read()
elif isinstance(f, basestring):
value = f
else:
value = str(f)
body.write(part_boundary)
if filename:
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b'Content-Type: application/octet-stream\r\n\r\n')
else:
writer(body).write('Content-Disposition: form-data; name="%s"'
                               '\r\n' % fieldname)
body.write(b'Content-Type: text/plain\r\n\r\n')
if py3k and isinstance(value, str):
writer(body).write(value)
else:
body.write(value)
body.write(b'\r\n')
body.write(b('--' + boundary + '--\r\n'))
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body.getvalue() |
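A hedged sketch of using the encoder above with the standard library; encode_multipart is assumed to be importable as defined, and the URL is a placeholder.

import urllib.request

content_type, body = encode_multipart(
    data={"field": "value"},
    files={"upload": ("notes.txt", "hello world")},
)
req = urllib.request.Request(
    "https://example.com/upload",              # placeholder endpoint
    data=body,
    headers={"Content-Type": content_type},
    method="POST",
)
# response = urllib.request.urlopen(req)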
def add_actions(self, actions_list, scheduler_instance_id):
"""Add a list of actions to the satellite queues
:param actions_list: Actions list to add
:type actions_list: list
        :param scheduler_instance_id: scheduler link to assign the actions to
:type scheduler_instance_id: SchedulerLink
:return: None
"""
# We check for new check in each schedulers and put the result in new_checks
scheduler_link = None
for scheduler_id in self.schedulers:
logger.debug("Trying to add an action, scheduler: %s", self.schedulers[scheduler_id])
if scheduler_instance_id == self.schedulers[scheduler_id].instance_id:
scheduler_link = self.schedulers[scheduler_id]
break
else:
            logger.error("Trying to add actions from an unknown scheduler: %s",
scheduler_instance_id)
return
if not scheduler_link:
logger.error("Trying to add actions, but scheduler link is not found for: %s, "
"actions: %s", scheduler_instance_id, actions_list)
return
logger.debug("Found scheduler link: %s", scheduler_link)
for action in actions_list:
# First we look if the action is identified
uuid = getattr(action, 'uuid', None)
if uuid is None:
try:
action = unserialize(action, no_load=True)
uuid = action.uuid
except AlignakClassLookupException:
logger.error('Cannot un-serialize action: %s', action)
continue
# If we already have this action, we are already working for it!
if uuid in scheduler_link.actions:
continue
# Action is attached to a scheduler
action.my_scheduler = scheduler_link.uuid
scheduler_link.actions[action.uuid] = action
self.assign_to_a_queue(action) | Add a list of actions to the satellite queues
:param actions_list: Actions list to add
:type actions_list: list
        :param scheduler_instance_id: scheduler link to assign the actions to
:type scheduler_instance_id: SchedulerLink
        :return: None | Below is the instruction that describes the task:
### Input:
Add a list of actions to the satellite queues
:param actions_list: Actions list to add
:type actions_list: list
        :param scheduler_instance_id: scheduler link to assign the actions to
:type scheduler_instance_id: SchedulerLink
:return: None
### Response:
def add_actions(self, actions_list, scheduler_instance_id):
"""Add a list of actions to the satellite queues
:param actions_list: Actions list to add
:type actions_list: list
        :param scheduler_instance_id: scheduler link to assign the actions to
:type scheduler_instance_id: SchedulerLink
:return: None
"""
# We check for new check in each schedulers and put the result in new_checks
scheduler_link = None
for scheduler_id in self.schedulers:
logger.debug("Trying to add an action, scheduler: %s", self.schedulers[scheduler_id])
if scheduler_instance_id == self.schedulers[scheduler_id].instance_id:
scheduler_link = self.schedulers[scheduler_id]
break
else:
            logger.error("Trying to add actions from an unknown scheduler: %s",
scheduler_instance_id)
return
if not scheduler_link:
logger.error("Trying to add actions, but scheduler link is not found for: %s, "
"actions: %s", scheduler_instance_id, actions_list)
return
logger.debug("Found scheduler link: %s", scheduler_link)
for action in actions_list:
# First we look if the action is identified
uuid = getattr(action, 'uuid', None)
if uuid is None:
try:
action = unserialize(action, no_load=True)
uuid = action.uuid
except AlignakClassLookupException:
logger.error('Cannot un-serialize action: %s', action)
continue
# If we already have this action, we are already working for it!
if uuid in scheduler_link.actions:
continue
# Action is attached to a scheduler
action.my_scheduler = scheduler_link.uuid
scheduler_link.actions[action.uuid] = action
self.assign_to_a_queue(action) |
def is_solved(self):
"""
Check if Cube is solved.
"""
for side in "LUFDRB":
sample = self.cube[side].facings[side]
for square in sum(self.cube.get_face(side), []):
if square != sample:
return False
        return True | Check if Cube is solved. | Below is the instruction that describes the task:
### Input:
Check if Cube is solved.
### Response:
def is_solved(self):
"""
Check if Cube is solved.
"""
for side in "LUFDRB":
sample = self.cube[side].facings[side]
for square in sum(self.cube.get_face(side), []):
if square != sample:
return False
return True |
def Cinv(self):
"""Inverse of the noise covariance."""
try:
return np.linalg.inv(self.c)
except np.linalg.linalg.LinAlgError:
print('Warning: non-invertible noise covariance matrix c.')
            return np.eye(self.c.shape[0]) | Inverse of the noise covariance. | Below is the instruction that describes the task:
### Input:
Inverse of the noise covariance.
### Response:
def Cinv(self):
"""Inverse of the noise covariance."""
try:
return np.linalg.inv(self.c)
except np.linalg.linalg.LinAlgError:
print('Warning: non-invertible noise covariance matrix c.')
return np.eye(self.c.shape[0]) |
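A common variation on the fallback above is to return the Moore-Penrose pseudo-inverse instead of the identity; shown here as a stand-alone sketch, not the library's actual behavior.

import numpy as np

def safe_inverse(c):
    try:
        return np.linalg.inv(c)
    except np.linalg.LinAlgError:
        return np.linalg.pinv(c)      # least-squares-sense inverse for singular c

print(safe_inverse(np.array([[1.0, 2.0],
                             [2.0, 4.0]])))   # singular matrix -> pseudo-inverse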
def plotCurve(self):
"""Shows a calibration curve, in a separate window, of the currently selected calibration"""
try:
attenuations, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.pw = SimplePlotWidget(freqs, attenuations, parent=self)
self.pw.setWindowFlags(QtCore.Qt.Window)
self.pw.setLabels('Frequency', 'Attenuation', 'Calibration Curve')
self.pw.show()
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
            QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") | Shows a calibration curve, in a separate window, of the currently selected calibration | Below is the instruction that describes the task:
### Input:
Shows a calibration curve, in a separate window, of the currently selected calibration
### Response:
def plotCurve(self):
"""Shows a calibration curve, in a separate window, of the currently selected calibration"""
try:
attenuations, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
self.pw = SimplePlotWidget(freqs, attenuations, parent=self)
self.pw.setWindowFlags(QtCore.Qt.Window)
self.pw.setLabels('Frequency', 'Attenuation', 'Calibration Curve')
self.pw.show()
except IOError:
QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
except KeyError:
QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file") |
def add_cli_clear_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``clear_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.pass_obj
def drop(manager: BELNamespaceManagerMixin):
"""Clear names/identifiers to terminology store."""
namespace = manager.drop_bel_namespace()
if namespace:
click.echo(f'namespace {namespace} was cleared')
return main | Add a ``clear_bel_namespace`` command to main :mod:`click` function. | Below is the instruction that describes the task:
### Input:
Add a ``clear_bel_namespace`` command to main :mod:`click` function.
### Response:
def add_cli_clear_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``clear_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.pass_obj
def drop(manager: BELNamespaceManagerMixin):
"""Clear names/identifiers to terminology store."""
namespace = manager.drop_bel_namespace()
if namespace:
click.echo(f'namespace {namespace} was cleared')
return main |
def get_commits(self, since_sha=None):
"""Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from
"""
assert self.tempdir
cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
if since_sha:
commits = [self.get_commit(since_sha)]
cmd.append('{}..HEAD'.format(since_sha))
else:
commits = []
cmd.append('HEAD')
output = cmd_output(*cmd, cwd=self.tempdir)
for sha, date in chunk_iter(output.splitlines(), 2):
commits.append(Commit(sha, int(date)))
return commits | Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from | Below is the instruction that describes the task:
### Input:
Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from
### Response:
def get_commits(self, since_sha=None):
"""Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from
"""
assert self.tempdir
cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
if since_sha:
commits = [self.get_commit(since_sha)]
cmd.append('{}..HEAD'.format(since_sha))
else:
commits = []
cmd.append('HEAD')
output = cmd_output(*cmd, cwd=self.tempdir)
for sha, date in chunk_iter(output.splitlines(), 2):
commits.append(Commit(sha, int(date)))
return commits |
def process_result(self, new_concept, concepts):
"""Save all concepts with non-zero |small_phi| to the
|CauseEffectStructure|.
"""
if new_concept.phi > 0:
# Replace the subsystem
new_concept.subsystem = self.subsystem
concepts.append(new_concept)
return concepts | Save all concepts with non-zero |small_phi| to the
|CauseEffectStructure|. | Below is the instruction that describes the task:
### Input:
Save all concepts with non-zero |small_phi| to the
|CauseEffectStructure|.
### Response:
def process_result(self, new_concept, concepts):
"""Save all concepts with non-zero |small_phi| to the
|CauseEffectStructure|.
"""
if new_concept.phi > 0:
# Replace the subsystem
new_concept.subsystem = self.subsystem
concepts.append(new_concept)
return concepts |
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText | thread information as XML | Below is the instruction that describes the task:
### Input:
thread information as XML
### Response:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText |
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
norm=1, dtype=np.float32):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
if norm is not None and norm != 1 and norm != np.inf:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i+2] / fdiff[i+1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
if norm == 1:
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn('Empty filters detected in mel frequency basis. '
'Some channels will produce empty responses. '
'Try increasing your sampling rate (and fmax) or '
'reducing n_mels.')
return weights | Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout() | Below is the instruction that describes the task:
### Input:
Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
### Response:
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
norm=1, dtype=np.float32):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
if norm is not None and norm != 1 and norm != np.inf:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i+2] / fdiff[i+1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
if norm == 1:
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn('Empty filters detected in mel frequency basis. '
'Some channels will produce empty responses. '
'Try increasing your sampling rate (and fmax) or '
'reducing n_mels.')
return weights |
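A short usage sketch for the filterbank above: the returned matrix is applied to a power spectrogram with a plain matrix product. This assumes the same librosa version as the entry (positional sr/n_fft arguments) and uses a synthetic signal, so the numbers are illustrative only:
import numpy as np
import librosa

sr = 22050
y = np.random.randn(2 * sr).astype(np.float32)       # synthetic 2-second signal
S = np.abs(librosa.stft(y, n_fft=2048)) ** 2          # power spectrogram, (1 + n_fft/2, t)
melfb = librosa.filters.mel(sr, 2048, n_mels=128)     # (n_mels, 1 + n_fft/2)
mel_S = melfb.dot(S)                                  # mel-scaled spectrogram, (n_mels, t)
print(mel_S.shape)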
def remove_mid_line_ifs(self, ifs):
"""
Go through passed offsets, filtering ifs
located somewhere mid-line.
"""
# FIXME: this doesn't work for Python 3.6+
filtered = []
for i in ifs:
# For each offset, if line number of current and next op
# is the same
if self.lines[i].l_no == self.lines[i+3].l_no:
# Skip last op on line if it is some sort of POP_JUMP.
if self.code[self.prev[self.lines[i].next]] in (self.opc.PJIT, self.opc.PJIF):
continue
filtered.append(i)
return filtered | Go through passed offsets, filtering ifs
located somewhere mid-line. | Below is the instruction that describes the task:
### Input:
Go through passed offsets, filtering ifs
located somewhere mid-line.
### Response:
def remove_mid_line_ifs(self, ifs):
"""
Go through passed offsets, filtering ifs
located somewhere mid-line.
"""
# FIXME: this doesn't work for Python 3.6+
filtered = []
for i in ifs:
# For each offset, if line number of current and next op
# is the same
if self.lines[i].l_no == self.lines[i+3].l_no:
# Skip last op on line if it is some sort of POP_JUMP.
if self.code[self.prev[self.lines[i].next]] in (self.opc.PJIT, self.opc.PJIF):
continue
filtered.append(i)
return filtered |
def ToVegaMag(self, wave, flux, **kwargs):
"""Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
from . import spectrum
resampled = spectrum.Vega.resample(wave)
normalized = flux / resampled._fluxtable
return -2.5 * N.log10(normalized) | Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values. | Below is the instruction that describes the task:
### Input:
Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
### Response:
def ToVegaMag(self, wave, flux, **kwargs):
"""Convert to ``vegamag``.
.. math::
\\textnormal{vegamag} = -2.5 \\; \\log(\\frac{\\textnormal{photlam}}{f_{\\textnormal{Vega}}})
where :math:`f_{\\textnormal{Vega}}` is the flux of
:ref:`pysynphot-vega-spec` resampled at given wavelength values
and converted to ``photlam``.
Parameters
----------
wave, flux : number or array_like
Wavelength and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values.
"""
from . import spectrum
resampled = spectrum.Vega.resample(wave)
normalized = flux / resampled._fluxtable
return -2.5 * N.log10(normalized) |
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Ensure cipher and Tlsv1"""
context = create_urllib3_context(ciphers=self.CIPHERS,
ssl_version=ssl.PROTOCOL_TLSv1)
proxy_kwargs['ssl_context'] = context
return super(TLSv1Adapter, self).proxy_manager_for(proxy,
**proxy_kwargs) | Ensure cipher and Tlsv1 | Below is the instruction that describes the task:
### Input:
Ensure cipher and Tlsv1
### Response:
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Ensure cipher and Tlsv1"""
context = create_urllib3_context(ciphers=self.CIPHERS,
ssl_version=ssl.PROTOCOL_TLSv1)
proxy_kwargs['ssl_context'] = context
return super(TLSv1Adapter, self).proxy_manager_for(proxy,
**proxy_kwargs) |
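An adapter like the one above is normally mounted on a requests Session. The sketch below is an assumption-laden reconstruction: it overrides init_poolmanager (the entry only shows proxy_manager_for), and the CIPHERS string is a placeholder, not the library's real cipher list:
import ssl
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.ssl_ import create_urllib3_context

class TLSv1SessionAdapter(HTTPAdapter):
    CIPHERS = 'DEFAULT'  # placeholder cipher string

    def init_poolmanager(self, *args, **kwargs):
        # Force every pooled connection to use a TLSv1 context with our ciphers.
        kwargs['ssl_context'] = create_urllib3_context(ciphers=self.CIPHERS,
                                                       ssl_version=ssl.PROTOCOL_TLSv1)
        return super(TLSv1SessionAdapter, self).init_poolmanager(*args, **kwargs)

session = requests.Session()
session.mount('https://', TLSv1SessionAdapter())  # all https:// traffic now uses this context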
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name | Dump the name to string, after normalizing it. | Below is the instruction that describes the task:
### Input:
Dump the name to string, after normalizing it.
### Response:
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name |
def calc_ag_v1(self):
"""Sum the through flown area of the total cross section.
Required flux sequences:
|AM|
|AV|
|AVR|
Calculated flux sequence:
|AG|
Example:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> fluxes.am = 1.0
>>> fluxes.av= 2.0, 3.0
>>> fluxes.avr = 4.0, 5.0
>>> model.calc_ag_v1()
>>> fluxes.ag
ag(15.0)
"""
flu = self.sequences.fluxes.fastaccess
flu.ag = flu.am+flu.av[0]+flu.av[1]+flu.avr[0]+flu.avr[1] | Sum the through flown area of the total cross section.
Required flux sequences:
|AM|
|AV|
|AVR|
Calculated flux sequence:
|AG|
Example:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> fluxes.am = 1.0
>>> fluxes.av= 2.0, 3.0
>>> fluxes.avr = 4.0, 5.0
>>> model.calc_ag_v1()
>>> fluxes.ag
ag(15.0) | Below is the instruction that describes the task:
### Input:
Sum the through flown area of the total cross section.
Required flux sequences:
|AM|
|AV|
|AVR|
Calculated flux sequence:
|AG|
Example:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> fluxes.am = 1.0
>>> fluxes.av= 2.0, 3.0
>>> fluxes.avr = 4.0, 5.0
>>> model.calc_ag_v1()
>>> fluxes.ag
ag(15.0)
### Response:
def calc_ag_v1(self):
"""Sum the through flown area of the total cross section.
Required flux sequences:
|AM|
|AV|
|AVR|
Calculated flux sequence:
|AG|
Example:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> fluxes.am = 1.0
>>> fluxes.av= 2.0, 3.0
>>> fluxes.avr = 4.0, 5.0
>>> model.calc_ag_v1()
>>> fluxes.ag
ag(15.0)
"""
flu = self.sequences.fluxes.fastaccess
flu.ag = flu.am+flu.av[0]+flu.av[1]+flu.avr[0]+flu.avr[1] |
def subscribeToDeviceCommands(self, typeId="+", deviceId="+", commandId="+", msgFormat="+"):
"""
Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
"""
if self._config.isQuickstart():
self.logger.warning("QuickStart applications do not support commands")
return 0
topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)
return self._subscribe(topic, 0) | Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0` | Below is the instruction that describes the task:
### Input:
Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
### Response:
def subscribeToDeviceCommands(self, typeId="+", deviceId="+", commandId="+", msgFormat="+"):
"""
Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
qos (int): MQTT quality of service level to use (`0`, `1`, or `2`)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
"""
if self._config.isQuickstart():
self.logger.warning("QuickStart applications do not support commands")
return 0
topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)
return self._subscribe(topic, 0) |
def _create_diff(diff, fun, key, prev, curr):
'''
Builds the diff dictionary.
'''
if not fun(prev):
_create_diff_action(diff, 'added', key, curr)
elif fun(prev) and not fun(curr):
_create_diff_action(diff, 'removed', key, prev)
elif not fun(curr):
_create_diff_action(diff, 'updated', key, curr) | Builds the diff dictionary. | Below is the instruction that describes the task:
### Input:
Builds the diff dictionary.
### Response:
def _create_diff(diff, fun, key, prev, curr):
'''
Builds the diff dictionary.
'''
if not fun(prev):
_create_diff_action(diff, 'added', key, curr)
elif fun(prev) and not fun(curr):
_create_diff_action(diff, 'removed', key, prev)
elif not fun(curr):
_create_diff_action(diff, 'updated', key, curr) |
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush() | Console to STDOUT | Below is the instruction that describes the task:
### Input:
Console to STDOUT
### Response:
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush() |
def error_respond(self, error):
"""Create an error response to this request.
When processing the request produces an error condition this method can be used to
create the error response object.
:param error: Specifies what error occurred.
:type error: str or Exception
:returns: An error response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse`
"""
if self.unique_id is None:
return None
response = JSONRPCErrorResponse()
response.unique_id = None if self.one_way else self.unique_id
code, msg, data = _get_code_message_and_data(error)
response.error = msg
response._jsonrpc_error_code = code
if data:
response.data = data
return response | Create an error response to this request.
When processing the request produces an error condition this method can be used to
create the error response object.
:param error: Specifies what error occurred.
:type error: str or Exception
:returns: An error response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse` | Below is the instruction that describes the task:
### Input:
Create an error response to this request.
When processing the request produces an error condition this method can be used to
create the error response object.
:param error: Specifies what error occurred.
:type error: str or Exception
:returns: An error response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse`
### Response:
def error_respond(self, error):
"""Create an error response to this request.
When processing the request produces an error condition this method can be used to
create the error response object.
:param error: Specifies what error occurred.
:type error: str or Exception
:returns: An error response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse`
"""
if self.unique_id is None:
return None
response = JSONRPCErrorResponse()
response.unique_id = None if self.one_way else self.unique_id
code, msg, data = _get_code_message_and_data(error)
response.error = msg
response._jsonrpc_error_code = code
if data:
response.data = data
return response |
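A rough end-to-end sketch of how error_respond is typically reached, assuming the tinyrpc package this entry appears to come from; the request payload and exception are made up:
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol

proto = JSONRPCProtocol()
request = proto.parse_request(
    b'{"jsonrpc": "2.0", "id": 7, "method": "reverse", "params": ["abc"]}')
response = request.error_respond(KeyError('no such method'))  # build the error reply
print(response.serialize())                                   # ready to send back to the client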
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
"""
ls_shifted = self.to_line_string(closed=False).shift(
top=top, right=right, bottom=bottom, left=left)
return self.copy(exterior=ls_shifted.coords) | Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon. | Below is the instruction that describes the task:
### Input:
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
### Response:
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the polygon from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the polygon from the top.
right : None or int, optional
Amount of pixels by which to shift the polygon from the right.
bottom : None or int, optional
Amount of pixels by which to shift the polygon from the bottom.
left : None or int, optional
Amount of pixels by which to shift the polygon from the left.
Returns
-------
imgaug.Polygon
Shifted polygon.
"""
ls_shifted = self.to_line_string(closed=False).shift(
top=top, right=right, bottom=bottom, left=left)
return self.copy(exterior=ls_shifted.coords) |
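A brief usage sketch, assuming the imgaug release this entry comes from (where shift takes per-side pixel offsets); the coordinates are arbitrary:
import imgaug as ia

poly = ia.Polygon([(10, 10), (50, 10), (50, 40)])
shifted = poly.shift(left=5, top=20)   # push 5px away from the left edge, 20px down from the top
print(shifted.exterior)                # the original polygon is untouched; a shifted copy is returned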
def _storeConfig(self, config, configPath):
"""
Writes the config to the configPath.
:param config a dict of config.
:param configPath the path to the file to write to, intermediate dirs will be created as necessary.
"""
self.logger.info("Writing to " + str(configPath))
os.makedirs(os.path.dirname(configPath), exist_ok=True)
with (open(configPath, 'w')) as yml:
yaml.dump(config, yml, default_flow_style=False) | Writes the config to the configPath.
:param config a dict of config.
:param configPath the path to the file to write to, intermediate dirs will be created as necessary. | Below is the instruction that describes the task:
### Input:
Writes the config to the configPath.
:param config a dict of config.
:param configPath the path to the file to write to, intermediate dirs will be created as necessary.
### Response:
def _storeConfig(self, config, configPath):
"""
Writes the config to the configPath.
:param config a dict of config.
:param configPath the path to the file to write to, intermediate dirs will be created as necessary.
"""
self.logger.info("Writing to " + str(configPath))
os.makedirs(os.path.dirname(configPath), exist_ok=True)
with (open(configPath, 'w')) as yml:
yaml.dump(config, yml, default_flow_style=False) |
def crop(self, height, width, center_i=None, center_j=None):
"""Crop the image centered around center_i, center_j.
Parameters
----------
height : int
The height of the desired image.
width : int
The width of the desired image.
center_i : int
The center height point at which to crop. If not specified, the center
of the image is used.
center_j : int
The center width point at which to crop. If not specified, the center
of the image is used.
Returns
-------
:obj:`Image`
A cropped Image of the same type.
"""
# crop channels separately
color_im_cropped = self.color.crop(height, width,
center_i=center_i,
center_j=center_j)
depth_im_cropped = self.depth.crop(height, width,
center_i=center_i,
center_j=center_j)
# return combination of cropped data
return RgbdImage.from_color_and_depth(
color_im_cropped, depth_im_cropped) | Crop the image centered around center_i, center_j.
Parameters
----------
height : int
The height of the desired image.
width : int
The width of the desired image.
center_i : int
The center height point at which to crop. If not specified, the center
of the image is used.
center_j : int
The center width point at which to crop. If not specified, the center
of the image is used.
Returns
-------
:obj:`Image`
A cropped Image of the same type. | Below is the instruction that describes the task:
### Input:
Crop the image centered around center_i, center_j.
Parameters
----------
height : int
The height of the desired image.
width : int
The width of the desired image.
center_i : int
The center height point at which to crop. If not specified, the center
of the image is used.
center_j : int
The center width point at which to crop. If not specified, the center
of the image is used.
Returns
-------
:obj:`Image`
A cropped Image of the same type.
### Response:
def crop(self, height, width, center_i=None, center_j=None):
"""Crop the image centered around center_i, center_j.
Parameters
----------
height : int
The height of the desired image.
width : int
The width of the desired image.
center_i : int
The center height point at which to crop. If not specified, the center
of the image is used.
center_j : int
The center width point at which to crop. If not specified, the center
of the image is used.
Returns
-------
:obj:`Image`
A cropped Image of the same type.
"""
# crop channels separately
color_im_cropped = self.color.crop(height, width,
center_i=center_i,
center_j=center_j)
depth_im_cropped = self.depth.crop(height, width,
center_i=center_i,
center_j=center_j)
# return combination of cropped data
return RgbdImage.from_color_and_depth(
color_im_cropped, depth_im_cropped) |
def using_git(cwd):
"""Test whether the directory cwd is contained in a git repository."""
try:
git_log = shell_out(["git", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError): # pragma: no cover
return False | Test whether the directory cwd is contained in a git repository. | Below is the instruction that describes the task:
### Input:
Test whether the directory cwd is contained in a git repository.
### Response:
def using_git(cwd):
"""Test whether the directory cwd is contained in a git repository."""
try:
git_log = shell_out(["git", "log"], cwd=cwd)
return True
except (CalledProcessError, OSError): # pragma: no cover
return False |
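A trivial usage sketch for using_git, assuming the function (and its shell_out helper) are importable from the entry's module; the path is just the current directory:
import os

if using_git(os.getcwd()):
    print('current directory is inside a git repository')
else:
    print('no git repository found here')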
def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_controller_revision # noqa: E501
partially update the specified ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | patch_namespaced_controller_revision # noqa: E501
partially update the specified ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_namespaced_controller_revision # noqa: E501
partially update the specified ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_controller_revision # noqa: E501
partially update the specified ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data |
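A rough client-side sketch of exercising this endpoint through the official kubernetes Python client, assuming a working kubeconfig; the resource name and label are placeholders:
from kubernetes import client, config

config.load_kube_config()                       # or config.load_incluster_config() inside a pod
apps = client.AppsV1Api()
patch = {'metadata': {'labels': {'reviewed': 'true'}}}
rev = apps.patch_namespaced_controller_revision(
    name='my-daemonset-6b8f7c9d4',              # hypothetical ControllerRevision name
    namespace='default',
    body=patch)
print(rev.metadata.labels)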
def _dT_h_delta(T_in_kK, eta, k, threenk, c_v):
"""
internal function for calculation of temperature along a Hugoniot
:param T_in_kK: temperature in kK scale, see Jamieson for detail
:param eta: = 1 - rho0/rho
:param k: = [rho0, c0, s, gamma0, q, theta0]
:param threenk: see the definition in Jamieson 1983,
it is a correction term mostly for Jamieson gold scale
:param c_v: manual input of Cv value,
if 0 calculated through Debye function
:return: eta derivative of temperature
"""
rho0 = k[0] # g/m^3
gamma0 = k[3] # no unit
q = k[4] # no unit
theta0_in_kK = k[5] # K, see Jamieson 1983 for detail
rho = rho0 / (1. - eta)
c0 = k[1] # km/s
s = k[2] # no unit
dPhdelta_H = rho0 * c0 * c0 * (1. + s * eta) / \
np.power((1. - s * eta), 3.)
# [g/cm^3][km/s]^2 = 1e9[kg m^2/s^2] = [GPa]
Ph = hugoniot_p(rho, rho0, c0, s) # in [GPa]
# calculate Cv
gamma = gamma0 * np.power((1. - eta), q)
theta_in_kK = theta0_in_kK * np.exp((gamma0 - gamma) / q)
x = theta_in_kK / T_in_kK
debye3 = debye_E(x)
if c_v == 0.:
c_v = threenk * (4. * debye3 - 3. * x / (np.exp(x) - 1.)) # [J/g/K]
# calculate dYdX
dYdX = (gamma / (1. - eta) * T_in_kK) + (dPhdelta_H * eta - Ph) / \
(2. * c_v * rho0)
# print('dYdX', dYdX)
return dYdX | internal function for calculation of temperature along a Hugoniot
:param T_in_kK: temperature in kK scale, see Jamieson for detail
:param eta: = 1 - rho0/rho
:param k: = [rho0, c0, s, gamma0, q, theta0]
:param threenk: see the definition in Jamieson 1983,
it is a correction term mostly for Jamieson gold scale
:param c_v: manual input of Cv value,
if 0 calculated through Debye function
:return: eta derivative of temperature | Below is the instruction that describes the task:
### Input:
internal function for calculation of temperature along a Hugoniot
:param T_in_kK: temperature in kK scale, see Jamieson for detail
:param eta: = 1 - rho0/rho
:param k: = [rho0, c0, s, gamma0, q, theta0]
:param threenk: see the definition in Jamieson 1983,
it is a correction term mostly for Jamieson gold scale
:param c_v: manual input of Cv value,
if 0 calculated through Debye function
:return: eta derivative of temperature
### Response:
def _dT_h_delta(T_in_kK, eta, k, threenk, c_v):
"""
internal function for calculation of temperature along a Hugoniot
:param T_in_kK: temperature in kK scale, see Jamieson for detail
:param eta: = 1 - rho0/rho
:param k: = [rho0, c0, s, gamma0, q, theta0]
:param threenk: see the definition in Jamieson 1983,
it is a correction term mostly for Jamieson gold scale
:param c_v: manual input of Cv value,
if 0 calculated through Debye function
:return: eta derivative of temperature
"""
rho0 = k[0] # g/m^3
gamma0 = k[3] # no unit
q = k[4] # no unit
theta0_in_kK = k[5] # K, see Jamieson 1983 for detail
rho = rho0 / (1. - eta)
c0 = k[1] # km/s
s = k[2] # no unit
dPhdelta_H = rho0 * c0 * c0 * (1. + s * eta) / \
np.power((1. - s * eta), 3.)
# [g/cm^3][km/s]^2 = 1e9[kg m^2/s^2] = [GPa]
Ph = hugoniot_p(rho, rho0, c0, s) # in [GPa]
# calculate Cv
gamma = gamma0 * np.power((1. - eta), q)
theta_in_kK = theta0_in_kK * np.exp((gamma0 - gamma) / q)
x = theta_in_kK / T_in_kK
debye3 = debye_E(x)
if c_v == 0.:
c_v = threenk * (4. * debye3 - 3. * x / (np.exp(x) - 1.)) # [J/g/K]
# calculate dYdX
dYdX = (gamma / (1. - eta) * T_in_kK) + (dPhdelta_H * eta - Ph) / \
(2. * c_v * rho0)
# print('dYdX', dYdX)
return dYdX |
def getConfiguration(configPath = None):
"""
Reading the configuration file to look for where the different gates are running.
:return: A json containing the information stored in the .cfg file.
"""
if configPath == None:
# If a current.cfg has not been found, creating it by copying from default
configPath = getConfigPath("browser.cfg")
# Checking if the configuration file exists
if not os.path.exists(configPath):
try:
# Copy the data from the default folder
defaultConfigPath = getConfigPath(os.path.join("default", "browser.cfg"))
with open(configPath, "w") as oF:
with open(defaultConfigPath) as iF:
cont = iF.read()
oF.write(cont)
except Exception, e:
errMsg = "ERROR. No configuration file could be found and the default file was not found either. You might need to reset it manually."
raise Exception( errMsg + " " + str(e))
try:
# Reading the configuration file
config = ConfigParser.ConfigParser()
config.read(configPath)
info = {}
# Iterating through all the sections, which contain the platforms
for section in config.sections():
current = {}
# Iterating through parameters
for (param, value) in config.items(section):
current[param] = value
# Loading the configuration in the info dictionary
info[section] = current
except Exception, e:
errMsg = "ERROR. Something happened when processing the Configuration file (some kind of malform?). Check it before running it again."
raise Exception( errMsg + " " + str(e))
return info | Reading the configuration file to look for where the different gates are running.
:return: A json containing the information stored in the .cfg file. | Below is the instruction that describes the task:
### Input:
Reading the configuration file to look for where the different gates are running.
:return: A json containing the information stored in the .cfg file.
### Response:
def getConfiguration(configPath = None):
"""
Reading the configuration file to look for where the different gates are running.
:return: A json containing the information stored in the .cfg file.
"""
if configPath == None:
# If a current.cfg has not been found, creating it by copying from default
configPath = getConfigPath("browser.cfg")
# Checking if the configuration file exists
if not os.path.exists(configPath):
try:
# Copy the data from the default folder
defaultConfigPath = getConfigPath(os.path.join("default", "browser.cfg"))
with open(configPath, "w") as oF:
with open(defaultConfigPath) as iF:
cont = iF.read()
oF.write(cont)
except Exception, e:
errMsg = "ERROR. No configuration file could be found and the default file was not found either. You might need to reset it manually."
raise Exception( errMsg + " " + str(e))
try:
# Reading the configuration file
config = ConfigParser.ConfigParser()
config.read(configPath)
info = {}
# Iterating through all the sections, which contain the platforms
for section in config.sections():
current = {}
# Iterating through parameters
for (param, value) in config.items(section):
current[param] = value
# Loading the configuration in the info dictionary
info[section] = current
except Exception, e:
errMsg = "ERROR. Something happened when processing the Configuration file (some kind of malform?). Check it before running it again."
raise Exception( errMsg + " " + str(e))
return info |
def NewSection(self, token_type, section_name, pre_formatters):
"""For sections or repeated sections."""
pre_formatters = [self._GetFormatter(f) for f in pre_formatters]
# TODO: Consider getting rid of this dispatching, and turn _Do* into methods
if token_type == REPEATED_SECTION_TOKEN:
new_block = _RepeatedSection(section_name, pre_formatters)
func = _DoRepeatedSection
elif token_type == SECTION_TOKEN:
new_block = _Section(section_name, pre_formatters)
func = _DoSection
elif token_type == DEF_TOKEN:
new_block = _Section(section_name, [])
func = _DoDef
else:
raise AssertionError('Invalid token type %s' % token_type)
self._NewSection(func, new_block) | For sections or repeated sections. | Below is the instruction that describes the task:
### Input:
For sections or repeated sections.
### Response:
def NewSection(self, token_type, section_name, pre_formatters):
"""For sections or repeated sections."""
pre_formatters = [self._GetFormatter(f) for f in pre_formatters]
# TODO: Consider getting rid of this dispatching, and turn _Do* into methods
if token_type == REPEATED_SECTION_TOKEN:
new_block = _RepeatedSection(section_name, pre_formatters)
func = _DoRepeatedSection
elif token_type == SECTION_TOKEN:
new_block = _Section(section_name, pre_formatters)
func = _DoSection
elif token_type == DEF_TOKEN:
new_block = _Section(section_name, [])
func = _DoDef
else:
raise AssertionError('Invalid token type %s' % token_type)
self._NewSection(func, new_block) |
def datetime(self):
"""
Returns a datetime object to indicate the month, day, year, and time
the requested game took place.
"""
date_string = '%s %s' % (self._date, self._time.upper())
date_string = re.sub(r'/.*', '', date_string)
date_string = re.sub(r' ET', '', date_string)
date_string += 'M'
date_string = re.sub(r'PMM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r'AMM', 'AM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' PM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' AM', 'AM', date_string, flags=re.IGNORECASE)
return datetime.strptime(date_string, '%a, %b %d, %Y %I:%M%p') | Returns a datetime object to indicate the month, day, year, and time
the requested game took place. | Below is the instruction that describes the task:
### Input:
Returns a datetime object to indicate the month, day, year, and time
the requested game took place.
### Response:
def datetime(self):
"""
Returns a datetime object to indicate the month, day, year, and time
the requested game took place.
"""
date_string = '%s %s' % (self._date, self._time.upper())
date_string = re.sub(r'/.*', '', date_string)
date_string = re.sub(r' ET', '', date_string)
date_string += 'M'
date_string = re.sub(r'PMM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r'AMM', 'AM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' PM', 'PM', date_string, flags=re.IGNORECASE)
date_string = re.sub(r' AM', 'AM', date_string, flags=re.IGNORECASE)
return datetime.strptime(date_string, '%a, %b %d, %Y %I:%M%p') |
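A standalone trace of the same normalization chain, using a made-up date/time pair, to show why the trailing 'M' append and the PMM/AMM clean-up exist; the remaining substitutions in the property are no-ops for this input:
import re
from datetime import datetime

date, time = 'Wed, Oct 9, 2019', '8:00p ET/5:00p PT'       # hypothetical inputs
s = '%s %s' % (date, time.upper())
s = re.sub(r'/.*', '', s)                                   # drop the alternate time zone
s = re.sub(r' ET', '', s) + 'M'                             # '8:00P' -> '8:00PM'
s = re.sub(r'PMM', 'PM', s, flags=re.IGNORECASE)
s = re.sub(r'AMM', 'AM', s, flags=re.IGNORECASE)
print(datetime.strptime(s, '%a, %b %d, %Y %I:%M%p'))        # 2019-10-09 20:00:00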
def undistortPoints(self, points, keepSize=False):
'''
points --> list of (x,y) coordinates
'''
s = self.img.shape
cam = self.coeffs['cameraMatrix']
d = self.coeffs['distortionCoeffs']
pts = np.asarray(points, dtype=np.float32)
if pts.ndim == 2:
pts = np.expand_dims(pts, axis=0)
(newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam,
d, s[::-1], 1,
s[::-1])
if not keepSize:
xx, yy = roi[:2]
pts[0, 0] -= xx
pts[0, 1] -= yy
return cv2.undistortPoints(pts,
cam, d, P=newCameraMatrix) | points --> list of (x,y) coordinates | Below is the instruction that describes the task:
### Input:
points --> list of (x,y) coordinates
### Response:
def undistortPoints(self, points, keepSize=False):
'''
points --> list of (x,y) coordinates
'''
s = self.img.shape
cam = self.coeffs['cameraMatrix']
d = self.coeffs['distortionCoeffs']
pts = np.asarray(points, dtype=np.float32)
if pts.ndim == 2:
pts = np.expand_dims(pts, axis=0)
(newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam,
d, s[::-1], 1,
s[::-1])
if not keepSize:
xx, yy = roi[:2]
pts[0, 0] -= xx
pts[0, 1] -= yy
return cv2.undistortPoints(pts,
cam, d, P=newCameraMatrix) |
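For reference, a standalone sketch of the underlying OpenCV call used above; the intrinsics and distortion coefficients are hypothetical:
import cv2
import numpy as np

camera_matrix = np.array([[1000., 0., 640.],
                          [0., 1000., 360.],
                          [0., 0., 1.]], dtype=np.float32)          # made-up intrinsics
dist_coeffs = np.array([-0.2, 0.05, 0., 0., 0.], dtype=np.float32)  # made-up distortion

pts = np.array([[[100., 200.]], [[640., 360.]]], dtype=np.float32)  # shape (N, 1, 2)
undistorted = cv2.undistortPoints(pts, camera_matrix, dist_coeffs, P=camera_matrix)
print(undistorted.reshape(-1, 2))                                   # pixel coords with distortion removed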
def _create_server_rackspace(region,
access_key_id,
secret_access_key,
disk_name,
disk_size,
ami,
key_pair,
instance_type,
username,
instance_name,
tags={},
security_groups=None):
"""
Creates Rackspace Instance and saves its state in a local json file
"""
nova = connect_to_rackspace(region, access_key_id, secret_access_key)
log_yellow("Creating Rackspace instance...")
flavor = nova.flavors.find(name=instance_type)
image = nova.images.find(name=ami)
server = nova.servers.create(name=instance_name,
flavor=flavor.id,
image=image.id,
region=region,
availability_zone=region,
key_name=key_pair)
while server.status == 'BUILD':
log_yellow("Waiting for build to finish...")
sleep(5)
server = nova.servers.get(server.id)
# check for errors
if server.status != 'ACTIVE':
log_red("Error creating rackspace instance")
exit(1)
# the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
ip_address = server.accessIPv4
if ip_address is None:
log_red('No IP address assigned')
exit(1)
wait_for_ssh(ip_address)
log_green('New server with IP address {0}.'.format(ip_address))
# finally save the details of our new instance into the local state file
save_rackspace_state_locally(instance_id=server.id,
region=region,
username=username,
access_key_id=access_key_id,
secret_access_key=secret_access_key) | Creates Rackspace Instance and saves its state in a local json file | Below is the instruction that describes the task:
### Input:
Creates Rackspace Instance and saves its state in a local json file
### Response:
def _create_server_rackspace(region,
access_key_id,
secret_access_key,
disk_name,
disk_size,
ami,
key_pair,
instance_type,
username,
instance_name,
tags={},
security_groups=None):
"""
Creates Rackspace Instance and saves its state in a local json file
"""
nova = connect_to_rackspace(region, access_key_id, secret_access_key)
log_yellow("Creating Rackspace instance...")
flavor = nova.flavors.find(name=instance_type)
image = nova.images.find(name=ami)
server = nova.servers.create(name=instance_name,
flavor=flavor.id,
image=image.id,
region=region,
availability_zone=region,
key_name=key_pair)
while server.status == 'BUILD':
log_yellow("Waiting for build to finish...")
sleep(5)
server = nova.servers.get(server.id)
# check for errors
if server.status != 'ACTIVE':
log_red("Error creating rackspace instance")
exit(1)
# the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
ip_address = server.accessIPv4
if ip_address is None:
log_red('No IP address assigned')
exit(1)
wait_for_ssh(ip_address)
log_green('New server with IP address {0}.'.format(ip_address))
# finally save the details of our new instance into the local state file
save_rackspace_state_locally(instance_id=server.id,
region=region,
username=username,
access_key_id=access_key_id,
secret_access_key=secret_access_key) |
def authenticated_get(username, password, url, verify=True):
"""
Perform an authorized query to the url, and return the result
"""
try:
response = requests.get(url, auth=(username, password), verify=verify)
if response.status_code == 401:
raise BadCredentialsException(
"Unable to authenticate user %s to %s with password provided!"
% (username, url))
except requests.exceptions.SSLError:
raise CertificateException("Unable to verify certificate at %s!" % url)
return response.content | Perform an authorized query to the url, and return the result | Below is the instruction that describes the task:
### Input:
Perform an authorized query to the url, and return the result
### Response:
def authenticated_get(username, password, url, verify=True):
"""
Perform an authorized query to the url, and return the result
"""
try:
response = requests.get(url, auth=(username, password), verify=verify)
if response.status_code == 401:
raise BadCredentialsException(
"Unable to authenticate user %s to %s with password provided!"
% (username, url))
except requests.exceptions.SSLError:
raise CertificateException("Unable to verify certificate at %s!" % url)
return response.content |
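A one-line usage sketch; the credentials and URL below are placeholders, and the function is assumed importable from the entry's module:
content = authenticated_get('deploy-user', 's3cret',
                            'https://artifacts.example.com/repo/index.json')
print(content[:200])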
def main():
"""Execute command line interface."""
parser = argparse.ArgumentParser(
description='Find and analyze basic loop blocks and mark for IACA.',
epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
'/RRZE-HPC/kerncraft\nLicense: AGPLv3')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('source', type=argparse.FileType(), nargs='?', default=sys.stdin,
help='assembly file to analyze (default: stdin)')
parser.add_argument('--outfile', '-o', type=argparse.FileType('w'), nargs='?',
default=sys.stdout, help='output file location (default: stdout)')
parser.add_argument('--debug', action='store_true',
help='Output internal analysis information for debugging.')
args = parser.parse_args()
# pointer_increment is given, since it makes no difference on the command line and requires
# less user input
iaca_instrumentation(input_file=args.source, output_file=args.outfile,
block_selection='manual', pointer_increment=1, debug=args.debug) | Execute command line interface. | Below is the instruction that describes the task:
### Input:
Execute command line interface.
### Response:
def main():
"""Execute command line interface."""
parser = argparse.ArgumentParser(
description='Find and analyze basic loop blocks and mark for IACA.',
epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
'/RRZE-HPC/kerncraft\nLicense: AGPLv3')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('source', type=argparse.FileType(), nargs='?', default=sys.stdin,
help='assembly file to analyze (default: stdin)')
parser.add_argument('--outfile', '-o', type=argparse.FileType('w'), nargs='?',
default=sys.stdout, help='output file location (default: stdout)')
parser.add_argument('--debug', action='store_true',
help='Output internal analysis information for debugging.')
args = parser.parse_args()
# pointer_increment is given, since it makes no difference on the command line and requires
# less user input
iaca_instrumentation(input_file=args.source, output_file=args.outfile,
block_selection='manual', pointer_increment=1, debug=args.debug) |
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True) | Evaluate POS taggers on WSJ and GENIA. | Below is the instruction that describes the task:
### Input:
Evaluate POS taggers on WSJ and GENIA.
### Response:
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True) |
def verts_str(verts, pad=1):
r""" makes a string from a list of integer verticies """
if verts is None:
return 'None'
fmtstr = ', '.join(['%' + six.text_type(pad) + 'd' +
', %' + six.text_type(pad) + 'd'] * 1)
return ', '.join(['(' + fmtstr % vert + ')' for vert in verts]) | r""" makes a string from a list of integer vertices | Below is the instruction that describes the task:
### Input:
r""" makes a string from a list of integer verticies
### Response:
def verts_str(verts, pad=1):
r""" makes a string from a list of integer verticies """
if verts is None:
return 'None'
fmtstr = ', '.join(['%' + six.text_type(pad) + 'd' +
', %' + six.text_type(pad) + 'd'] * 1)
return ', '.join(['(' + fmtstr % vert + ')' for vert in verts]) |
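A short usage sketch for verts_str above; the vertex list is invented and assumes each vertex is an (x, y) pair of integers.
# Hypothetical call to verts_str with made-up (x, y) integer vertices.
verts = [(1, 2), (30, 4), (5, 60)]
print(verts_str(verts, pad=2))
# roughly: ( 1,  2), (30,  4), ( 5, 60)  -- pad controls the %d field width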
def _create_cache_filename(self, cache_dir=None, **kwargs):
"""Create filename for the cached resampling parameters"""
cache_dir = cache_dir or '.'
hash_str = self.get_hash(**kwargs)
return os.path.join(cache_dir, 'resample_lut-' + hash_str + '.npz') | Create filename for the cached resampling parameters | Below is the instruction that describes the task:
### Input:
Create filename for the cached resampling parameters
### Response:
def _create_cache_filename(self, cache_dir=None, **kwargs):
"""Create filename for the cached resampling parameters"""
cache_dir = cache_dir or '.'
hash_str = self.get_hash(**kwargs)
return os.path.join(cache_dir, 'resample_lut-' + hash_str + '.npz') |
def _group(self, group_data):
"""Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
"""
if isinstance(group_data, dict):
# get xid from dict
xid = group_data.get('xid')
else:
# get xid from object
xid = group_data.xid
if self.groups.get(xid) is not None:
# return existing group from memory
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
# return existing group from shelf
group_data = self.groups_shelf.get(xid)
else:
# store new group
self.groups[xid] = group_data
return group_data | Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object. | Below is the instruction that describes the task:
### Input:
Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
### Response:
def _group(self, group_data):
"""Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
"""
if isinstance(group_data, dict):
# get xid from dict
xid = group_data.get('xid')
else:
# get xid from object
xid = group_data.xid
if self.groups.get(xid) is not None:
# return existing group from memory
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
# return existing group from shelf
group_data = self.groups_shelf.get(xid)
else:
# store new group
self.groups[xid] = group_data
return group_data |
def _raise_if_error(self):
"""
Raise IOError if process is not running anymore and the
exit code is nonzero.
"""
retcode = self.process.poll()
if retcode is not None and retcode != 0:
message = self._stderr.read().strip()
raise IOError(message) | Raise IOError if process is not running anymore and the
exit code is nonzero. | Below is the instruction that describes the task:
### Input:
Raise IOError if process is not running anymore and the
exit code is nonzero.
### Response:
def _raise_if_error(self):
"""
Raise IOError if process is not running anymore and the
exit code is nonzero.
"""
retcode = self.process.poll()
if retcode is not None and retcode != 0:
message = self._stderr.read().strip()
raise IOError(message) |
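The method above pairs Popen.poll() with a read of the captured stderr; a standalone sketch of that pattern using only the standard library, with a deliberately failing child process standing in for self.process.
# Standalone sketch of the poll-then-read-stderr pattern; the child command is invented.
import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, "-c", "import sys; sys.stderr.write('boom'); sys.exit(2)"],
    stderr=subprocess.PIPE,
)
proc.wait()
retcode = proc.poll()  # None while the child is still running, exit code afterwards
if retcode is not None and retcode != 0:
    print("process failed:", proc.stderr.read().strip())  # bytes, like the wrapped stream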
def validate(self, xml_input):
"""
This method validates the parsing and schema, and returns a boolean
"""
parsed_xml = etree.parse(self._handle_xml(xml_input))
try:
return self.xmlschema.validate(parsed_xml)
except AttributeError:
raise CannotValidate('Set XSD to validate the XML') | This method validates the parsing and schema, and returns a boolean | Below is the instruction that describes the task:
### Input:
This method validates the parsing and schema, and returns a boolean
### Response:
def validate(self, xml_input):
"""
This method validates the parsing and schema, and returns a boolean
"""
parsed_xml = etree.parse(self._handle_xml(xml_input))
try:
return self.xmlschema.validate(parsed_xml)
except AttributeError:
raise CannotValidate('Set XSD to validate the XML') |
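The validate method above presumably wraps an lxml XMLSchema stored on self.xmlschema; a hedged standalone sketch of that lxml pattern, with an inline XSD and XML document invented for illustration and assuming lxml is available.
# Standalone lxml sketch of schema validation; the XSD and XML below are made up.
from io import BytesIO
from lxml import etree

xsd = b"""<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
  <xs:element name="note" type="xs:string"/>
</xs:schema>"""
xml = b"<note>hello</note>"

schema = etree.XMLSchema(etree.parse(BytesIO(xsd)))
document = etree.parse(BytesIO(xml))
print(schema.validate(document))  # True when the document conforms to the schema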
def discretize_path(entities, vertices, path, scale=1.0):
"""
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
"""
# make sure vertices are numpy array
vertices = np.asanyarray(vertices)
path_len = len(path)
if path_len == 0:
raise ValueError('Cannot discretize empty path!')
if path_len == 1:
# case where we only have one entity
discrete = np.asanyarray(entities[path[0]].discrete(
vertices,
scale=scale))
else:
# run through path appending each entity
discrete = []
for i, entity_id in enumerate(path):
# the current (n, dimension) discrete curve of an entity
current = entities[entity_id].discrete(vertices, scale=scale)
# check if we are on the final entity
if i >= (path_len - 1):
# if we are on the last entity include the last point
discrete.append(current)
else:
# slice off the last point so we don't get duplicate
# points from the end of one entity and the start of another
discrete.append(current[:-1])
# stack all curves to one nice (n, dimension) curve
discrete = np.vstack(discrete)
# make sure 2D curves are counterclockwise
if vertices.shape[1] == 2 and not is_ccw(discrete):
# reversing will make the array non C-contiguous
discrete = np.ascontiguousarray(discrete[::-1])
return discrete | Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments. | Below is the instruction that describes the task:
### Input:
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
### Response:
def discretize_path(entities, vertices, path, scale=1.0):
"""
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
"""
# make sure vertices are numpy array
vertices = np.asanyarray(vertices)
path_len = len(path)
if path_len == 0:
raise ValueError('Cannot discretize empty path!')
if path_len == 1:
# case where we only have one entity
discrete = np.asanyarray(entities[path[0]].discrete(
vertices,
scale=scale))
else:
# run through path appending each entity
discrete = []
for i, entity_id in enumerate(path):
# the current (n, dimension) discrete curve of an entity
current = entities[entity_id].discrete(vertices, scale=scale)
# check if we are on the final entity
if i >= (path_len - 1):
# if we are on the last entity include the last point
discrete.append(current)
else:
# slice off the last point so we don't get duplicate
# points from the end of one entity and the start of another
discrete.append(current[:-1])
# stack all curves to one nice (n, dimension) curve
discrete = np.vstack(discrete)
# make sure 2D curves are counterclockwise
if vertices.shape[1] == 2 and not is_ccw(discrete):
# reversing will make the array non C-contiguous
discrete = np.ascontiguousarray(discrete[::-1])
return discrete |
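The central detail in discretize_path above is dropping each intermediate entity's final point so shared junction vertices appear only once; a numpy-only sketch of that stacking step with two invented 2D segments.
# numpy-only sketch of the junction-deduplicating vstack used by discretize_path.
import numpy as np

curves = [
    np.array([[0.0, 0.0], [1.0, 0.0]]),              # ends at the junction (1, 0)
    np.array([[1.0, 0.0], [1.0, 1.0], [2.0, 1.0]]),  # starts at that same junction
]
parts = [c if i == len(curves) - 1 else c[:-1] for i, c in enumerate(curves)]
discrete = np.vstack(parts)
print(discrete)  # the shared point (1, 0) appears exactly once in the stacked path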
def set_parameters(version=None,
binary_path=None,
config_file=None,
*args,
**kwargs):
'''
Sets variables.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_parameters version='3.6'
salt '*' syslog_ng.set_parameters binary_path=/home/user/install/syslog-ng/sbin config_file=/home/user/install/syslog-ng/etc/syslog-ng.conf
'''
if binary_path:
set_binary_path(binary_path)
if config_file:
set_config_file(config_file)
if version:
version = _determine_config_version(__SYSLOG_NG_BINARY_PATH)
write_version(version)
return _format_return_data(0) | Sets variables.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_parameters version='3.6'
salt '*' syslog_ng.set_parameters binary_path=/home/user/install/syslog-ng/sbin config_file=/home/user/install/syslog-ng/etc/syslog-ng.conf | Below is the instruction that describes the task:
### Input:
Sets variables.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_parameters version='3.6'
salt '*' syslog_ng.set_parameters binary_path=/home/user/install/syslog-ng/sbin config_file=/home/user/install/syslog-ng/etc/syslog-ng.conf
### Response:
def set_parameters(version=None,
binary_path=None,
config_file=None,
*args,
**kwargs):
'''
Sets variables.
CLI Example:
.. code-block:: bash
salt '*' syslog_ng.set_parameters version='3.6'
salt '*' syslog_ng.set_parameters binary_path=/home/user/install/syslog-ng/sbin config_file=/home/user/install/syslog-ng/etc/syslog-ng.conf
'''
if binary_path:
set_binary_path(binary_path)
if config_file:
set_config_file(config_file)
if version:
version = _determine_config_version(__SYSLOG_NG_BINARY_PATH)
write_version(version)
return _format_return_data(0) |
def _find_docstring_line(self, start, end):
"""Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found.
"""
for i in range(start, end + 1):
if i in self._tokenized_triple_quotes:
return i
return None | Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found. | Below is the instruction that describes the task:
### Input:
Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found.
### Response:
def _find_docstring_line(self, start, end):
"""Find the row where a docstring starts in a function or class.
This will search for the first match of a triple quote token in
row sequence from the start of the class or function.
Args:
start: the row where the class / function starts.
end: the row where the class / function ends.
Returns:
int: the row number where the docstring is found.
"""
for i in range(start, end + 1):
if i in self._tokenized_triple_quotes:
return i
return None |
def __check_integrity(self):
"""
A method to check if when invoking __select_wd_item() and the WD item does not exist yet, but another item
has a property of the current domain with a value like submitted in the data dict, this item does not get
selected but a ManualInterventionReqException() is raised. This check is dependent on the core identifiers
of a certain domain.
:return: boolean True if test passed
"""
# all core props
wdi_core_props = self.core_props
# core prop statements that exist on the item
cp_statements = [x for x in self.statements if x.get_prop_nr() in wdi_core_props]
item_core_props = set(x.get_prop_nr() for x in cp_statements)
# core prop statements we are loading
cp_data = [x for x in self.data if x.get_prop_nr() in wdi_core_props]
# compare the claim values of the currently loaded QIDs to the data provided in self.data
# this is the number of core_ids in self.data that are also on the item
count_existing_ids = len([x for x in self.data if x.get_prop_nr() in item_core_props])
core_prop_match_count = 0
for new_stat in self.data:
for stat in self.statements:
if (new_stat.get_prop_nr() == stat.get_prop_nr()) and (new_stat.get_value() == stat.get_value()) \
and (new_stat.get_prop_nr() in item_core_props):
core_prop_match_count += 1
if core_prop_match_count < count_existing_ids * self.core_prop_match_thresh:
existing_core_pv = defaultdict(set)
for s in cp_statements:
existing_core_pv[s.get_prop_nr()].add(s.get_value())
new_core_pv = defaultdict(set)
for s in cp_data:
new_core_pv[s.get_prop_nr()].add(s.get_value())
nomatch_existing = {k: v - new_core_pv[k] for k, v in existing_core_pv.items()}
nomatch_existing = {k: v for k, v in nomatch_existing.items() if v}
nomatch_new = {k: v - existing_core_pv[k] for k, v in new_core_pv.items()}
nomatch_new = {k: v for k, v in nomatch_new.items() if v}
raise CorePropIntegrityException('Retrieved item ({}) does not match provided core IDs. '
'Matching count {}, non-matching count {}. '
.format(self.wd_item_id, core_prop_match_count,
count_existing_ids - core_prop_match_count) +
'existing unmatched core props: {}. '.format(nomatch_existing) +
'statement unmatched core props: {}.'.format(nomatch_new))
else:
return True | A method to check if when invoking __select_wd_item() and the WD item does not exist yet, but another item
has a property of the current domain with a value like submitted in the data dict, this item does not get
selected but a ManualInterventionReqException() is raised. This check is dependent on the core identifiers
of a certain domain.
:return: boolean True if test passed | Below is the instruction that describes the task:
### Input:
A method to check if when invoking __select_wd_item() and the WD item does not exist yet, but another item
has a property of the current domain with a value like submitted in the data dict, this item does not get
selected but a ManualInterventionReqException() is raised. This check is dependent on the core identifiers
of a certain domain.
:return: boolean True if test passed
### Response:
def __check_integrity(self):
"""
A method to check if when invoking __select_wd_item() and the WD item does not exist yet, but another item
has a property of the current domain with a value like submitted in the data dict, this item does not get
selected but a ManualInterventionReqException() is raised. This check is dependent on the core identifiers
of a certain domain.
:return: boolean True if test passed
"""
# all core props
wdi_core_props = self.core_props
# core prop statements that exist on the item
cp_statements = [x for x in self.statements if x.get_prop_nr() in wdi_core_props]
item_core_props = set(x.get_prop_nr() for x in cp_statements)
# core prop statements we are loading
cp_data = [x for x in self.data if x.get_prop_nr() in wdi_core_props]
# compare the claim values of the currently loaded QIDs to the data provided in self.data
# this is the number of core_ids in self.data that are also on the item
count_existing_ids = len([x for x in self.data if x.get_prop_nr() in item_core_props])
core_prop_match_count = 0
for new_stat in self.data:
for stat in self.statements:
if (new_stat.get_prop_nr() == stat.get_prop_nr()) and (new_stat.get_value() == stat.get_value()) \
and (new_stat.get_prop_nr() in item_core_props):
core_prop_match_count += 1
if core_prop_match_count < count_existing_ids * self.core_prop_match_thresh:
existing_core_pv = defaultdict(set)
for s in cp_statements:
existing_core_pv[s.get_prop_nr()].add(s.get_value())
new_core_pv = defaultdict(set)
for s in cp_data:
new_core_pv[s.get_prop_nr()].add(s.get_value())
nomatch_existing = {k: v - new_core_pv[k] for k, v in existing_core_pv.items()}
nomatch_existing = {k: v for k, v in nomatch_existing.items() if v}
nomatch_new = {k: v - existing_core_pv[k] for k, v in new_core_pv.items()}
nomatch_new = {k: v for k, v in nomatch_new.items() if v}
raise CorePropIntegrityException('Retrieved item ({}) does not match provided core IDs. '
'Matching count {}, non-matching count {}. '
.format(self.wd_item_id, core_prop_match_count,
count_existing_ids - core_prop_match_count) +
'existing unmatched core props: {}. '.format(nomatch_existing) +
'statement unmatched core props: {}.'.format(nomatch_new))
else:
return True |
def main(args):
"""
main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
"""
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# stranded?
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# get output handle
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# get input file-handles -- we know we'll get exactly two, since we
# specified it in the UI definition
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
for r in regionsIntersection(regions_1, regions_2):
out_fh.write(str(r) + "\n") | main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list. | Below is the instruction that describes the task:
### Input:
main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
### Response:
def main(args):
"""
main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
"""
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# stranded?
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# get output handle
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# get input file-handles -- we know we'll get exactly two, since we
# specified it in the UI definition
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
for r in regionsIntersection(regions_1, regions_2):
out_fh.write(str(r) + "\n") |
def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
'''Create a media with custom callbacks to read the data from.
@param instance: LibVLC instance.
@param open_cb: callback to open the custom bitstream input media.
@param read_cb: callback to read data (must not be NULL).
@param seek_cb: callback to seek, or NULL if seeking is not supported.
@param close_cb: callback to close the media, or NULL if unnecessary.
@param opaque: data pointer for the open callback.
@return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
@version: LibVLC 3.0.0 and later.
'''
f = _Cfunctions.get('libvlc_media_new_callbacks', None) or \
_Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb, MediaCloseCb, ctypes.c_void_p)
return f(instance, open_cb, read_cb, seek_cb, close_cb, opaque) | Create a media with custom callbacks to read the data from.
@param instance: LibVLC instance.
@param open_cb: callback to open the custom bitstream input media.
@param read_cb: callback to read data (must not be NULL).
@param seek_cb: callback to seek, or NULL if seeking is not supported.
@param close_cb: callback to close the media, or NULL if unnecessary.
@param opaque: data pointer for the open callback.
@return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
@version: LibVLC 3.0.0 and later. | Below is the instruction that describes the task:
### Input:
Create a media with custom callbacks to read the data from.
@param instance: LibVLC instance.
@param open_cb: callback to open the custom bitstream input media.
@param read_cb: callback to read data (must not be NULL).
@param seek_cb: callback to seek, or NULL if seeking is not supported.
@param close_cb: callback to close the media, or NULL if unnecessary.
@param opaque: data pointer for the open callback.
@return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
@version: LibVLC 3.0.0 and later.
### Response:
def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
'''Create a media with custom callbacks to read the data from.
@param instance: LibVLC instance.
@param open_cb: callback to open the custom bitstream input media.
@param read_cb: callback to read data (must not be NULL).
@param seek_cb: callback to seek, or NULL if seeking is not supported.
@param close_cb: callback to close the media, or NULL if unnecessary.
@param opaque: data pointer for the open callback.
@return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
@version: LibVLC 3.0.0 and later.
'''
f = _Cfunctions.get('libvlc_media_new_callbacks', None) or \
_Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb, MediaCloseCb, ctypes.c_void_p)
return f(instance, open_cb, read_cb, seek_cb, close_cb, opaque) |
def validate(self, value, add_comments=False, schema_name="map"):
"""
verbose - also return the jsonschema error details
"""
validator = self.get_schema_validator(schema_name)
error_messages = []
if isinstance(value, list):
for d in value:
error_messages += self._validate(d, validator, add_comments, schema_name)
else:
error_messages = self._validate(value, validator, add_comments, schema_name)
return error_messages | verbose - also return the jsonschema error details | Below is the instruction that describes the task:
### Input:
verbose - also return the jsonschema error details
### Response:
def validate(self, value, add_comments=False, schema_name="map"):
"""
verbose - also return the jsonschema error details
"""
validator = self.get_schema_validator(schema_name)
error_messages = []
if isinstance(value, list):
for d in value:
error_messages += self._validate(d, validator, add_comments, schema_name)
else:
error_messages = self._validate(value, validator, add_comments, schema_name)
return error_messages |
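The validator object above is presumably built by the jsonschema package; a hedged sketch of gathering error messages with jsonschema's public API, with the schema and documents invented for illustration.
# Hedged sketch using the jsonschema package; schema and documents are invented.
from jsonschema import Draft7Validator

schema = {"type": "object", "required": ["name"],
          "properties": {"name": {"type": "string"}}}
validator = Draft7Validator(schema)

def collect_errors(value):
    documents = value if isinstance(value, list) else [value]
    messages = []
    for doc in documents:
        messages.extend(err.message for err in validator.iter_errors(doc))
    return messages

print(collect_errors([{"name": "map-a"}, {"name": 3}, {}]))  # two error messages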
def validate(self, value) :
"""checks the validity of 'value' given the lits of validators"""
for v in self.validators :
v.validate(value)
return True | checks the validity of 'value' given the list of validators | Below is the instruction that describes the task:
### Input:
checks the validity of 'value' given the list of validators
### Response:
def validate(self, value) :
"""checks the validity of 'value' given the lits of validators"""
for v in self.validators :
v.validate(value)
return True |
def properties(dataset_uri, item_identifier):
"""Report item properties."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
try:
props = dataset.item_properties(item_identifier)
except KeyError:
click.secho(
"No such item in dataset: {}".format(item_identifier),
fg="red",
err=True
)
sys.exit(20)
json_lines = [
'{',
' "relpath": "{}",'.format(props["relpath"]),
' "size_in_bytes": {},'.format(props["size_in_bytes"]),
' "utc_timestamp": {},'.format(props["utc_timestamp"]),
' "hash": "{}"'.format(props["hash"]),
'}',
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False) | Report item properties. | Below is the instruction that describes the task:
### Input:
Report item properties.
### Response:
def properties(dataset_uri, item_identifier):
"""Report item properties."""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
try:
props = dataset.item_properties(item_identifier)
except KeyError:
click.secho(
"No such item in dataset: {}".format(item_identifier),
fg="red",
err=True
)
sys.exit(20)
json_lines = [
'{',
' "relpath": "{}",'.format(props["relpath"]),
' "size_in_bytes": {},'.format(props["size_in_bytes"]),
' "utc_timestamp": {},'.format(props["utc_timestamp"]),
' "hash": "{}"'.format(props["hash"]),
'}',
]
formatted_json = "\n".join(json_lines)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False) |
def login():
"""
User authenticate method.
---
description: Authenticate user with supplied credentials.
parameters:
- name: username
in: formData
type: string
required: true
- name: password
in: formData
type: string
required: true
responses:
200:
description: User successfully logged in.
400:
description: User login failed.
"""
try:
username = request.form.get("username")
password = request.form.get("password")
user = authenticate(username, password)
if not user:
raise Exception("User not found!")
resp = jsonify({"message": "User authenticated"})
resp.status_code = 200
access_token = jwt.jwt_encode_callback(user)
# add token to response headers - so SwaggerUI can use it
resp.headers.extend({'jwt-token': access_token})
except Exception as e:
resp = jsonify({"message": "Bad username and/or password"})
resp.status_code = 401
return resp | User authenticate method.
---
description: Authenticate user with supplied credentials.
parameters:
- name: username
in: formData
type: string
required: true
- name: password
in: formData
type: string
required: true
responses:
200:
description: User successfully logged in.
400:
description: User login failed. | Below is the instruction that describes the task:
### Input:
User authenticate method.
---
description: Authenticate user with supplied credentials.
parameters:
- name: username
in: formData
type: string
required: true
- name: password
in: formData
type: string
required: true
responses:
200:
description: User successfully logged in.
400:
description: User login failed.
### Response:
def login():
"""
User authenticate method.
---
description: Authenticate user with supplied credentials.
parameters:
- name: username
in: formData
type: string
required: true
- name: password
in: formData
type: string
required: true
responses:
200:
description: User successfully logged in.
400:
description: User login failed.
"""
try:
username = request.form.get("username")
password = request.form.get("password")
user = authenticate(username, password)
if not user:
raise Exception("User not found!")
resp = jsonify({"message": "User authenticated"})
resp.status_code = 200
access_token = jwt.jwt_encode_callback(user)
# add token to response headers - so SwaggerUI can use it
resp.headers.extend({'jwt-token': access_token})
except Exception as e:
resp = jsonify({"message": "Bad username and/or password"})
resp.status_code = 401
return resp |
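A client-side sketch of calling the login view above with requests; the host, route, and credentials are placeholders, and it assumes the jwt-token header that the handler attaches on success.
# Hypothetical client call for the login view; URL and credentials are placeholders.
import requests

resp = requests.post(
    "http://localhost:5000/login",
    data={"username": "alice", "password": "s3cret"},  # sent as form fields
)
if resp.status_code == 200:
    token = resp.headers.get("jwt-token")  # header added by the view on success
    print("authenticated, token:", token)
else:
    print("login failed:", resp.json().get("message"))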
def _determine_outliers_index(hist: Hist,
moving_average_threshold: float = 1.0,
number_of_values_to_search_ahead: int = 5,
limit_of_number_of_values_below_threshold: int = None) -> int:
""" Determine the location of where outliers begin in a 1D histogram.
When the moving average falls below the limit, we consider the outliers to have begun.
To determine the location of outliers:
- Calculate the moving average for number_of_values_to_search_ahead values.
- First, the moving average must go above the limit at least once to guard against a random cut
in a low pt bin causing most of the data to be cut out.
- Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
- If we meet that condition, we have found the index where the outliers begin. We then return the ROOT
bin index of the value.
- If not, we return -1.
Note:
The index returned is when the moving average first drops below the threshold for a moving average
calculated with that bin at the center. This is somewhat different from a standard moving average
calculation which would only look forward in the array.
Args:
hist: Histogram to be checked for outliers.
moving_average_threshold: Value of moving average under which we consider the moving average
to be 0. Default: 2.
number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
the moving average. Default: 5.
limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
Returns:
ROOT (ie 1-indexed) index of the histogram axes where the outliers begin.
"""
# Validation
import ROOT
if isinstance(hist, (ROOT.TH2, ROOT.TH3, ROOT.THnBase)):
raise ValueError(
f"Given histogram '{hist.GetName()}' of type {type(hist)}, but can only"
" determine the outlier location of a 1D histogram. Please project to"
" the particle level axis first."
)
if limit_of_number_of_values_below_threshold is None:
# In principle, this could be another value. However, this is what was used in the previous outliers
# removal implementation.
limit_of_number_of_values_below_threshold = number_of_values_to_search_ahead - 1
# It is much more convenient to work with a numpy array.
hist_to_check = histogram.Histogram1D.from_existing_hist(hist)
# Calculate the moving average for the entire axis, looking ahead including the current bin + 4 = 5 ahead.
number_of_values_to_search_ahead = 5
moving_average = utils.moving_average(hist_to_check.y, n = number_of_values_to_search_ahead)
#logger.debug(f"y: {hist_to_check.y}")
#logger.debug(f"moving_average: {moving_average}")
cut_index = _determine_outliers_for_moving_average(
moving_average = moving_average,
moving_average_threshold = moving_average_threshold,
number_of_values_to_search_ahead = number_of_values_to_search_ahead,
limit_of_number_of_values_below_threshold = limit_of_number_of_values_below_threshold,
)
if cut_index != -1:
# ROOT histograms are 1 indexed, so we add another 1.
cut_index += 1
return cut_index | Determine the location of where outliers begin in a 1D histogram.
When the moving average falls below the limit, we consider the outliers to have begun.
To determine the location of outliers:
- Calculate the moving average for number_of_values_to_search_ahead values.
- First, the moving average must go above the limit at least once to guard against a random cut
in a low pt bin causing most of the data to be cut out.
- Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
- If we meet that condition, we have found the index where the outliers begin. We then return the ROOT
bin index of the value.
- If not, we return -1.
Note:
The index returned is when the moving average first drops below the threshold for a moving average
calculated with that bin at the center. This is somewhat different from a standard moving average
calculation which would only look forward in the array.
Args:
hist: Histogram to be checked for outliers.
moving_average_threshold: Value of moving average under which we consider the moving average
to be 0. Default: 2.
number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
the moving average. Default: 5.
limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
Returns:
ROOT (ie 1-indexed) index of the histogram axes where the outliers begin. | Below is the instruction that describes the task:
### Input:
Determine the location of where outliers begin in a 1D histogram.
When the moving average falls below the limit, we consider the outliers to have begun.
To determine the location of outliers:
- Calculate the moving average for number_of_values_to_search_ahead values.
- First, the moving average must go above the limit at least once to guard against a random cut
in a low pt bin causing most of the data to be cut out.
- Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
- If we meet that condition, we have found the index where the outliers begin. We then return the ROOT
bin index of the value.
- If not, we return -1.
Note:
The index returned is when the moving average first drops below the threshold for a moving average
calculated with that bin at the center. This is somewhat different from a standard moving average
calculation which would only look forward in the array.
Args:
hist: Histogram to be checked for outliers.
moving_average_threshold: Value of moving average under which we consider the moving average
to be 0. Default: 2.
number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
the moving average. Default: 5.
limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
Returns:
ROOT (ie 1-indexed) index of the histogram axes where the outliers begin.
### Response:
def _determine_outliers_index(hist: Hist,
moving_average_threshold: float = 1.0,
number_of_values_to_search_ahead: int = 5,
limit_of_number_of_values_below_threshold: int = None) -> int:
""" Determine the location of where outliers begin in a 1D histogram.
When the moving average falls below the limit, we consider the outliers to have begun.
To determine the location of outliers:
- Calculate the moving average for number_of_values_to_search_ahead values.
- First, the moving average must go above the limit at least once to guard against a random cut
in a low pt bin causing most of the data to be cut out.
- Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
- If we meet that condition, we have found the index where the outliers begin. We then return the ROOT
bin index of the value.
- If not, we return -1.
Note:
The index returned is when the moving average first drops below the threshold for a moving average
calculated with that bin at the center. This is somewhat different from a standard moving average
calculation which would only look forward in the array.
Args:
hist: Histogram to be checked for outliers.
moving_average_threshold: Value of moving average under which we consider the moving average
to be 0. Default: 2.
number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
the moving average. Default: 5.
limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
Returns:
ROOT (ie 1-indexed) index of the histogram axes where the outliers begin.
"""
# Validation
import ROOT
if isinstance(hist, (ROOT.TH2, ROOT.TH3, ROOT.THnBase)):
raise ValueError(
f"Given histogram '{hist.GetName()}' of type {type(hist)}, but can only"
" determine the outlier location of a 1D histogram. Please project to"
" the particle level axis first."
)
if limit_of_number_of_values_below_threshold is None:
# In principle, this could be another value. However, this is what was used in the previous outliers
# removal implementation.
limit_of_number_of_values_below_threshold = number_of_values_to_search_ahead - 1
# It is much more convenient to work with a numpy array.
hist_to_check = histogram.Histogram1D.from_existing_hist(hist)
# Calculate the moving average for the entire axis, looking ahead including the current bin + 4 = 5 ahead.
number_of_values_to_search_ahead = 5
moving_average = utils.moving_average(hist_to_check.y, n = number_of_values_to_search_ahead)
#logger.debug(f"y: {hist_to_check.y}")
#logger.debug(f"moving_average: {moving_average}")
cut_index = _determine_outliers_for_moving_average(
moving_average = moving_average,
moving_average_threshold = moving_average_threshold,
number_of_values_to_search_ahead = number_of_values_to_search_ahead,
limit_of_number_of_values_below_threshold = limit_of_number_of_values_below_threshold,
)
if cut_index != -1:
# ROOT histograms are 1 indexed, so we add another 1.
cut_index += 1
return cut_index |
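utils.moving_average above is project-specific; a hedged numpy sketch of the same idea — smooth the bin contents with a short moving average, then scan for the first window that falls below a threshold — with the counts and threshold invented.
# numpy sketch of a moving average plus threshold scan; counts and threshold are invented.
import numpy as np

def moving_average(y, n=5):
    # plain forward-looking average over n consecutive values
    return np.convolve(y, np.ones(n) / n, mode="valid")

counts = np.array([50, 60, 55, 40, 30, 20, 8, 3, 1, 0, 0, 1, 0, 0, 0], dtype=float)
averaged = moving_average(counts, n=5)
threshold = 2.0
below = np.where(averaged < threshold)[0]
cut_index = int(below[0]) if below.size else -1  # -1 means no outlier region was found
print(cut_index, averaged)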
def live_source_load(self, source):
"""
Send new source code to the bot
:param source:
:param good_cb: callback called if code was good
:param bad_cb: callback called if code was bad (will get contents of exception)
:return:
"""
source = source.rstrip('\n')
if source != self.source:
self.source = source
b64_source = base64.b64encode(bytes(bytearray(source, "ascii")))
self.send_command(CMD_LOAD_BASE64, b64_source) | Send new source code to the bot
:param source:
:param good_cb: callback called if code was good
:param bad_cb: callback called if code was bad (will get contents of exception)
:return: | Below is the instruction that describes the task:
### Input:
Send new source code to the bot
:param source:
:param good_cb: callback called if code was good
:param bad_cb: callback called if code was bad (will get contents of exception)
:return:
### Response:
def live_source_load(self, source):
"""
Send new source code to the bot
:param source:
:param good_cb: callback called if code was good
:param bad_cb: callback called if code was bad (will get contents of exception)
:return:
"""
source = source.rstrip('\n')
if source != self.source:
self.source = source
b64_source = base64.b64encode(bytes(bytearray(source, "ascii")))
self.send_command(CMD_LOAD_BASE64, b64_source) |
def form_invalid(self, post_form, attachment_formset, **kwargs):
""" Processes invalid forms.
Called if one of the forms is invalid. Re-renders the context data with the data-filled
forms and errors.
"""
if (
attachment_formset and
not attachment_formset.is_valid() and
len(attachment_formset.errors)
):
messages.error(self.request, self.attachment_formset_general_error_message)
return self.render_to_response(
self.get_context_data(
post_form=post_form, attachment_formset=attachment_formset, **kwargs
),
) | Processes invalid forms.
Called if one of the forms is invalid. Re-renders the context data with the data-filled
forms and errors. | Below is the instruction that describes the task:
### Input:
Processes invalid forms.
Called if one of the forms is invalid. Re-renders the context data with the data-filled
forms and errors.
### Response:
def form_invalid(self, post_form, attachment_formset, **kwargs):
""" Processes invalid forms.
Called if one of the forms is invalid. Re-renders the context data with the data-filled
forms and errors.
"""
if (
attachment_formset and
not attachment_formset.is_valid() and
len(attachment_formset.errors)
):
messages.error(self.request, self.attachment_formset_general_error_message)
return self.render_to_response(
self.get_context_data(
post_form=post_form, attachment_formset=attachment_formset, **kwargs
),
) |
def prepare_full_example_2(lastdate='1996-01-05') -> (
hydpytools.HydPy, hydpy.pub, testtools.TestIO):
"""Prepare the complete `LahnH` project for testing.
|prepare_full_example_2| calls |prepare_full_example_1|, but also
returns a readily prepared |HydPy| instance, as well as module
|pub| and class |TestIO|, for convenience:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> hp.nodes
Nodes("dill", "lahn_1", "lahn_2", "lahn_3")
>>> hp.elements
Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3",
"stream_dill_lahn_2", "stream_lahn_1_lahn_2",
"stream_lahn_2_lahn_3")
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-01-05 00:00:00',
'1d'))
>>> from hydpy import classname
>>> classname(TestIO)
'TestIO'
The last date of the initialisation period is configurable:
>>> hp, pub, TestIO = prepare_full_example_2('1996-02-01')
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-02-01 00:00:00',
'1d'))
"""
prepare_full_example_1()
with testtools.TestIO():
hp = hydpytools.HydPy('LahnH')
hydpy.pub.timegrids = '1996-01-01', lastdate, '1d'
hp.prepare_everything()
return hp, hydpy.pub, testtools.TestIO | Prepare the complete `LahnH` project for testing.
|prepare_full_example_2| calls |prepare_full_example_1|, but also
returns a readily prepared |HydPy| instance, as well as module
|pub| and class |TestIO|, for convenience:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> hp.nodes
Nodes("dill", "lahn_1", "lahn_2", "lahn_3")
>>> hp.elements
Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3",
"stream_dill_lahn_2", "stream_lahn_1_lahn_2",
"stream_lahn_2_lahn_3")
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-01-05 00:00:00',
'1d'))
>>> from hydpy import classname
>>> classname(TestIO)
'TestIO'
The last date of the initialisation period is configurable:
>>> hp, pub, TestIO = prepare_full_example_2('1996-02-01')
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-02-01 00:00:00',
'1d')) | Below is the instruction that describes the task:
### Input:
Prepare the complete `LahnH` project for testing.
|prepare_full_example_2| calls |prepare_full_example_1|, but also
returns a readily prepared |HydPy| instance, as well as module
|pub| and class |TestIO|, for convenience:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> hp.nodes
Nodes("dill", "lahn_1", "lahn_2", "lahn_3")
>>> hp.elements
Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3",
"stream_dill_lahn_2", "stream_lahn_1_lahn_2",
"stream_lahn_2_lahn_3")
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-01-05 00:00:00',
'1d'))
>>> from hydpy import classname
>>> classname(TestIO)
'TestIO'
The last date of the initialisation period is configurable:
>>> hp, pub, TestIO = prepare_full_example_2('1996-02-01')
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-02-01 00:00:00',
'1d'))
### Response:
def prepare_full_example_2(lastdate='1996-01-05') -> (
hydpytools.HydPy, hydpy.pub, testtools.TestIO):
"""Prepare the complete `LahnH` project for testing.
|prepare_full_example_2| calls |prepare_full_example_1|, but also
returns a readily prepared |HydPy| instance, as well as module
|pub| and class |TestIO|, for convenience:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> hp.nodes
Nodes("dill", "lahn_1", "lahn_2", "lahn_3")
>>> hp.elements
Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3",
"stream_dill_lahn_2", "stream_lahn_1_lahn_2",
"stream_lahn_2_lahn_3")
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-01-05 00:00:00',
'1d'))
>>> from hydpy import classname
>>> classname(TestIO)
'TestIO'
The last date of the initialisation period is configurable:
>>> hp, pub, TestIO = prepare_full_example_2('1996-02-01')
>>> pub.timegrids
Timegrids(Timegrid('1996-01-01 00:00:00',
'1996-02-01 00:00:00',
'1d'))
"""
prepare_full_example_1()
with testtools.TestIO():
hp = hydpytools.HydPy('LahnH')
hydpy.pub.timegrids = '1996-01-01', lastdate, '1d'
hp.prepare_everything()
return hp, hydpy.pub, testtools.TestIO |
def delete_resource_scenario(scenario_id, resource_attr_id, quiet=False, **kwargs):
"""
Remove the data associated with a resource in a scenario.
"""
_check_can_edit_scenario(scenario_id, kwargs['user_id'])
_delete_resourcescenario(scenario_id, resource_attr_id, suppress_error=quiet) | Remove the data associated with a resource in a scenario. | Below is the instruction that describes the task:
### Input:
Remove the data associated with a resource in a scenario.
### Response:
def delete_resource_scenario(scenario_id, resource_attr_id, quiet=False, **kwargs):
"""
Remove the data associated with a resource in a scenario.
"""
_check_can_edit_scenario(scenario_id, kwargs['user_id'])
_delete_resourcescenario(scenario_id, resource_attr_id, suppress_error=quiet) |
def reorient(self, up, look):
'''
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
'''
from blmath.geometry.transform import rotation_from_up_and_look
from blmath.numerics import as_numeric_array
up = as_numeric_array(up, (3,))
look = as_numeric_array(look, (3,))
if self.v is not None:
self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T | Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera). | Below is the instruction that describes the task:
### Input:
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
### Response:
def reorient(self, up, look):
'''
Reorient the mesh by specifying two vectors.
up: The foot-to-head direction.
look: The direction the body is facing.
In the result, the up will end up along +y, and look along +z
(i.e. facing towards a default OpenGL camera).
'''
from blmath.geometry.transform import rotation_from_up_and_look
from blmath.numerics import as_numeric_array
up = as_numeric_array(up, (3,))
look = as_numeric_array(look, (3,))
if self.v is not None:
self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T |
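rotation_from_up_and_look comes from blmath and is not shown here; a hedged numpy sketch of one common construction — an orthonormal basis whose rows send look to +z and, when up is perpendicular to look, up to +y. This illustrates the idea only and is not blmath's implementation.
# Hedged numpy sketch of building a rotation from up/look vectors (not blmath's code).
import numpy as np

def rotation_from_up_and_look_sketch(up, look):
    z = look / np.linalg.norm(look)  # look direction becomes +z
    x = np.cross(up, z)              # sideways axis, perpendicular to up and look
    x = x / np.linalg.norm(x)
    y = np.cross(z, x)               # completes the right-handed basis, roughly along up
    return np.vstack([x, y, z])      # rows are the new basis vectors

R = rotation_from_up_and_look_sketch(up=np.array([0.0, 0.0, 1.0]),
                                     look=np.array([0.0, -1.0, 0.0]))
print(R @ np.array([0.0, -1.0, 0.0]))  # look maps to ~[0, 0, 1]
print(R @ np.array([0.0, 0.0, 1.0]))   # up maps to ~[0, 1, 0] when up is orthogonal to look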
def _spellchecker_for(word_set,
name,
spellcheck_cache_path=None,
sources=None):
"""Get a whoosh spellchecker for :word_set:.
The word graph for this spellchecker will be stored on-disk with
the unique-name :name: in :spellcheck_cache_path:, if it exists.
This allows for much faster loading of word graphs after they have been
pre-populated.
:sources: is a list of filenames which will be checked to see if they
are newer than the stored word graph. If they are newer, then the word
graph gets repopulated.
"""
assert "/" not in name and "\\" not in name
if _spellchecker_cache.get(name, None) is not None:
return _spellchecker_cache[name].corrector
# Check the modification time of all the paths in :sources: to see
# if they've been modified since the cache file was created. If so,
# delete the cache file. This will cause it to be regenerated.
#
# Note that this relies on an implementation detail in whoosh, namely
# that the cache file is always stored at spellcheck_cache_path/name.
if spellcheck_cache_path:
# Ensure that the directory has been created
try:
os.makedirs(spellcheck_cache_path)
except OSError as error:
if error.errno != errno.EEXIST: # suppress(PYC90)
raise error
graph_path = os.path.realpath(spellcheck_cache_path)
file_storage = FileStorage(graph_path)
preexisting_cache = os.path.abspath(os.path.join(spellcheck_cache_path,
name))
if os.path.exists(preexisting_cache):
cache_mtime = os.path.getmtime(preexisting_cache)
for source in sources:
source_path = os.path.realpath(source)
if not os.path.exists(source_path):
continue
if os.path.getmtime(source_path) > cache_mtime:
file_storage.delete_file(name)
break
try:
word_graph = copy_to_ram(file_storage).open_file(name)
except (IOError, NameError):
word_graph = _create_word_graph_file(name, file_storage, word_set)
else:
ram_storage = RamStorage()
word_graph = _create_word_graph_file(name, ram_storage, word_set)
reader = fst.GraphReader(word_graph)
corrector = spelling.GraphCorrector(reader)
_spellchecker_cache[name] = SpellcheckerCacheEntry(corrector, reader)
return corrector | Get a whoosh spellchecker for :word_set:.
The word graph for this spellchecker will be stored on-disk with
the unique-name :name: in :spellcheck_cache_path:, if it exists.
This allows for much faster loading of word graphs after they have been
pre-populated.
:sources: is a list of filenames which will be checked to see if they
are newer than the stored word graph. If they are newer, then the word
graph gets repopulated. | Below is the instruction that describes the task:
### Input:
Get a whoosh spellchecker for :word_set:.
The word graph for this spellchecker will be stored on-disk with
the unique-name :name: in :spellcheck_cache_path:, if it exists.
This allows for much faster loading of word graphs after they have been
pre-populated.
:sources: is a list of filenames which will be checked to see if they
are newer than the stored word graph. If they are newer, then the word
graph gets repopulated.
### Response:
def _spellchecker_for(word_set,
name,
spellcheck_cache_path=None,
sources=None):
"""Get a whoosh spellchecker for :word_set:.
The word graph for this spellchecker will be stored on-disk with
the unique-name :name: in :spellcheck_cache_path:, if it exists.
This allows for much faster loading of word graphs after they have been
pre-populated.
:sources: is a list of filenames which will be checked to see if they
are newer than the stored word graph. If they are newer, then the word
graph gets repopulated.
"""
assert "/" not in name and "\\" not in name
if _spellchecker_cache.get(name, None) is not None:
return _spellchecker_cache[name].corrector
# Check the modification time of all the paths in :sources: to see
# if they've been modified since the cache file was created. If so,
# delete the cache file. This will cause it to be regenerated.
#
# Note that this relies on an implementation detail in whoosh, namely
# that the cache file is always stored at spellcheck_cache_path/name.
if spellcheck_cache_path:
# Ensure that the directory has been created
try:
os.makedirs(spellcheck_cache_path)
except OSError as error:
if error.errno != errno.EEXIST: # suppress(PYC90)
raise error
graph_path = os.path.realpath(spellcheck_cache_path)
file_storage = FileStorage(graph_path)
preexisting_cache = os.path.abspath(os.path.join(spellcheck_cache_path,
name))
if os.path.exists(preexisting_cache):
cache_mtime = os.path.getmtime(preexisting_cache)
for source in sources:
source_path = os.path.realpath(source)
if not os.path.exists(source_path):
continue
if os.path.getmtime(source_path) > cache_mtime:
file_storage.delete_file(name)
break
try:
word_graph = copy_to_ram(file_storage).open_file(name)
except (IOError, NameError):
word_graph = _create_word_graph_file(name, file_storage, word_set)
else:
ram_storage = RamStorage()
word_graph = _create_word_graph_file(name, ram_storage, word_set)
reader = fst.GraphReader(word_graph)
corrector = spelling.GraphCorrector(reader)
_spellchecker_cache[name] = SpellcheckerCacheEntry(corrector, reader)
return corrector |
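A hedged sketch of how the helper above might be called; the word set, cache directory, and source file names are illustrative only, and suggest() is the standard whoosh Corrector method for querying corrections.

corrector = _spellchecker_for(word_set={"whoosh", "spelling", "graph"},
                              name="example_words",
                              spellcheck_cache_path="/tmp/spellcheck-cache",  # hypothetical cache dir
                              sources=["wordlist.txt"])                       # hypothetical source file
print(corrector.suggest("speling", limit=3))  # e.g. ['spelling']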
def command(self, command, value=1, check=True,
allowable_errors=None, read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, **kwargs):
"""Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `read_preference`: The read preference for this operation.
See :mod:`~pymongo.read_preferences` for options.
- `codec_options`: A :class:`~bson.codec_options.CodecOptions`
instance.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: :meth:`command` does **not** obey :attr:`read_preference`
or :attr:`codec_options`. You must use the `read_preference` and
`codec_options` parameters instead.
.. versionchanged:: 3.0
Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
and `secondary_acceptable_latency_ms` option.
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Added the `codec_options` parameter.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: commands
"""
client = self.__client
with client._socket_for_reads(read_preference) as (sock_info, slave_ok):
return self._command(sock_info, command, slave_ok, value,
check, allowable_errors, read_preference,
codec_options, **kwargs) | Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `read_preference`: The read preference for this operation.
See :mod:`~pymongo.read_preferences` for options.
- `codec_options`: A :class:`~bson.codec_options.CodecOptions`
instance.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: :meth:`command` does **not** obey :attr:`read_preference`
or :attr:`codec_options`. You must use the `read_preference` and
`codec_options` parameters instead.
.. versionchanged:: 3.0
Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
and `secondary_acceptable_latency_ms` option.
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Added the `codec_options` parameter.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: commands | Below is the the instruction that describes the task:
### Input:
Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `read_preference`: The read preference for this operation.
See :mod:`~pymongo.read_preferences` for options.
- `codec_options`: A :class:`~bson.codec_options.CodecOptions`
instance.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: :meth:`command` does **not** obey :attr:`read_preference`
or :attr:`codec_options`. You must use the `read_preference` and
`codec_options` parameters instead.
.. versionchanged:: 3.0
Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
and `secondary_acceptable_latency_ms` option.
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Added the `codec_options` parameter.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: commands
### Response:
def command(self, command, value=1, check=True,
allowable_errors=None, read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, **kwargs):
"""Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `read_preference`: The read preference for this operation.
See :mod:`~pymongo.read_preferences` for options.
- `codec_options`: A :class:`~bson.codec_options.CodecOptions`
instance.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: :meth:`command` does **not** obey :attr:`read_preference`
or :attr:`codec_options`. You must use the `read_preference` and
`codec_options` parameters instead.
.. versionchanged:: 3.0
Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
and `secondary_acceptable_latency_ms` option.
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Added the `codec_options` parameter.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: commands
"""
client = self.__client
with client._socket_for_reads(read_preference) as (sock_info, slave_ok):
return self._command(sock_info, command, slave_ok, value,
check, allowable_errors, read_preference,
codec_options, **kwargs) |
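A hedged end-to-end sketch (the connection string and collection name are assumptions) exercising the string-plus-value form and the read_preference parameter documented above.

from pymongo import MongoClient, ReadPreference

client = MongoClient("mongodb://localhost:27017/")  # hypothetical deployment
db = client.get_database("test")
stats = db.command("collstats", "events",           # sent as {collstats: "events"}
                   read_preference=ReadPreference.PRIMARY)
print(stats["count"], stats["size"])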
def _setup_logging(self) -> None:
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any(
[
logging.getLogger().handlers,
logging.getLogger("tornado").handlers,
logging.getLogger("tornado.application").handlers,
]
):
logging.basicConfig() | The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses. | Below is the the instruction that describes the task:
### Input:
The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
### Response:
def _setup_logging(self) -> None:
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any(
[
logging.getLogger().handlers,
logging.getLogger("tornado").handlers,
logging.getLogger("tornado.application").handlers,
]
):
logging.basicConfig() |
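The same guard can be reproduced standalone, which makes the intent easier to see: only fall back to logging.basicConfig() when neither the root logger nor tornado's loggers have been configured by the embedding application.

import logging

def ensure_visible_logs() -> None:
    # Mirror of the check above: respect any handlers already installed,
    # otherwise install a minimal default configuration.
    already_configured = any([
        logging.getLogger().handlers,
        logging.getLogger("tornado").handlers,
        logging.getLogger("tornado.application").handlers,
    ])
    if not already_configured:
        logging.basicConfig()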
def set_pwm(self, channel, on, off):
"""Sets a single PWM channel."""
self.i2c.write8(LED0_ON_L+4*channel, on & 0xFF)
self.i2c.write8(LED0_ON_H+4*channel, on >> 8)
self.i2c.write8(LED0_OFF_L+4*channel, off & 0xFF)
self.i2c.write8(LED0_OFF_H+4*channel, off >> 8) | Sets a single PWM channel. | Below is the the instruction that describes the task:
### Input:
Sets a single PWM channel.
### Response:
def set_pwm(self, channel, on, off):
"""Sets a single PWM channel."""
self.i2c.write8(LED0_ON_L+4*channel, on & 0xFF)
self.i2c.write8(LED0_ON_H+4*channel, on >> 8)
self.i2c.write8(LED0_OFF_L+4*channel, off & 0xFF)
self.i2c.write8(LED0_OFF_H+4*channel, off >> 8) |
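A hedged usage sketch for a PCA9685-style driver: the constructor and frequency helper are assumptions modeled on common breakouts, and only set_pwm(channel, on, off) itself comes from the code above (on/off are tick counts from 0-4095 within one PWM period).

pwm = PCA9685(address=0x40)  # hypothetical constructor for the driver class
pwm.set_pwm_freq(60)         # hypothetical helper; ~60 Hz is a typical servo period
pwm.set_pwm(0, 0, 375)       # channel 0: high at tick 0, low at tick 375 of 4096 (~9% duty, ~1.5 ms pulse)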
def from_group(cls, group):
"""
Construct tags from the regex group
"""
if not group:
return
tag_items = group.split(";")
return list(map(cls.parse, tag_items)) | Construct tags from the regex group | Below is the the instruction that describes the task:
### Input:
Construct tags from the regex group
### Response:
def from_group(cls, group):
"""
Construct tags from the regex group
"""
if not group:
return
tag_items = group.split(";")
return list(map(cls.parse, tag_items)) |
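A hedged sketch of the expected call; the class name, the parse() behaviour, and the example group string are assumptions inferred only from the cls.parse call and the ";" split above.

tags = Tag.from_group("key:value;flag")  # hypothetical Tag class -> [Tag.parse("key:value"), Tag.parse("flag")]
none_result = Tag.from_group("")         # a falsy group returns None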
def append_fresh_table(self, fresh_table):
"""
Gets called by FreshTable instances when they get written to.
"""
if fresh_table.name:
elements = []
if fresh_table.is_array:
elements += [element_factory.create_array_of_tables_header_element(fresh_table.name)]
else:
elements += [element_factory.create_table_header_element(fresh_table.name)]
elements += [fresh_table, element_factory.create_newline_element()]
self.append_elements(elements)
else:
# It's an anonymous table
self.prepend_elements([fresh_table, element_factory.create_newline_element()]) | Gets called by FreshTable instances when they get written to. | Below is the the instruction that describes the task:
### Input:
Gets called by FreshTable instances when they get written to.
### Response:
def append_fresh_table(self, fresh_table):
"""
Gets called by FreshTable instances when they get written to.
"""
if fresh_table.name:
elements = []
if fresh_table.is_array:
elements += [element_factory.create_array_of_tables_header_element(fresh_table.name)]
else:
elements += [element_factory.create_table_header_element(fresh_table.name)]
elements += [fresh_table, element_factory.create_newline_element()]
self.append_elements(elements)
else:
# It's an anonymous table
self.prepend_elements([fresh_table, element_factory.create_newline_element()]) |
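A hedged sketch of the interaction the docstring describes; every class name besides the method itself is an assumption based on the prettytoml/contoml-style element API implied by element_factory.

toml_file = TOMLFile([])  # hypothetical container that owns the element list
fresh = FreshTable(parent=toml_file, name="server", is_array=False)
fresh["host"] = "127.0.0.1"  # the first write makes the FreshTable call toml_file.append_fresh_table(fresh)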
def estimate_bitstring_probs(results):
"""
Given an array of single shot results estimate the probability distribution over all bitstrings.
:param np.array results: A 2d array where the outer axis iterates over shots
and the inner axis over bits.
:return: An array with as many axes as there are qubit and normalized such that it sums to one.
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:rtype: np.array
"""
nshots, nq = np.shape(results)
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
return _bitstring_probs_by_qubit(probs) | Given an array of single shot results estimate the probability distribution over all bitstrings.
:param np.array results: A 2d array where the outer axis iterates over shots
and the inner axis over bits.
:return: An array with as many axes as there are qubit and normalized such that it sums to one.
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:rtype: np.array | Below is the the instruction that describes the task:
### Input:
Given an array of single shot results estimate the probability distribution over all bitstrings.
:param np.array results: A 2d array where the outer axis iterates over shots
and the inner axis over bits.
:return: An array with as many axes as there are qubit and normalized such that it sums to one.
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:rtype: np.array
### Response:
def estimate_bitstring_probs(results):
"""
Given an array of single shot results estimate the probability distribution over all bitstrings.
:param np.array results: A 2d array where the outer axis iterates over shots
and the inner axis over bits.
:return: An array with as many axes as there are qubit and normalized such that it sums to one.
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:rtype: np.array
"""
nshots, nq = np.shape(results)
outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
return _bitstring_probs_by_qubit(probs) |
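A minimal worked example (it assumes the module-private _bitstring_probs_by_qubit helper referenced above is available alongside the function): three shots on two qubits, one measuring 00 and two measuring 11.

import numpy as np

results = np.array([[0, 0],
                    [1, 1],
                    [1, 1]])
p = estimate_bitstring_probs(results)  # shape (2, 2), indexed as p[q0_bit, q1_bit]
print(p[0, 0], p[1, 1])                # -> 0.3333..., 0.6666...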