code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None):
"""
Fetch all the configuration supported by Scout2 for a given service
:param credentials: F
:param service: Name of the service
:param regions: Name of regions to fetch data from
:param partition_name: AWS partition to connect to
:param targets: Type of resources to be fetched; defaults to all.
"""
# Initialize targets
# Tweak params
realtargets = ()
if not targets:
targets = self.targets
for i, target in enumerate(targets['first_region']):
params = self.tweak_params(target[3], credentials)
realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
targets['first_region'] = realtargets
realtargets = ()
for i, target in enumerate(targets['other_regions']):
params = self.tweak_params(target[3], credentials)
realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
targets['other_regions'] = realtargets
printInfo('Fetching %s config...' % format_service_name(self.service))
self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
# Init regions
regions = build_region_list(api_service, regions, partition_name) # TODO: move this code within this class
self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
# Threading to fetch & parse resources (queue consumer)
q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
# Threading to list resources (queue feeder)
qr = self._init_threading(self._fetch_region,
{'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()},
self.thread_config['list'])
# Go
for i, region in enumerate(regions):
qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
# Join
qr.join()
q.join()
# Show completion and force newline
self.fetchstatuslogger.show(True) | Fetch all the configuration supported by Scout2 for a given service
:param credentials: F
:param service: Name of the service
:param regions: Name of regions to fetch data from
:param partition_name: AWS partition to connect to
:param targets: Type of resources to be fetched; defaults to all. | Below is the instruction that describes the task:
### Input:
Fetch all the configuration supported by Scout2 for a given service
:param credentials: F
:param service: Name of the service
:param regions: Name of regions to fetch data from
:param partition_name: AWS partition to connect to
:param targets: Type of resources to be fetched; defaults to all.
### Response:
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None):
"""
Fetch all the configuration supported by Scout2 for a given service
:param credentials: F
:param service: Name of the service
:param regions: Name of regions to fetch data from
:param partition_name: AWS partition to connect to
:param targets: Type of resources to be fetched; defaults to all.
"""
# Initialize targets
# Tweak params
realtargets = ()
if not targets:
targets = self.targets
for i, target in enumerate(targets['first_region']):
params = self.tweak_params(target[3], credentials)
realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
targets['first_region'] = realtargets
realtargets = ()
for i, target in enumerate(targets['other_regions']):
params = self.tweak_params(target[3], credentials)
realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
targets['other_regions'] = realtargets
printInfo('Fetching %s config...' % format_service_name(self.service))
self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
# Init regions
regions = build_region_list(api_service, regions, partition_name) # TODO: move this code within this class
self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
# Threading to fetch & parse resources (queue consumer)
q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
# Threading to list resources (queue feeder)
qr = self._init_threading(self._fetch_region,
{'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()},
self.thread_config['list'])
# Go
for i, region in enumerate(regions):
qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
# Join
qr.join()
q.join()
# Show completion and force newline
self.fetchstatuslogger.show(True) |
def multiply(self, other):
"""Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Chi: the scalar multiplication other * self as a Chi object.
Raises:
QiskitError: if other is not a valid scalar.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return Chi(other * self._data, self._input_dims, self._output_dims) | Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Chi: the scalar multiplication other * self as a Chi object.
Raises:
QiskitError: if other is not a valid scalar. | Below is the instruction that describes the task:
### Input:
Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Chi: the scalar multiplication other * self as a Chi object.
Raises:
QiskitError: if other is not a valid scalar.
### Response:
def multiply(self, other):
"""Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Chi: the scalar multiplication other * self as a Chi object.
Raises:
QiskitError: if other is not a valid scalar.
"""
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return Chi(other * self._data, self._input_dims, self._output_dims) |
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path):
""" Starts up a VM, builds a Docker image and gets it to the VM, runs the script over SSH, returns result.
Stops the VM if ``keep_vm_running`` is not set.
"""
from fabric import api
from fabric.exceptions import CommandTimeout
# start up or get running VM
vm_location = self.get_vm_location()
self.ensure_vm_running(vm_location)
logger.info("Running with VM located at %s", vm_location)
# pushes the image to the registry so it can be pulled in the VM
self.check_docker_access() # init client
self.get_image_for_repo(repo, branch, git_repo, repo_path)
requirements_option, requirements_hash = self.get_requirements_information(repo_path)
# getting things needed for execution over SSH
image_tag = self.get_image_tag(requirements_option, requirements_hash, self.get_dependencies())
image_name = self.use_registry_name
task_filename, task_json = self.serialized_task(task)
(vm_location / task_filename).write_text(task_json)
container_name = self.get_container_name(repo, branch, git_repo)
# setting up Fabric
api.env.hosts = [self.vagrant.user_hostname_port()]
api.env.key_filename = self.vagrant.keyfile()
api.env.disable_known_hosts = True # useful for when the vagrant box ip changes.
api.env.abort_exception = BuildError # raises SystemExit otherwise
api.env.shell = "/bin/sh -l -c"
if self.quiet:
api.output.everything = False
else:
api.output.everything = True
# executes the task
try:
res = api.execute(self.fabric_task,
container_name=container_name,
definition_filename=task_filename,
image_name=image_name,
image_tag=image_tag,
repository=str(repo_path.relative_to(Path(self._arca.base_dir).resolve() / 'repos')),
timeout=task.timeout)
return Result(res[self.vagrant.user_hostname_port()].stdout)
except CommandTimeout:
raise BuildTimeoutError(f"The task timed out after {task.timeout} seconds.")
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
raise BuildError("The build failed", extra_info={
"exception": e
})
finally:
# stops or destroys the VM if it should not be kept running
if not self.keep_vm_running:
if self.destroy:
self.vagrant.destroy()
shutil.rmtree(self.vagrant.root, ignore_errors=True)
self.vagrant = None
else:
self.vagrant.halt() | Starts up a VM, builds a Docker image and gets it to the VM, runs the script over SSH, returns result.
Stops the VM if ``keep_vm_running`` is not set. | Below is the instruction that describes the task:
### Input:
Starts up a VM, builds a Docker image and gets it to the VM, runs the script over SSH, returns result.
Stops the VM if ``keep_vm_running`` is not set.
### Response:
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path):
""" Starts up a VM, builds a Docker image and gets it to the VM, runs the script over SSH, returns result.
Stops the VM if ``keep_vm_running`` is not set.
"""
from fabric import api
from fabric.exceptions import CommandTimeout
# start up or get running VM
vm_location = self.get_vm_location()
self.ensure_vm_running(vm_location)
logger.info("Running with VM located at %s", vm_location)
# pushes the image to the registry so it can be pulled in the VM
self.check_docker_access() # init client
self.get_image_for_repo(repo, branch, git_repo, repo_path)
requirements_option, requirements_hash = self.get_requirements_information(repo_path)
# getting things needed for execution over SSH
image_tag = self.get_image_tag(requirements_option, requirements_hash, self.get_dependencies())
image_name = self.use_registry_name
task_filename, task_json = self.serialized_task(task)
(vm_location / task_filename).write_text(task_json)
container_name = self.get_container_name(repo, branch, git_repo)
# setting up Fabric
api.env.hosts = [self.vagrant.user_hostname_port()]
api.env.key_filename = self.vagrant.keyfile()
api.env.disable_known_hosts = True # useful for when the vagrant box ip changes.
api.env.abort_exception = BuildError # raises SystemExit otherwise
api.env.shell = "/bin/sh -l -c"
if self.quiet:
api.output.everything = False
else:
api.output.everything = True
# executes the task
try:
res = api.execute(self.fabric_task,
container_name=container_name,
definition_filename=task_filename,
image_name=image_name,
image_tag=image_tag,
repository=str(repo_path.relative_to(Path(self._arca.base_dir).resolve() / 'repos')),
timeout=task.timeout)
return Result(res[self.vagrant.user_hostname_port()].stdout)
except CommandTimeout:
raise BuildTimeoutError(f"The task timed out after {task.timeout} seconds.")
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
raise BuildError("The build failed", extra_info={
"exception": e
})
finally:
# stops or destroys the VM if it should not be kept running
if not self.keep_vm_running:
if self.destroy:
self.vagrant.destroy()
shutil.rmtree(self.vagrant.root, ignore_errors=True)
self.vagrant = None
else:
self.vagrant.halt() |
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename) | Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files. | Below is the instruction that describes the task:
### Input:
Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
### Response:
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename) |
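The regex in the loop above is what decides which filenames count as Python files. A minimal standalone sketch of that filter, with a hypothetical helper name and made-up filenames for illustration:
import re

def looks_like_python_file(filename):
    # Must end in .py or .pyw and contain none of the editor-junk
    # characters excluded by the character class (e.g. '#', '~', '!').
    return re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename) is not None

print(looks_like_python_file("module.py"))    # True
print(looks_like_python_file(".#module.py"))  # False (editor lock file)
print(looks_like_python_file("notes.txt"))    # False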
def bind(self):
"""Activate this Shader, making it the currently-bound program.
Any Mesh.draw() calls after bind() will have their data processed by this Shader. To unbind, call Shader.unbind().
Example::
shader.bind()
mesh.draw()
shader.unbind()
.. note:: Shader.bind() and Shader.unbind() can also be called implicitly by using the 'with' statement.
Example of with statement with Shader::
with shader:
mesh.draw()
"""
if not self.is_linked:
if not self.is_compiled:
self.compile()
self.link()
super(self.__class__, self).bind() | Activate this Shader, making it the currently-bound program.
Any Mesh.draw() calls after bind() will have their data processed by this Shader. To unbind, call Shader.unbind().
Example::
shader.bind()
mesh.draw()
shader.unbind()
.. note:: Shader.bind() and Shader.unbind() can also be called implicitly by using the 'with' statement.
Example of with statement with Shader::
with shader:
mesh.draw() | Below is the instruction that describes the task:
### Input:
Activate this Shader, making it the currently-bound program.
Any Mesh.draw() calls after bind() will have their data processed by this Shader. To unbind, call Shader.unbind().
Example::
shader.bind()
mesh.draw()
shader.unbind()
.. note:: Shader.bind() and Shader.unbind() can also be called implicitly by using the 'with' statement.
Example of with statement with Shader::
with shader:
mesh.draw()
### Response:
def bind(self):
"""Activate this Shader, making it the currently-bound program.
Any Mesh.draw() calls after bind() will have their data processed by this Shader. To unbind, call Shader.unbind().
Example::
shader.bind()
mesh.draw()
shader.unbind()
.. note:: Shader.bind() and Shader.unbind() can also be called implicitly by using the 'with' statement.
Example of with statement with Shader::
with shader:
mesh.draw()
"""
if not self.is_linked:
if not self.is_compiled:
self.compile()
self.link()
super(self.__class__, self).bind() |
def collections(self):
"""List top-level collections of the client's database.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document.
"""
iterator = self._firestore_api.list_collection_ids(
self._database_string, metadata=self._rpc_metadata
)
iterator.client = self
iterator.item_to_value = _item_to_collection_ref
return iterator | List top-level collections of the client's database.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document. | Below is the instruction that describes the task:
### Input:
List top-level collections of the client's database.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document.
### Response:
def collections(self):
"""List top-level collections of the client's database.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document.
"""
iterator = self._firestore_api.list_collection_ids(
self._database_string, metadata=self._rpc_metadata
)
iterator.client = self
iterator.item_to_value = _item_to_collection_ref
return iterator |
def get_bounding_box(self):
"""
Returns the bounding box for this cell.
Returns
-------
out : Numpy array[2,2] or ``None``
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or ``None`` if the cell is empty.
"""
if len(self.elements) == 0:
return None
if not (self._bb_valid and
all(ref._bb_valid for ref in self.get_dependencies(True))):
bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))
all_polygons = []
for element in self.elements:
if isinstance(element, PolygonSet):
all_polygons.extend(element.polygons)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
element_bb = element.get_bounding_box()
if element_bb is not None:
bb[0, 0] = min(bb[0, 0], element_bb[0, 0])
bb[0, 1] = min(bb[0, 1], element_bb[0, 1])
bb[1, 0] = max(bb[1, 0], element_bb[1, 0])
bb[1, 1] = max(bb[1, 1], element_bb[1, 1])
if len(all_polygons) > 0:
all_points = numpy.concatenate(all_polygons).transpose()
bb[0, 0] = min(bb[0, 0], all_points[0].min())
bb[0, 1] = min(bb[0, 1], all_points[1].min())
bb[1, 0] = max(bb[1, 0], all_points[0].max())
bb[1, 1] = max(bb[1, 1], all_points[1].max())
self._bb_valid = True
_bounding_boxes[self] = bb
return _bounding_boxes[self] | Returns the bounding box for this cell.
Returns
-------
out : Numpy array[2,2] or ``None``
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or ``None`` if the cell is empty. | Below is the instruction that describes the task:
### Input:
Returns the bounding box for this cell.
Returns
-------
out : Numpy array[2,2] or ``None``
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or ``None`` if the cell is empty.
### Response:
def get_bounding_box(self):
"""
Returns the bounding box for this cell.
Returns
-------
out : Numpy array[2,2] or ``None``
Bounding box of this cell [[x_min, y_min], [x_max, y_max]],
or ``None`` if the cell is empty.
"""
if len(self.elements) == 0:
return None
if not (self._bb_valid and
all(ref._bb_valid for ref in self.get_dependencies(True))):
bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))
all_polygons = []
for element in self.elements:
if isinstance(element, PolygonSet):
all_polygons.extend(element.polygons)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
element_bb = element.get_bounding_box()
if element_bb is not None:
bb[0, 0] = min(bb[0, 0], element_bb[0, 0])
bb[0, 1] = min(bb[0, 1], element_bb[0, 1])
bb[1, 0] = max(bb[1, 0], element_bb[1, 0])
bb[1, 1] = max(bb[1, 1], element_bb[1, 1])
if len(all_polygons) > 0:
all_points = numpy.concatenate(all_polygons).transpose()
bb[0, 0] = min(bb[0, 0], all_points[0].min())
bb[0, 1] = min(bb[0, 1], all_points[1].min())
bb[1, 0] = max(bb[1, 0], all_points[0].max())
bb[1, 1] = max(bb[1, 1], all_points[1].max())
self._bb_valid = True
_bounding_boxes[self] = bb
return _bounding_boxes[self] |
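The method above grows a single [[x_min, y_min], [x_max, y_max]] array by taking element-wise minima and maxima over its children. A minimal sketch of that merge step with two hypothetical child boxes (assumes only numpy):
import numpy as np

boxes = [np.array([[0.0, 0.0], [2.0, 1.0]]),
         np.array([[-1.0, 0.5], [1.5, 3.0]])]
bb = np.array(((1e300, 1e300), (-1e300, -1e300)))
for child in boxes:
    bb[0] = np.minimum(bb[0], child[0])  # lower-left corner
    bb[1] = np.maximum(bb[1], child[1])  # upper-right corner
print(bb)  # [[-1.  0.]  [ 2.  3.]]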
def y_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix about the y axis.
"""
R = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
return R | Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix about the y axis. | Below is the instruction that describes the task:
### Input:
Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix about the y axis.
### Response:
def y_axis_rotation(theta):
"""Generates a 3x3 rotation matrix for a rotation of angle
theta about the y axis.
Parameters
----------
theta : float
amount to rotate, in radians
Returns
-------
:obj:`numpy.ndarray` of float
The 3x3 rotation matrix about the y axis.
"""
R = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
return R |
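A quick sanity check of the matrix above: rotating the x unit vector by 90 degrees about y should send it to -z. The function body is repeated here only so the sketch is self-contained:
import numpy as np

def y_axis_rotation(theta):
    return np.array([[np.cos(theta), 0, np.sin(theta)],
                     [0, 1, 0],
                     [-np.sin(theta), 0, np.cos(theta)]])

R = y_axis_rotation(np.pi / 2)
print(np.round(R.dot([1.0, 0.0, 0.0]), 6))  # [ 0.  0. -1.]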
def get(self, instance, acl):
"""Get an ACL by ID belonging to the instance specified by name.
:param str instance: The name of the instance from which to fetch the ACL.
:param str acl: The ID of the ACL to fetch.
:returns: An :py:class:`Acl` object, or None if ACL does not exist.
:rtype: :py:class:`Acl`
"""
base_url = self._url.format(instance=instance)
url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl(data) | Get an ACL by ID belonging to the instance specified by name.
:param str instance: The name of the instance from which to fetch the ACL.
:param str acl: The ID of the ACL to fetch.
:returns: An :py:class:`Acl` object, or None if ACL does not exist.
:rtype: :py:class:`Acl` | Below is the instruction that describes the task:
### Input:
Get an ACL by ID belonging to the instance specified by name.
:param str instance: The name of the instance from which to fetch the ACL.
:param str acl: The ID of the ACL to fetch.
:returns: An :py:class:`Acl` object, or None if ACL does not exist.
:rtype: :py:class:`Acl`
### Response:
def get(self, instance, acl):
"""Get an ACL by ID belonging to the instance specified by name.
:param str instance: The name of the instance from which to fetch the ACL.
:param str acl: The ID of the ACL to fetch.
:returns: An :py:class:`Acl` object, or None if ACL does not exist.
:rtype: :py:class:`Acl`
"""
base_url = self._url.format(instance=instance)
url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl(data) |
def encode_eternal_jwt_token(self, user, **custom_claims):
"""
This utility function encodes a jwt token that never expires
.. note:: This should be used sparingly since the token could become
a security concern if it is ever lost. If you use this
method, you should be sure that your application also
implements a blacklist so that a given token can be blocked
should it be lost or become a security concern
"""
return self.encode_jwt_token(
user,
override_access_lifespan=VITAM_AETERNUM,
override_refresh_lifespan=VITAM_AETERNUM,
**custom_claims
) | This utility function encodes a jwt token that never expires
.. note:: This should be used sparingly since the token could become
a security concern if it is ever lost. If you use this
method, you should be sure that your application also
implements a blacklist so that a given token can be blocked
should it be lost or become a security concern | Below is the instruction that describes the task:
### Input:
This utility function encodes a jwt token that never expires
.. note:: This should be used sparingly since the token could become
a security concern if it is ever lost. If you use this
method, you should be sure that your application also
implements a blacklist so that a given token can be blocked
should it be lost or become a security concern
### Response:
def encode_eternal_jwt_token(self, user, **custom_claims):
"""
This utility function encodes a jwt token that never expires
.. note:: This should be used sparingly since the token could become
a security concern if it is ever lost. If you use this
method, you should be sure that your application also
implements a blacklist so that a given token can be blocked
should it be lost or become a security concern
"""
return self.encode_jwt_token(
user,
override_access_lifespan=VITAM_AETERNUM,
override_refresh_lifespan=VITAM_AETERNUM,
**custom_claims
) |
def GetFeedItemIdsForCampaign(campaign_feed):
"""Gets the Feed Item Ids used by a campaign through a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.
Returns:
A set of Feed Item IDs.
"""
feed_item_ids = set()
try:
lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
except KeyError:
lhs_operand = None
if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==
'RequestContextOperand'):
request_context_operand = lhs_operand[0]
if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and
campaign_feed['matchingFunction']['operator'] == 'IN'):
for argument in campaign_feed['matchingFunction']['rhsOperand']:
if argument['xsi_type'] == 'ConstantOperand':
feed_item_ids.add(argument['longValue'])
return feed_item_ids | Gets the Feed Item Ids used by a campaign through a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.
Returns:
A set of Feed Item IDs. | Below is the instruction that describes the task:
### Input:
Gets the Feed Item Ids used by a campaign through a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.
Returns:
A set of Feed Item IDs.
### Response:
def GetFeedItemIdsForCampaign(campaign_feed):
"""Gets the Feed Item Ids used by a campaign through a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.
Returns:
A set of Feed Item IDs.
"""
feed_item_ids = set()
try:
lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
except KeyError:
lhs_operand = None
if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==
'RequestContextOperand'):
request_context_operand = lhs_operand[0]
if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and
campaign_feed['matchingFunction']['operator'] == 'IN'):
for argument in campaign_feed['matchingFunction']['rhsOperand']:
if argument['xsi_type'] == 'ConstantOperand':
feed_item_ids.add(argument['longValue'])
return feed_item_ids |
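To make the nested lookups above concrete, here is a hypothetical campaign_feed dictionary in the shape the function expects; the ID values are made up:
campaign_feed = {
    'matchingFunction': {
        'operator': 'IN',
        'lhsOperand': [{
            'FunctionArgumentOperand.Type': 'RequestContextOperand',
            'contextType': 'FEED_ITEM_ID',
        }],
        'rhsOperand': [
            {'xsi_type': 'ConstantOperand', 'longValue': 111},
            {'xsi_type': 'ConstantOperand', 'longValue': 222},
        ],
    }
}
# For this input, GetFeedItemIdsForCampaign(campaign_feed) returns {111, 222}.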
def galactic2fk5(l, b):
"""
Convert galactic l/b to fk5 ra/dec
Parameters
----------
l, b : float
Galactic coordinates in radians.
Returns
-------
ra, dec : float
FK5 equatorial coordinates in radians.
"""
a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic')
return a.fk5.ra.radian, a.fk5.dec.radian | Convert galactic l/b to fk5 ra/dec
Parameters
----------
l, b : float
Galactic coordinates in radians.
Returns
-------
ra, dec : float
FK5 equatorial coordinates in radians. | Below is the instruction that describes the task:
### Input:
Convert galactic l/b to fk5 ra/dec
Parameters
----------
l, b : float
Galactic coordinates in radians.
Returns
-------
ra, dec : float
FK5 equatorial coordinates in radians.
### Response:
def galactic2fk5(l, b):
"""
Convert galactic l/b to fk5 ra/dec
Parameters
----------
l, b : float
Galactic coordinates in radians.
Returns
-------
ra, dec : float
FK5 equatorial coordinates in radians.
"""
a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic')
return a.fk5.ra.radian, a.fk5.dec.radian |
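A usage sketch of the same conversion (requires astropy; the Galactic-centre direction is just a convenient test value):
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

l, b = np.radians(0.0), np.radians(0.0)  # toward the Galactic centre
coord = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic')
print(coord.fk5.ra.deg, coord.fk5.dec.deg)  # roughly 266.4, -28.9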
def create(cls, community, record, user=None, expires_at=None,
notify=True):
"""Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
"""
if expires_at and expires_at < datetime.utcnow():
raise InclusionRequestExpiryTimeError(
community=community, record=record)
if community.has_record(record):
raise InclusionRequestObsoleteError(
community=community, record=record)
try:
# Create inclusion request
with db.session.begin_nested():
obj = cls(
id_community=community.id,
id_record=record.id,
user=user,
expires_at=expires_at
)
db.session.add(obj)
except (IntegrityError, FlushError):
raise InclusionRequestExistsError(
community=community, record=record)
# Send signal
inclusion_request_created.send(
current_app._get_current_object(),
request=obj,
notify=notify
)
return obj | Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore. | Below is the instruction that describes the task:
### Input:
Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
### Response:
def create(cls, community, record, user=None, expires_at=None,
notify=True):
"""Create a record inclusion request to a community.
:param community: Community object.
:param record: Record API object.
:param expires_at: Time after which the request expires and shouldn't
be resolved anymore.
"""
if expires_at and expires_at < datetime.utcnow():
raise InclusionRequestExpiryTimeError(
community=community, record=record)
if community.has_record(record):
raise InclusionRequestObsoleteError(
community=community, record=record)
try:
# Create inclusion request
with db.session.begin_nested():
obj = cls(
id_community=community.id,
id_record=record.id,
user=user,
expires_at=expires_at
)
db.session.add(obj)
except (IntegrityError, FlushError):
raise InclusionRequestExistsError(
community=community, record=record)
# Send signal
inclusion_request_created.send(
current_app._get_current_object(),
request=obj,
notify=notify
)
return obj |
def plot_file(self, name: str=None, time: int=None) -> None:
"""
Plot specific time for provided datafile.
If no time provided, will plot middle.
:param name: savefile name
:param time: time/data column
"""
if not time:
time = int(len(self.times) / 2)
if not name:
name = './img/' + self.filename + '.png'
yhat, residuals, residual_mean, noise = self._get_fit(time)
plt.figure()
plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
plt.plot(yhat)
plt.savefig(name) | Plot specific time for provided datafile.
If no time provided, will plot middle.
:param name: savefile name
:param time: time/data column | Below is the instruction that describes the task:
### Input:
Plot specific time for provided datafile.
If no time provided, will plot middle.
:param name: savefile name
:param time: time/data column
### Response:
def plot_file(self, name: str=None, time: int=None) -> None:
"""
Plot specific time for provided datafile.
If no time provided, will plot middle.
:param name: savefile name
:param time: time/data column
"""
if not time:
time = int(len(self.times) / 2)
if not name:
name = './img/' + self.filename + '.png'
yhat, residuals, residual_mean, noise = self._get_fit(time)
plt.figure()
plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
plt.plot(yhat)
plt.savefig(name) |
def merge_dicts(*args):
r"""
add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified in the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4}
"""
iter_ = iter(args)
mergedict_ = six.next(iter_).copy()
for dict_ in iter_:
mergedict_.update(dict_)
return mergedict_ | r"""
add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified in the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4} | Below is the instruction that describes the task:
### Input:
r"""
add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified in the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4}
### Response:
def merge_dicts(*args):
r"""
add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified in the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4}
"""
iter_ = iter(args)
mergedict_ = six.next(iter_).copy()
for dict_ in iter_:
mergedict_.update(dict_)
return mergedict_ |
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the bulb on with the given values as HSV. | Below is the instruction that describes the task:
### Input:
Turn the bulb on with the given values as HSV.
### Response:
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() |
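The method above only builds a small form-encoded payload and posts it; a sketch of that payload, with a hypothetical bulb address in the commented request:
hue, saturation, value = 180, 50, 100
data = "action=on&color={};{};{}".format(hue, saturation, value)
print(data)  # action=on&color=180;50;100
# The POST then goes to something like (URL and MAC are made up):
#   requests.post("http://192.168.1.50/api/v1/device/AA11BB22CC33", data=data, timeout=5)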
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
"""Check if constraints are satisfied"""
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
coverages = coverages[np.where(coverages > 0)[0]]
return coverages.size > 0 and np.amin(coverages) > self.min_object_covered | Check if constraints are satisfied | Below is the instruction that describes the task:
### Input:
Check if constraints are satisfied
### Response:
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
"""Check if constraints are satisfied"""
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
coverages = coverages[np.where(coverages > 0)[0]]
return coverages.size > 0 and np.amin(coverages) > self.min_object_covered |
def rectwv_coeff_add_longslit_model(rectwv_coeff, geometry, debugplot=0):
"""Compute longslit_model coefficients for RectWaveCoeff object.
Parameters
----------
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for a
particular CSU configuration corresponding to a longslit
observation.
geometry : TBD
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
rectwv_coeff : RectWaveCoeff instance
Updated object with longslit_model coefficients computed.
"""
logger = logging.getLogger(__name__)
# check grism and filter
grism_name = rectwv_coeff.tags['grism']
logger.info('Grism: ' + grism_name)
filter_name = rectwv_coeff.tags['filter']
logger.info('Filter: ' + filter_name)
# list of slitlets to be computed
list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
for idel in rectwv_coeff.missing_slitlets:
list_valid_islitlets.remove(idel)
if abs(debugplot) >= 10:
print('>>> valid slitlet numbers:\n', list_valid_islitlets)
# ---
# check that the CSU configuration corresponds to longslit
csu_bar_slit_center_list = []
for islitlet in list_valid_islitlets:
csu_bar_slit_center_list.append(
rectwv_coeff.contents[islitlet - 1]['csu_bar_slit_center']
)
if abs(debugplot) >= 10:
logger.debug('Checking csu_bar_slit_center values:')
summary(np.array(csu_bar_slit_center_list), debug=True)
pause_debugplot(debugplot)
# ---
# polynomial coefficients corresponding to the wavelength calibration
# step 0: determine poldeg_refined, checking that it is the same for
# all the slitlets
poldeg_refined_list = []
for islitlet in list_valid_islitlets:
poldeg_refined_list.append(
len(rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']) - 1
)
# remove duplicates
poldeg_refined_list = list(set(poldeg_refined_list))
if len(poldeg_refined_list) != 1:
raise ValueError('Unexpected different poldeg_refined found: ' +
str(poldeg_refined_list))
poldeg_refined = poldeg_refined_list[0]
# step 1: compute variation of each coefficient as a function of
# y0_reference_middle of each slitlet
list_poly = []
for i in range(poldeg_refined + 1):
xp = []
yp = []
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
wpoly_coeff = tmp_dict['wpoly_coeff']
if wpoly_coeff is not None:
xp.append(tmp_dict['y0_reference_middle'])
yp.append(wpoly_coeff[i])
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp),
deg=2,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='coeff[' + str(i) + ']',
title="Fit to refined wavelength calibration coefficients",
geometry=geometry,
debugplot=debugplot
)
list_poly.append(poly)
# step 2: use the variation of each polynomial coefficient with
# y0_reference_middle to infer the expected wavelength calibration
# polynomial for each rectified slitlet
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
y0_reference_middle = tmp_dict['y0_reference_middle']
list_new_coeff = []
for i in range(poldeg_refined + 1):
new_coeff = list_poly[i](y0_reference_middle)
list_new_coeff.append(new_coeff)
tmp_dict['wpoly_coeff_longslit_model'] = list_new_coeff
# ---
# rectification transformation coefficients aij and bij
# step 0: determine order_fmap, checking that it is the same for
# all the slitlets
order_fmap_list = []
for islitlet in list_valid_islitlets:
order_fmap_list.append(
rectwv_coeff.contents[islitlet - 1]['ttd_order']
)
# remove duplicates
order_fmap_list = list(set(order_fmap_list))
if len(order_fmap_list) != 1:
raise ValueError('Unexpected different order_fmap found')
order_fmap = order_fmap_list[0]
# step 1: compute variation of each coefficient as a function of
# y0_reference_middle of each slitlet
list_poly_ttd_aij = []
list_poly_ttd_bij = []
list_poly_tti_aij = []
list_poly_tti_bij = []
ncoef_ttd = ncoef_fmap(order_fmap)
for i in range(ncoef_ttd):
xp = []
yp_ttd_aij = []
yp_ttd_bij = []
yp_tti_aij = []
yp_tti_bij = []
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
ttd_aij = tmp_dict['ttd_aij']
ttd_bij = tmp_dict['ttd_bij']
tti_aij = tmp_dict['tti_aij']
tti_bij = tmp_dict['tti_bij']
if ttd_aij is not None:
xp.append(tmp_dict['y0_reference_middle'])
yp_ttd_aij.append(ttd_aij[i])
yp_ttd_bij.append(ttd_bij[i])
yp_tti_aij.append(tti_aij[i])
yp_tti_bij.append(tti_bij[i])
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_ttd_aij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='ttd_aij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_ttd_aij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_ttd_bij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='ttd_bij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_ttd_bij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_tti_aij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='tti_aij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_tti_aij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_tti_bij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='tti_bij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_tti_bij.append(poly)
# step 2: use the variation of each coefficient with y0_reference_middle
# to infer the expected rectification transformation for each slitlet
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
y0_reference_middle = tmp_dict['y0_reference_middle']
tmp_dict['ttd_order_longslit_model'] = order_fmap
ttd_aij_longslit_model = []
ttd_bij_longslit_model = []
tti_aij_longslit_model = []
tti_bij_longslit_model = []
for i in range(ncoef_ttd):
new_coeff = list_poly_ttd_aij[i](y0_reference_middle)
ttd_aij_longslit_model.append(new_coeff)
new_coeff = list_poly_ttd_bij[i](y0_reference_middle)
ttd_bij_longslit_model.append(new_coeff)
new_coeff = list_poly_tti_aij[i](y0_reference_middle)
tti_aij_longslit_model.append(new_coeff)
new_coeff = list_poly_tti_bij[i](y0_reference_middle)
tti_bij_longslit_model.append(new_coeff)
tmp_dict['ttd_aij_longslit_model'] = ttd_aij_longslit_model
tmp_dict['ttd_bij_longslit_model'] = ttd_bij_longslit_model
tmp_dict['tti_aij_longslit_model'] = tti_aij_longslit_model
tmp_dict['tti_bij_longslit_model'] = tti_bij_longslit_model
# ---
# update uuid and meta_info in output JSON structure
rectwv_coeff.uuid = str(uuid4())
rectwv_coeff.meta_info['creation_date'] = datetime.now().isoformat()
# return updated object
return rectwv_coeff | Compute longslit_model coefficients for RectWaveCoeff object.
Parameters
----------
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for a
particular CSU configuration corresponding to a longslit
observation.
geometry : TBD
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
rectwv_coeff : RectWaveCoeff instance
Updated object with longslit_model coefficients computed. | Below is the instruction that describes the task:
### Input:
Compute longslit_model coefficients for RectWaveCoeff object.
Parameters
----------
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for a
particular CSU configuration corresponding to a longslit
observation.
geometry : TBD
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
rectwv_coeff : RectWaveCoeff instance
Updated object with longslit_model coefficients computed.
### Response:
def rectwv_coeff_add_longslit_model(rectwv_coeff, geometry, debugplot=0):
"""Compute longslit_model coefficients for RectWaveCoeff object.
Parameters
----------
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for a
particular CSU configuration corresponding to a longslit
observation.
geometry : TBD
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
rectwv_coeff : RectWaveCoeff instance
Updated object with longslit_model coefficients computed.
"""
logger = logging.getLogger(__name__)
# check grism and filter
grism_name = rectwv_coeff.tags['grism']
logger.info('Grism: ' + grism_name)
filter_name = rectwv_coeff.tags['filter']
logger.info('Filter: ' + filter_name)
# list of slitlets to be computed
list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
for idel in rectwv_coeff.missing_slitlets:
list_valid_islitlets.remove(idel)
if abs(debugplot) >= 10:
print('>>> valid slitlet numbers:\n', list_valid_islitlets)
# ---
# check that the CSU configuration corresponds to longslit
csu_bar_slit_center_list = []
for islitlet in list_valid_islitlets:
csu_bar_slit_center_list.append(
rectwv_coeff.contents[islitlet - 1]['csu_bar_slit_center']
)
if abs(debugplot) >= 10:
logger.debug('Checking csu_bar_slit_center values:')
summary(np.array(csu_bar_slit_center_list), debug=True)
pause_debugplot(debugplot)
# ---
# polynomial coefficients corresponding to the wavelength calibration
# step 0: determine poldeg_refined, checking that it is the same for
# all the slitlets
poldeg_refined_list = []
for islitlet in list_valid_islitlets:
poldeg_refined_list.append(
len(rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']) - 1
)
# remove duplicates
poldeg_refined_list = list(set(poldeg_refined_list))
if len(poldeg_refined_list) != 1:
raise ValueError('Unexpected different poldeg_refined found: ' +
str(poldeg_refined_list))
poldeg_refined = poldeg_refined_list[0]
# step 1: compute variation of each coefficient as a function of
# y0_reference_middle of each slitlet
list_poly = []
for i in range(poldeg_refined + 1):
xp = []
yp = []
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
wpoly_coeff = tmp_dict['wpoly_coeff']
if wpoly_coeff is not None:
xp.append(tmp_dict['y0_reference_middle'])
yp.append(wpoly_coeff[i])
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp),
deg=2,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='coeff[' + str(i) + ']',
title="Fit to refined wavelength calibration coefficients",
geometry=geometry,
debugplot=debugplot
)
list_poly.append(poly)
# step 2: use the variation of each polynomial coefficient with
# y0_reference_middle to infer the expected wavelength calibration
# polynomial for each rectified slitlet
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
y0_reference_middle = tmp_dict['y0_reference_middle']
list_new_coeff = []
for i in range(poldeg_refined + 1):
new_coeff = list_poly[i](y0_reference_middle)
list_new_coeff.append(new_coeff)
tmp_dict['wpoly_coeff_longslit_model'] = list_new_coeff
# ---
# rectification transformation coefficients aij and bij
# step 0: determine order_fmap, checking that it is the same for
# all the slitlets
order_fmap_list = []
for islitlet in list_valid_islitlets:
order_fmap_list.append(
rectwv_coeff.contents[islitlet - 1]['ttd_order']
)
# remove duplicates
order_fmap_list = list(set(order_fmap_list))
if len(order_fmap_list) != 1:
raise ValueError('Unexpected different order_fmap found')
order_fmap = order_fmap_list[0]
# step 1: compute variation of each coefficient as a function of
# y0_reference_middle of each slitlet
list_poly_ttd_aij = []
list_poly_ttd_bij = []
list_poly_tti_aij = []
list_poly_tti_bij = []
ncoef_ttd = ncoef_fmap(order_fmap)
for i in range(ncoef_ttd):
xp = []
yp_ttd_aij = []
yp_ttd_bij = []
yp_tti_aij = []
yp_tti_bij = []
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
ttd_aij = tmp_dict['ttd_aij']
ttd_bij = tmp_dict['ttd_bij']
tti_aij = tmp_dict['tti_aij']
tti_bij = tmp_dict['tti_bij']
if ttd_aij is not None:
xp.append(tmp_dict['y0_reference_middle'])
yp_ttd_aij.append(ttd_aij[i])
yp_ttd_bij.append(ttd_bij[i])
yp_tti_aij.append(tti_aij[i])
yp_tti_bij.append(tti_bij[i])
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_ttd_aij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='ttd_aij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_ttd_aij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_ttd_bij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='ttd_bij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_ttd_bij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_tti_aij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='tti_aij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_tti_aij.append(poly)
poly, yres, reject = polfit_residuals_with_sigma_rejection(
x=np.array(xp),
y=np.array(yp_tti_bij),
deg=5,
times_sigma_reject=5,
xlabel='y0_rectified',
ylabel='tti_bij[' + str(i) + ']',
geometry=geometry,
debugplot=debugplot
)
list_poly_tti_bij.append(poly)
# step 2: use the variation of each coefficient with y0_reference_middle
# to infer the expected rectification transformation for each slitlet
for islitlet in list_valid_islitlets:
tmp_dict = rectwv_coeff.contents[islitlet - 1]
y0_reference_middle = tmp_dict['y0_reference_middle']
tmp_dict['ttd_order_longslit_model'] = order_fmap
ttd_aij_longslit_model = []
ttd_bij_longslit_model = []
tti_aij_longslit_model = []
tti_bij_longslit_model = []
for i in range(ncoef_ttd):
new_coeff = list_poly_ttd_aij[i](y0_reference_middle)
ttd_aij_longslit_model.append(new_coeff)
new_coeff = list_poly_ttd_bij[i](y0_reference_middle)
ttd_bij_longslit_model.append(new_coeff)
new_coeff = list_poly_tti_aij[i](y0_reference_middle)
tti_aij_longslit_model.append(new_coeff)
new_coeff = list_poly_tti_bij[i](y0_reference_middle)
tti_bij_longslit_model.append(new_coeff)
tmp_dict['ttd_aij_longslit_model'] = ttd_aij_longslit_model
tmp_dict['ttd_bij_longslit_model'] = ttd_bij_longslit_model
tmp_dict['tti_aij_longslit_model'] = tti_aij_longslit_model
tmp_dict['tti_bij_longslit_model'] = tti_bij_longslit_model
# ---
# update uuid and meta_info in output JSON structure
rectwv_coeff.uuid = str(uuid4())
rectwv_coeff.meta_info['creation_date'] = datetime.now().isoformat()
# return updated object
return rectwv_coeff |
def _make_safe_id(self, id):
"""Returns a modified id that has been made safe for SBML.
Replaces or deletes the ones that aren't allowed.
"""
substitutions = {
'-': '_DASH_',
'/': '_FSLASH_',
'\\': '_BSLASH_',
'(': '_LPAREN_',
')': '_RPAREN_',
'[': '_LSQBKT_',
']': '_RSQBKT_',
',': '_COMMA_',
'.': '_PERIOD_',
"'": '_APOS_'
}
id = re.sub(r'\(([a-z])\)$', '_\\1', id)
for symbol, escape in iteritems(substitutions):
id = id.replace(symbol, escape)
id = re.sub(r'[^a-zA-Z0-9_]', '', id)
return id | Returns a modified id that has been made safe for SBML.
Replaces or deletes the ones that aren't allowed. | Below is the instruction that describes the task:
### Input:
Returns a modified id that has been made safe for SBML.
Replaces or deletes the ones that aren't allowed.
### Response:
def _make_safe_id(self, id):
"""Returns a modified id that has been made safe for SBML.
Replaces or deletes the ones that aren't allowed.
"""
substitutions = {
'-': '_DASH_',
'/': '_FSLASH_',
'\\': '_BSLASH_',
'(': '_LPAREN_',
')': '_RPAREN_',
'[': '_LSQBKT_',
']': '_RSQBKT_',
',': '_COMMA_',
'.': '_PERIOD_',
"'": '_APOS_'
}
id = re.sub(r'\(([a-z])\)$', '_\\1', id)
for symbol, escape in iteritems(substitutions):
id = id.replace(symbol, escape)
id = re.sub(r'[^a-zA-Z0-9_]', '', id)
return id |
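A standalone sketch of the same sanitising logic, using dict.items() instead of six.iteritems() and a made-up metabolite-style identifier:
import re

def make_safe_id(identifier):
    substitutions = {
        '-': '_DASH_', '/': '_FSLASH_', '\\': '_BSLASH_',
        '(': '_LPAREN_', ')': '_RPAREN_', '[': '_LSQBKT_',
        ']': '_RSQBKT_', ',': '_COMMA_', '.': '_PERIOD_', "'": '_APOS_',
    }
    # A trailing single-letter compartment tag such as "(e)" becomes "_e".
    identifier = re.sub(r'\(([a-z])\)$', '_\\1', identifier)
    for symbol, escape in substitutions.items():
        identifier = identifier.replace(symbol, escape)
    return re.sub(r'[^a-zA-Z0-9_]', '', identifier)

print(make_safe_id("glc-D(e)"))  # glc_DASH_D_e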
def clock(self, interval, basis):
"""Return a NodeInput tuple for triggering an event every interval.
We request each distinct type of clock at most once and combine it with our
latch stream each time it is requested.
Args:
interval (int): The interval (in seconds) at which this input should
trigger.
"""
cache_name = self._classify_clock(interval, basis)
cache_data = self.clock_cache.get(cache_name)
if cache_data is None:
parent_stream, trigger = self.parent.clock(interval, basis)
if trigger.use_count is False:
raise SensorGraphSemanticError("Unsupported clock trigger in GatedClockScope", trigger=trigger)
elif interval % trigger.reference != 0:
raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", trigger=trigger, interval=interval)
ratio = interval // trigger.reference
stream = self.allocator.allocate_stream(DataStream.CounterType)
latch_stream = self.allocator.attach_stream(self.latch_stream)
self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream))
self.clock_cache[cache_name] = (stream, ratio)
else:
stream, ratio = cache_data
if interval % ratio != 0:
raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", ratio=ratio, interval=interval)
count = interval // ratio
clock_stream = self.allocator.attach_stream(stream)
return clock_stream, InputTrigger(u'count', '>=', count) | Return a NodeInput tuple for triggering an event every interval.
We request each distinct type of clock at most once and combine it with our
latch stream each time it is requested.
Args:
interval (int): The interval (in seconds) at which this input should
trigger. | Below is the instruction that describes the task:
### Input:
Return a NodeInput tuple for triggering an event every interval.
We request each distinct type of clock at most once and combine it with our
latch stream each time it is requested.
Args:
interval (int): The interval (in seconds) at which this input should
trigger.
### Response:
def clock(self, interval, basis):
"""Return a NodeInput tuple for triggering an event every interval.
We request each distinct type of clock at most once and combine it with our
latch stream each time it is requested.
Args:
interval (int): The interval (in seconds) at which this input should
trigger.
"""
cache_name = self._classify_clock(interval, basis)
cache_data = self.clock_cache.get(cache_name)
if cache_data is None:
parent_stream, trigger = self.parent.clock(interval, basis)
if trigger.use_count is False:
raise SensorGraphSemanticError("Unsupported clock trigger in GatedClockScope", trigger=trigger)
elif interval % trigger.reference != 0:
raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", trigger=trigger, interval=interval)
ratio = interval // trigger.reference
stream = self.allocator.allocate_stream(DataStream.CounterType)
latch_stream = self.allocator.attach_stream(self.latch_stream)
self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream))
self.clock_cache[cache_name] = (stream, ratio)
else:
stream, ratio = cache_data
if interval % ratio != 0:
raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", ratio=ratio, interval=interval)
count = interval // ratio
clock_stream = self.allocator.attach_stream(stream)
return clock_stream, InputTrigger(u'count', '>=', count) |
async def execute(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, BasePipeline):
# make sure this script is good to go on pipeline
client.scripts.add(self)
try:
return await client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
# Maybe the client is pointed to a different server than the client
# that created this instance?
# Overwrite the sha just in case there was a discrepancy.
self.sha = await client.script_load(self.script)
return await client.evalsha(self.sha, len(keys), *args) | Execute the script, passing any required ``args`` | Below is the the instruction that describes the task:
### Input:
Execute the script, passing any required ``args``
### Response:
async def execute(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, BasePipeline):
# make sure this script is good to go on pipeline
client.scripts.add(self)
try:
return await client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
            # Maybe the client is pointed to a different server than the client
# that created this instance?
# Overwrite the sha just in case there was a discrepancy.
self.sha = await client.script_load(self.script)
return await client.evalsha(self.sha, len(keys), *args) |
def dK_dr_via_X(self, X, X2):
"""
compute the derivative of K wrt X going through X
"""
#a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2)) | compute the derivative of K wrt X going through X | Below is the the instruction that describes the task:
### Input:
compute the derivative of K wrt X going through X
### Response:
def dK_dr_via_X(self, X, X2):
"""
compute the derivative of K wrt X going through X
"""
#a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2)) |
def rep(obj, *attrs, **kwargs):
"""Create a repr of a property based class quickly
Args:
obj -- instance of class
*attrs -- list of attrs to add to the representation
**kwargs -- Extra arguments to add that are not captured as attributes
Returns: A string representing the class
"""
s = obj.__class__.__name__
args = chain(((attr, getattr(obj, attr)) for attr in attrs), kwargs.items())
s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args)
return s | Create a repr of a property based class quickly
Args:
obj -- instance of class
*attrs -- list of attrs to add to the representation
**kwargs -- Extra arguments to add that are not captured as attributes
Returns: A string representing the class | Below is the the instruction that describes the task:
### Input:
Create a repr of a property based class quickly
Args:
obj -- instance of class
*attrs -- list of attrs to add to the representation
**kwargs -- Extra arguments to add that are not captured as attributes
Returns: A string representing the class
### Response:
def rep(obj, *attrs, **kwargs):
"""Create a repr of a property based class quickly
Args:
obj -- instance of class
*attrs -- list of attrs to add to the representation
**kwargs -- Extra arguments to add that are not captured as attributes
Returns: A string representing the class
"""
s = obj.__class__.__name__
args = chain(((attr, getattr(obj, attr)) for attr in attrs), kwargs.items())
s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args)
return s |
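A short usage sketch for the helper above (the Point class and the 'unit' keyword are hypothetical; it assumes rep and its chain import are available from the module that defines it):
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        # build the repr from named attributes plus one extra keyword argument
        return rep(self, 'x', 'y', unit='px')
print(Point(3, 4))  # -> Point(x=3,y=4,unit='px')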
def plot_normal(x=None, mean_x=None,std_x=None,color='red',linewidth=2,alpha=1,bins=20,xlim=False,plot_mean=True,plot_std=False,plot_2std=True,figure=None,annotate=True,histogram=True):
"""
plot a fit of a normal distribution to the data in x.
"""
import pylab
if figure is None:
figure=pylab.figure()
if mean_x is None:
#fit maximum likelihood Normal distribution mean to samples X
mean_x = x.mean() #sample mean
if std_x is None:
#fit maximum likelihood Normal distribution standard deviation to samples X
std_x = x.std() #sample standard deviation
xvals=np.arange(mean_x-5*std_x,mean_x+5*std_x,.001)
yvals=st.norm.pdf(xvals,mean_x,std_x)
#plot normal distribution:
ax = pylab.plot(xvals,yvals,color=color,linewidth=linewidth,alpha=alpha)
if x is not None and histogram:
#plot histogram of x-values
pylab.hist(x,bins,normed=True)
if plot_mean:
#evaluate distribution at the mean:
max_cdf=st.norm.pdf(mean_x,mean_x,std_x)
pylab.plot([mean_x,mean_x],[0,max_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu$', xy=(mean_x+0.6*std_x, 1.0*max_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if plot_std:#plot mean +- 1*standard deviation (68% interval)
std_cdf=st.norm.pdf(mean_x+std_x,mean_x,std_x)
pylab.plot([mean_x+std_x,mean_x+std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-std_x,mean_x-std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+\sigma$', xy=(mean_x+1.6*std_x, 1.5*std_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
if plot_2std:#plot mean +- 2*standard deviations (95% interval)
std2_cdf=st.norm.pdf(mean_x+2*std_x,mean_x,std_x)
pylab.plot([mean_x+2*std_x,mean_x+2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-2*std_x,mean_x-2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+2\sigma$', xy=(mean_x+2.6*std_x, 1.5*std2_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
if xlim: #cut of unused space on y-axis
pylab.xlim([mean_x-4*std_x,mean_x+4*std_x])
return figure | plot a fit of a normal distribution to the data in x. | Below is the the instruction that describes the task:
### Input:
plot a fit of a normal distribution to the data in x.
### Response:
def plot_normal(x=None, mean_x=None,std_x=None,color='red',linewidth=2,alpha=1,bins=20,xlim=False,plot_mean=True,plot_std=False,plot_2std=True,figure=None,annotate=True,histogram=True):
"""
plot a fit of a normal distribution to the data in x.
"""
import pylab
if figure is None:
figure=pylab.figure()
if mean_x is None:
#fit maximum likelihood Normal distribution mean to samples X
mean_x = x.mean() #sample mean
if std_x is None:
#fit maximum likelihood Normal distribution standard deviation to samples X
std_x = x.std() #sample standard deviation
xvals=np.arange(mean_x-5*std_x,mean_x+5*std_x,.001)
yvals=st.norm.pdf(xvals,mean_x,std_x)
#plot normal distribution:
ax = pylab.plot(xvals,yvals,color=color,linewidth=linewidth,alpha=alpha)
if x is not None and histogram:
#plot histogram of x-values
pylab.hist(x,bins,normed=True)
if plot_mean:
#evaluate distribution at the mean:
max_cdf=st.norm.pdf(mean_x,mean_x,std_x)
pylab.plot([mean_x,mean_x],[0,max_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu$', xy=(mean_x+0.6*std_x, 1.0*max_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if plot_std:#plot mean +- 1*standard deviation (68% interval)
std_cdf=st.norm.pdf(mean_x+std_x,mean_x,std_x)
pylab.plot([mean_x+std_x,mean_x+std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-std_x,mean_x-std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+\sigma$', xy=(mean_x+1.6*std_x, 1.5*std_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
if plot_2std:#plot mean +- 2*standard deviations (95% interval)
std2_cdf=st.norm.pdf(mean_x+2*std_x,mean_x,std_x)
pylab.plot([mean_x+2*std_x,mean_x+2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
pylab.plot([mean_x-2*std_x,mean_x-2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
if annotate:
pylab.annotate('$\mu+2\sigma$', xy=(mean_x+2.6*std_x, 1.5*std2_cdf),
horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
if xlim: #cut of unused space on y-axis
pylab.xlim([mean_x-4*std_x,mean_x+4*std_x])
return figure |
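A minimal usage sketch, assuming plot_normal and its module-level aliases (np for numpy, st for scipy.stats, pylab) are in scope as in the snippet above; the sample data and output filename are made up. Note that pylab.hist(..., normed=True) targets older matplotlib releases, where newer ones expect density=True:
import numpy as np
samples = np.random.normal(loc=2.0, scale=0.5, size=1000)
# overlay the fitted Gaussian (mean/std estimated from the samples) on a histogram
fig = plot_normal(samples, color='blue', bins=30, xlim=True)
fig.savefig('normal_fit.png')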
def p_restore(p):
""" statement : RESTORE
| RESTORE ID
| RESTORE NUMBER
"""
if len(p) == 2:
id_ = '__DATA__{0}'.format(len(gl.DATAS))
else:
id_ = p[2]
lbl = check_and_make_label(id_, p.lineno(1))
p[0] = make_sentence('RESTORE', lbl) | statement : RESTORE
| RESTORE ID
| RESTORE NUMBER | Below is the the instruction that describes the task:
### Input:
statement : RESTORE
| RESTORE ID
| RESTORE NUMBER
### Response:
def p_restore(p):
""" statement : RESTORE
| RESTORE ID
| RESTORE NUMBER
"""
if len(p) == 2:
id_ = '__DATA__{0}'.format(len(gl.DATAS))
else:
id_ = p[2]
lbl = check_and_make_label(id_, p.lineno(1))
p[0] = make_sentence('RESTORE', lbl) |
def _q_to_dcm(self, q):
"""
Create DCM from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: 3x3 dcm array
"""
assert(len(q) == 4)
assert(np.allclose(QuaternionBase.norm_array(q), 1))
dcm = np.zeros([3, 3])
a = q[0]
b = q[1]
c = q[2]
d = q[3]
a_sq = a * a
b_sq = b * b
c_sq = c * c
d_sq = d * d
dcm[0][0] = a_sq + b_sq - c_sq - d_sq
dcm[0][1] = 2 * (b * c - a * d)
dcm[0][2] = 2 * (a * c + b * d)
dcm[1][0] = 2 * (b * c + a * d)
dcm[1][1] = a_sq - b_sq + c_sq - d_sq
dcm[1][2] = 2 * (c * d - a * b)
dcm[2][0] = 2 * (b * d - a * c)
dcm[2][1] = 2 * (a * b + c * d)
dcm[2][2] = a_sq - b_sq - c_sq + d_sq
return dcm | Create DCM from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: 3x3 dcm array | Below is the the instruction that describes the task:
### Input:
Create DCM from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: 3x3 dcm array
### Response:
def _q_to_dcm(self, q):
"""
Create DCM from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: 3x3 dcm array
"""
assert(len(q) == 4)
assert(np.allclose(QuaternionBase.norm_array(q), 1))
dcm = np.zeros([3, 3])
a = q[0]
b = q[1]
c = q[2]
d = q[3]
a_sq = a * a
b_sq = b * b
c_sq = c * c
d_sq = d * d
dcm[0][0] = a_sq + b_sq - c_sq - d_sq
dcm[0][1] = 2 * (b * c - a * d)
dcm[0][2] = 2 * (a * c + b * d)
dcm[1][0] = 2 * (b * c + a * d)
dcm[1][1] = a_sq - b_sq + c_sq - d_sq
dcm[1][2] = 2 * (c * d - a * b)
dcm[2][0] = 2 * (b * d - a * c)
dcm[2][1] = 2 * (a * b + c * d)
dcm[2][2] = a_sq - b_sq - c_sq + d_sq
return dcm |
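A self-contained sketch of the same conversion written as a free function, used to sanity-check one familiar case (a 90-degree rotation about z); the [w, x, y, z] ordering matches the method above:
import numpy as np
def quat_to_dcm(q):
    # same formula as _q_to_dcm, repeated here only for illustration
    a, b, c, d = q
    return np.array([
        [a*a + b*b - c*c - d*d, 2*(b*c - a*d),         2*(a*c + b*d)],
        [2*(b*c + a*d),         a*a - b*b + c*c - d*d, 2*(c*d - a*b)],
        [2*(b*d - a*c),         2*(a*b + c*d),         a*a - b*b - c*c + d*d],
    ])
q = [np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)]  # 90 degrees about the z axis
dcm = quat_to_dcm(q)
assert np.allclose(dcm @ dcm.T, np.eye(3))      # rotation matrices are orthonormal
assert np.allclose(dcm @ [1, 0, 0], [0, 1, 0])  # the x axis is mapped onto the y axis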
def create_main_target (self, name):
""" Returns a 'MainTarget' class instance corresponding to the 'name'.
"""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets ()
return self.main_targets_.get (name, None) | Returns a 'MainTarget' class instance corresponding to the 'name'. | Below is the the instruction that describes the task:
### Input:
Returns a 'MainTarget' class instance corresponding to the 'name'.
### Response:
def create_main_target (self, name):
""" Returns a 'MainTarget' class instance corresponding to the 'name'.
"""
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets ()
return self.main_targets_.get (name, None) |
def _search_for_files(parts):
""" Given a list of parts, return all of the nested file parts. """
file_parts = []
for part in parts:
if isinstance(part, list):
file_parts.extend(_search_for_files(part))
elif isinstance(part, FileToken):
file_parts.append(part)
return file_parts | Given a list of parts, return all of the nested file parts. | Below is the the instruction that describes the task:
### Input:
Given a list of parts, return all of the nested file parts.
### Response:
def _search_for_files(parts):
""" Given a list of parts, return all of the nested file parts. """
file_parts = []
for part in parts:
if isinstance(part, list):
file_parts.extend(_search_for_files(part))
elif isinstance(part, FileToken):
file_parts.append(part)
return file_parts |
def loadImageData(filename, spacing=()):
"""Read and return a ``vtkImageData`` object from file."""
if not os.path.isfile(filename):
colors.printc("~noentry File not found:", filename, c=1)
return None
if ".tif" in filename.lower():
reader = vtk.vtkTIFFReader()
elif ".slc" in filename.lower():
reader = vtk.vtkSLCReader()
if not reader.CanReadFile(filename):
colors.printc("~prohibited Sorry bad slc file " + filename, c=1)
exit(1)
elif ".vti" in filename.lower():
reader = vtk.vtkXMLImageDataReader()
elif ".mhd" in filename.lower():
reader = vtk.vtkMetaImageReader()
reader.SetFileName(filename)
reader.Update()
image = reader.GetOutput()
if len(spacing) == 3:
image.SetSpacing(spacing[0], spacing[1], spacing[2])
return image | Read and return a ``vtkImageData`` object from file. | Below is the the instruction that describes the task:
### Input:
Read and return a ``vtkImageData`` object from file.
### Response:
def loadImageData(filename, spacing=()):
"""Read and return a ``vtkImageData`` object from file."""
if not os.path.isfile(filename):
colors.printc("~noentry File not found:", filename, c=1)
return None
if ".tif" in filename.lower():
reader = vtk.vtkTIFFReader()
elif ".slc" in filename.lower():
reader = vtk.vtkSLCReader()
if not reader.CanReadFile(filename):
colors.printc("~prohibited Sorry bad slc file " + filename, c=1)
exit(1)
elif ".vti" in filename.lower():
reader = vtk.vtkXMLImageDataReader()
elif ".mhd" in filename.lower():
reader = vtk.vtkMetaImageReader()
reader.SetFileName(filename)
reader.Update()
image = reader.GetOutput()
if len(spacing) == 3:
image.SetSpacing(spacing[0], spacing[1], spacing[2])
return image |
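Hypothetical usage (the file name and spacing values are made up); the return value is a vtkImageData, so the usual VTK accessors apply:
img = loadImageData('volume.vti', spacing=(1.0, 1.0, 2.5))
if img is not None:
    print(img.GetDimensions(), img.GetSpacing())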
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive | process block from the block_parser and return a list of processed lines | Below is the the instruction that describes the task:
### Input:
process block from the block_parser and return a list of processed lines
### Response:
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive |
def walk_preorder(self):
"""Depth-first preorder walk over the cursor and its descendants.
Yields cursors.
"""
yield self
for child in self.get_children():
for descendant in child.walk_preorder():
yield descendant | Depth-first preorder walk over the cursor and its descendants.
Yields cursors. | Below is the the instruction that describes the task:
### Input:
Depth-first preorder walk over the cursor and its descendants.
Yields cursors.
### Response:
def walk_preorder(self):
"""Depth-first preorder walk over the cursor and its descendants.
Yields cursors.
"""
yield self
for child in self.get_children():
for descendant in child.walk_preorder():
yield descendant |
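The same preorder pattern on a toy tree, as a self-contained sketch (the Node class is hypothetical; the real method is defined on cursor objects that expose get_children()):
class Node:
    def __init__(self, name, children=()):
        self.name = name
        self._children = list(children)
    def get_children(self):
        return self._children
    def walk_preorder(self):
        # identical traversal logic: yield the node itself, then recurse into children
        yield self
        for child in self.get_children():
            for descendant in child.walk_preorder():
                yield descendant
tree = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([n.name for n in tree.walk_preorder()])  # ['root', 'a', 'a1', 'b']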
def get_output(self, transaction_hash, output_index):
"""
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
"""
cached_output = yield from self._cache.get(transaction_hash, output_index)
if cached_output is not None:
return cached_output
transaction = yield from self._transaction_provider(transaction_hash)
if transaction is None:
raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))
colored_outputs = yield from self.color_transaction(transaction)
for index, output in enumerate(colored_outputs):
yield from self._cache.put(transaction_hash, index, output)
return colored_outputs[output_index] | Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput] | Below is the the instruction that describes the task:
### Input:
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
### Response:
def get_output(self, transaction_hash, output_index):
"""
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
"""
cached_output = yield from self._cache.get(transaction_hash, output_index)
if cached_output is not None:
return cached_output
transaction = yield from self._transaction_provider(transaction_hash)
if transaction is None:
raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))
colored_outputs = yield from self.color_transaction(transaction)
for index, output in enumerate(colored_outputs):
yield from self._cache.put(transaction_hash, index, output)
return colored_outputs[output_index] |
def __print_command_help(self, session, namespace, cmd_name):
"""
Prints the documentation of the given command
:param session: Session handler
:param namespace: Name space of the command
:param cmd_name: Name of the command
"""
# Extract documentation
args, doc = self.__extract_help(self._commands[namespace][cmd_name])
# Print the command name, and its arguments
if args:
session.write_line("- {0} {1}", cmd_name, args)
else:
session.write_line("- {0}", cmd_name)
# Print the documentation line
session.write_line("\t\t{0}", doc) | Prints the documentation of the given command
:param session: Session handler
:param namespace: Name space of the command
:param cmd_name: Name of the command | Below is the the instruction that describes the task:
### Input:
Prints the documentation of the given command
:param session: Session handler
:param namespace: Name space of the command
:param cmd_name: Name of the command
### Response:
def __print_command_help(self, session, namespace, cmd_name):
"""
Prints the documentation of the given command
:param session: Session handler
:param namespace: Name space of the command
:param cmd_name: Name of the command
"""
# Extract documentation
args, doc = self.__extract_help(self._commands[namespace][cmd_name])
# Print the command name, and its arguments
if args:
session.write_line("- {0} {1}", cmd_name, args)
else:
session.write_line("- {0}", cmd_name)
# Print the documentation line
session.write_line("\t\t{0}", doc) |
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha | See ReplayBuffer.store_effect | Below is the the instruction that describes the task:
### Input:
See ReplayBuffer.store_effect
### Response:
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha |
def _get_choices(self, gandi):
""" Internal method to get choices list """
packages = super(CertificatePackageType, self)._get_choices(gandi)
return list(set([pack.split('_')[1] for pack in packages])) | Internal method to get choices list | Below is the the instruction that describes the task:
### Input:
Internal method to get choices list
### Response:
def _get_choices(self, gandi):
""" Internal method to get choices list """
packages = super(CertificatePackageType, self)._get_choices(gandi)
return list(set([pack.split('_')[1] for pack in packages])) |
def is_username_valid(username):
"""
Check if a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
:return:
"""
pattern = re.compile(r"^[a-zA-Z0-9_.-]+$")
return bool(pattern.match(username)) | Check if a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
:return: | Below is the the instruction that describes the task:
### Input:
Check if a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
:return:
### Response:
def is_username_valid(username):
"""
Check if a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
:return:
"""
pattern = re.compile(r"^[a-zA-Z0-9_.-]+$")
return bool(pattern.match(username)) |
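A quick check of the rule above (assuming is_username_valid is in scope; the sample names are arbitrary):
for name in ('oracle', 'bill-gates', 'steve.jobs', 'micro_soft', 'Bill Gates', 'bill@gates'):
    print(name, is_username_valid(name))
# the first four are valid; the last two fail because spaces and '@' are not allowed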
def get(self):
"""
Get a JSON-ready representation of this HtmlContent.
:returns: This HtmlContent, ready for use in a request body.
:rtype: dict
"""
content = {}
if self.mime_type is not None:
content["type"] = self.mime_type
if self.content is not None:
content["value"] = self.content
return content | Get a JSON-ready representation of this HtmlContent.
:returns: This HtmlContent, ready for use in a request body.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get a JSON-ready representation of this HtmlContent.
:returns: This HtmlContent, ready for use in a request body.
:rtype: dict
### Response:
def get(self):
"""
Get a JSON-ready representation of this HtmlContent.
:returns: This HtmlContent, ready for use in a request body.
:rtype: dict
"""
content = {}
if self.mime_type is not None:
content["type"] = self.mime_type
if self.content is not None:
content["value"] = self.content
return content |
def stack_get(self, key):
        """Get a value from a task context stack
"""
task = Task.current_task()
try:
context = task._context_stack
except AttributeError:
return
if key in context:
            return context[key][-1] | Get a value from a task context stack | Below is the the instruction that describes the task:
### Input:
Get a value from a task context stack
### Response:
def stack_get(self, key):
        """Get a value from a task context stack
"""
task = Task.current_task()
try:
context = task._context_stack
except AttributeError:
return
if key in context:
return context[key][-1] |
def direct_to_template(
request, template, extra_context=None, mimetype=None, **kwargs):
"""
Render a given template with any extra URL parameters in the context as
``{{ params }}``.
"""
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
t = loader.get_template(template)
return HttpResponse(
t.render(context=dictionary, request=request),
content_type=mimetype) | Render a given template with any extra URL parameters in the context as
``{{ params }}``. | Below is the the instruction that describes the task:
### Input:
Render a given template with any extra URL parameters in the context as
``{{ params }}``.
### Response:
def direct_to_template(
request, template, extra_context=None, mimetype=None, **kwargs):
"""
Render a given template with any extra URL parameters in the context as
``{{ params }}``.
"""
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
t = loader.get_template(template)
return HttpResponse(
t.render(context=dictionary, request=request),
content_type=mimetype) |
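A hypothetical urls.py wiring for the view above; the route, template name and extra context are made up, and django.urls.path assumes Django 2.0 or newer:
from django.urls import path
urlpatterns = [
    path('about/', direct_to_template,
         {'template': 'about.html', 'extra_context': {'title': 'About us'}}),
]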
async def query_handler(service, action_type, payload, props, **kwds):
"""
This action handler interprets the payload as a query to be executed
by the api gateway service.
"""
# check that the action type indicates a query
if action_type == query_action_type():
print('encountered query event {!r} '.format(payload))
# perform the query
result = await parse_string(payload,
service.object_resolver,
service.connection_resolver,
service.mutation_resolver,
obey_auth=False
)
# the props for the reply message
reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {}
# publish the success event
await service.event_broker.send(
payload=result,
action_type=change_action_status(action_type, success_status()),
**reply_props
) | This action handler interprets the payload as a query to be executed
by the api gateway service. | Below is the the instruction that describes the task:
### Input:
This action handler interprets the payload as a query to be executed
by the api gateway service.
### Response:
async def query_handler(service, action_type, payload, props, **kwds):
"""
This action handler interprets the payload as a query to be executed
by the api gateway service.
"""
# check that the action type indicates a query
if action_type == query_action_type():
print('encountered query event {!r} '.format(payload))
# perform the query
result = await parse_string(payload,
service.object_resolver,
service.connection_resolver,
service.mutation_resolver,
obey_auth=False
)
# the props for the reply message
reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {}
# publish the success event
await service.event_broker.send(
payload=result,
action_type=change_action_status(action_type, success_status()),
**reply_props
) |
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height | check, if field position is inside map
:param x: x pos
:param y: y pos
:return: | Below is the the instruction that describes the task:
### Input:
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
### Response:
def inside(self, x, y):
"""
check, if field position is inside map
:param x: x pos
:param y: y pos
:return:
"""
return 0 <= x < self.width and 0 <= y < self.height |
def get_additional_occurrences(self, start, end):
"""
Return persisted occurrences which are now in the period
"""
return [occ for _, occ in list(self.lookup.items()) if (occ.start < end and occ.end >= start and not occ.cancelled)] | Return persisted occurrences which are now in the period | Below is the the instruction that describes the task:
### Input:
Return persisted occurrences which are now in the period
### Response:
def get_additional_occurrences(self, start, end):
"""
Return persisted occurrences which are now in the period
"""
return [occ for _, occ in list(self.lookup.items()) if (occ.start < end and occ.end >= start and not occ.cancelled)] |
def convert_l2normalization(node, **kwargs):
"""Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mode = attrs.get("mode", "instance")
if mode != "channel":
raise AttributeError("L2Normalization: ONNX currently supports channel mode only")
l2norm_node = onnx.helper.make_node(
"LpNormalization",
input_nodes,
[name],
axis=1, # channel only
name=name
)
return [l2norm_node] | Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator
and return the created node. | Below is the the instruction that describes the task:
### Input:
Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator
and return the created node.
### Response:
def convert_l2normalization(node, **kwargs):
"""Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mode = attrs.get("mode", "instance")
if mode != "channel":
raise AttributeError("L2Normalization: ONNX currently supports channel mode only")
l2norm_node = onnx.helper.make_node(
"LpNormalization",
input_nodes,
[name],
axis=1, # channel only
name=name
)
return [l2norm_node] |
def load(self, prof_name):
"""
Load the profile with the given name.
:param str prof_name:
Profile name.
:rtype:
ProfileStub
:return:
            A stub to the loaded profile.
"""
prof_dir = self.__profile_dir(prof_name)
prof_ini_path = self.__profile_ini_path(prof_dir)
if not os.path.exists(prof_ini_path):
msg = "Profile '{}' does not exist"
raise Exception(msg.format(prof_name))
# Load profile
prof_ini_file = open(prof_ini_path, "r")
prof_ini = configparser.ConfigParser()
prof_ini.read_file(prof_ini_file)
prof_ini_file.close()
# Prepare profile
prof_type = prof_ini["profile"]["type"]
prof_stub = self.__profile_stub(prof_name, prof_type, prof_dir)
prof_stub.prepare(prof_ini["properties"])
return prof_stub | Load the profile with the given name.
:param str prof_name:
Profile name.
:rtype:
ProfileStub
:return:
        A stub to the loaded profile. | Below is the the instruction that describes the task:
### Input:
Load the profile with the given name.
:param str prof_name:
Profile name.
:rtype:
ProfileStub
:return:
        A stub to the loaded profile.
### Response:
def load(self, prof_name):
"""
Load the profile with the given name.
:param str prof_name:
Profile name.
:rtype:
ProfileStub
:return:
            A stub to the loaded profile.
"""
prof_dir = self.__profile_dir(prof_name)
prof_ini_path = self.__profile_ini_path(prof_dir)
if not os.path.exists(prof_ini_path):
msg = "Profile '{}' does not exist"
raise Exception(msg.format(prof_name))
# Load profile
prof_ini_file = open(prof_ini_path, "r")
prof_ini = configparser.ConfigParser()
prof_ini.read_file(prof_ini_file)
prof_ini_file.close()
# Prepare profile
prof_type = prof_ini["profile"]["type"]
prof_stub = self.__profile_stub(prof_name, prof_type, prof_dir)
prof_stub.prepare(prof_ini["properties"])
return prof_stub |
def get(self, request, pk=None):
""" Handles GET requests. """
self.top_level_forum = get_object_or_404(Forum, pk=pk) if pk else None
return super().get(request, pk) | Handles GET requests. | Below is the the instruction that describes the task:
### Input:
Handles GET requests.
### Response:
def get(self, request, pk=None):
""" Handles GET requests. """
self.top_level_forum = get_object_or_404(Forum, pk=pk) if pk else None
return super().get(request, pk) |
def run_command(cmd, sudo=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudo is True:
cmd = ['sudo'] + cmd
output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
t = output.communicate()[0],output.returncode
output = {'message':t[0],
'return_code':t[1]}
return output | run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo | Below is the the instruction that describes the task:
### Input:
run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo
### Response:
def run_command(cmd, sudo=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudo is True:
cmd = ['sudo'] + cmd
output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
t = output.communicate()[0],output.returncode
output = {'message':t[0],
'return_code':t[1]}
return output |
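A usage sketch, assuming run_command (and Popen, STDOUT, PIPE from subprocess) are in scope; the command is arbitrary:
result = run_command(['echo', 'hello'])
if result['return_code'] == 0:
    print(result['message'])   # b'hello\n' -- bytes, not str, on Python 3
else:
    print('command failed with code', result['return_code'])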
def set_class_value(self, selector, classname):
"""Set the class of element matched by the given selector."""
return self.evaluate("""
(function () {{
var element = document.querySelector({0});
element.className = {1};
}})();""".format(repr(selector), repr(classname))) | Set the class of element matched by the given selector. | Below is the the instruction that describes the task:
### Input:
Set the class of element matched by the given selector.
### Response:
def set_class_value(self, selector, classname):
"""Set the class of element matched by the given selector."""
return self.evaluate("""
(function () {{
var element = document.querySelector({0});
element.className = {1};
}})();""".format(repr(selector), repr(classname))) |
def deleted(self, src, path):
"""Update the reference tree when a handled file is deleted."""
if self.parents[path] is not None:
for parent in self.parents[path]:
self.children[parent].remove(path)
if not self.children[parent]:
del self.children[parent]
del self.parents[path] | Update the reference tree when a handled file is deleted. | Below is the the instruction that describes the task:
### Input:
Update the reference tree when a handled file is deleted.
### Response:
def deleted(self, src, path):
"""Update the reference tree when a handled file is deleted."""
if self.parents[path] is not None:
for parent in self.parents[path]:
self.children[parent].remove(path)
if not self.children[parent]:
del self.children[parent]
del self.parents[path] |
def main():
""" main """
# BZ 1581651 - Override the ArgumentParser to disable argument abbreviations.
parser = OverrideArgumentParser(
description=u'APB tooling for '
u'assisting in building and packaging APBs.'
)
parser.add_argument(
'--debug',
action='store_true',
dest='debug',
help=u'Enable debug output',
default=False
)
# TODO: Modify project to accept relative paths
parser.add_argument(
'--project',
'-p',
action='store',
dest='base_path',
help=u'Specify a path to your project. Defaults to CWD.',
default=os.getcwd()
)
parser.add_argument(
'--token',
action='store',
dest='auth_token',
help=u'Specify OpenShift auth token to be used',
default=None
)
subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
subparsers.required = True
for subcommand in AVAILABLE_COMMANDS:
subparser = subparsers.add_parser(
subcommand, help=AVAILABLE_COMMANDS[subcommand]
)
globals()['subcmd_%s_parser' % subcommand](subparser)
args = parser.parse_args()
if args.subcommand == 'help':
parser.print_help()
sys.exit(0)
if args.subcommand == 'version':
version = pkg_resources.require("apb")[0].version
print("Version: apb-%s" % version)
sys.exit(0)
try:
getattr(apb.engine,
u'cmdrun_{}'.format(args.subcommand))(**vars(args))
except Exception as e:
print("Exception occurred! %s" % e)
sys.exit(1) | main | Below is the the instruction that describes the task:
### Input:
main
### Response:
def main():
""" main """
# BZ 1581651 - Override the ArgumentParser to disable argument abbreviations.
parser = OverrideArgumentParser(
description=u'APB tooling for '
u'assisting in building and packaging APBs.'
)
parser.add_argument(
'--debug',
action='store_true',
dest='debug',
help=u'Enable debug output',
default=False
)
# TODO: Modify project to accept relative paths
parser.add_argument(
'--project',
'-p',
action='store',
dest='base_path',
help=u'Specify a path to your project. Defaults to CWD.',
default=os.getcwd()
)
parser.add_argument(
'--token',
action='store',
dest='auth_token',
help=u'Specify OpenShift auth token to be used',
default=None
)
subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
subparsers.required = True
for subcommand in AVAILABLE_COMMANDS:
subparser = subparsers.add_parser(
subcommand, help=AVAILABLE_COMMANDS[subcommand]
)
globals()['subcmd_%s_parser' % subcommand](subparser)
args = parser.parse_args()
if args.subcommand == 'help':
parser.print_help()
sys.exit(0)
if args.subcommand == 'version':
version = pkg_resources.require("apb")[0].version
print("Version: apb-%s" % version)
sys.exit(0)
try:
getattr(apb.engine,
u'cmdrun_{}'.format(args.subcommand))(**vars(args))
except Exception as e:
print("Exception occurred! %s" % e)
sys.exit(1) |
def engage(self, height):
"""
Move the magnet to a specific height, in mm from home position
"""
if height > MAX_ENGAGE_HEIGHT or height < 0:
raise ValueError('Invalid engage height. Should be 0 to {}'.format(
MAX_ENGAGE_HEIGHT))
self._driver.move(height)
self._engaged = True | Move the magnet to a specific height, in mm from home position | Below is the the instruction that describes the task:
### Input:
Move the magnet to a specific height, in mm from home position
### Response:
def engage(self, height):
"""
Move the magnet to a specific height, in mm from home position
"""
if height > MAX_ENGAGE_HEIGHT or height < 0:
raise ValueError('Invalid engage height. Should be 0 to {}'.format(
MAX_ENGAGE_HEIGHT))
self._driver.move(height)
self._engaged = True |
def do_parse(self, arg, fullparse=False):
"""Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/
"""
from os import path
fullpath = path.abspath(path.expanduser(arg))
if path.isdir(fullpath):
if fullpath[-1] == "/":
end = -2
else:
end = -1
case = fullpath.split("/")[end]
self.tests[case] = Analysis(fullpath, fullparse)
self.do_set(case)
else:
msg.err("The folder {} does not exist.".format(fullpath)) | Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/ | Below is the the instruction that describes the task:
### Input:
Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/
### Response:
def do_parse(self, arg, fullparse=False):
"""Parse the test results from the specified directory and load them under the name
of 'module.executable ' that they were created with. E.g. parse classes.polya/
"""
from os import path
fullpath = path.abspath(path.expanduser(arg))
if path.isdir(fullpath):
if fullpath[-1] == "/":
end = -2
else:
end = -1
case = fullpath.split("/")[end]
self.tests[case] = Analysis(fullpath, fullparse)
self.do_set(case)
else:
msg.err("The folder {} does not exist.".format(fullpath)) |
def dumps(self):
"""
Returns contents of config file as string
OUT: out (type: str, hint: config content)
"""
out = ""
for option in self.options:
value = make_value(option.default_value)
out += "%s = %s%s\n" % (option.name, value,
(" # %s" % option.comment) if option.comment else "")
return out.rstrip("\n") | Returns contents of config file as string
OUT: out (type: str, hint: config content) | Below is the the instruction that describes the task:
### Input:
Returns contents of config file as string
OUT: out (type: str, hint: config content)
### Response:
def dumps(self):
"""
Returns contents of config file as string
OUT: out (type: str, hint: config content)
"""
out = ""
for option in self.options:
value = make_value(option.default_value)
out += "%s = %s%s\n" % (option.name, value,
(" # %s" % option.comment) if option.comment else "")
return out.rstrip("\n") |
def decode(self, covertext):
"""Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
"""
if not isinstance(covertext, str):
raise InvalidInputException('Input must be of type string.')
insufficient = (len(covertext) < self._fixed_slice)
if insufficient:
raise DecodeFailureError(
"Covertext is shorter than self._fixed_slice, can't decode.")
maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
rank_payload = self._dfa.rank(covertext[:self._fixed_slice])
X = fte.bit_ops.long_to_bytes(rank_payload)
X = string.rjust(X, maximumBytesToRank, '\x00')
msg_len_header = self._encrypter.decryptOneBlock(
X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
msg_len_header = msg_len_header[8:16]
msg_len = fte.bit_ops.bytes_to_long(
msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])
retval = X[16:16 + msg_len]
retval += covertext[self._fixed_slice:]
ctxt_len = self._encrypter.getCiphertextLen(retval)
remaining_buffer = retval[ctxt_len:]
retval = retval[:ctxt_len]
retval = self._encrypter.decrypt(retval)
return retval, remaining_buffer | Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``. | Below is the the instruction that describes the task:
### Input:
Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
### Response:
def decode(self, covertext):
"""Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
"""
if not isinstance(covertext, str):
raise InvalidInputException('Input must be of type string.')
insufficient = (len(covertext) < self._fixed_slice)
if insufficient:
raise DecodeFailureError(
"Covertext is shorter than self._fixed_slice, can't decode.")
maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
rank_payload = self._dfa.rank(covertext[:self._fixed_slice])
X = fte.bit_ops.long_to_bytes(rank_payload)
X = string.rjust(X, maximumBytesToRank, '\x00')
msg_len_header = self._encrypter.decryptOneBlock(
X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
msg_len_header = msg_len_header[8:16]
msg_len = fte.bit_ops.bytes_to_long(
msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])
retval = X[16:16 + msg_len]
retval += covertext[self._fixed_slice:]
ctxt_len = self._encrypter.getCiphertextLen(retval)
remaining_buffer = retval[ctxt_len:]
retval = retval[:ctxt_len]
retval = self._encrypter.decrypt(retval)
return retval, remaining_buffer |
def byte_href_anchors_state_machine(self):
'''
byte-based state machine extractor of anchor tags, so we can
compute byte offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
'''
tag_depth = 0
a_tag_depth = 0
vals = []
href = None
idx_bytes = enumerate( self.clean_html )
while 1:
end_idx, val, next_b = read_to( idx_bytes, '<' )
tag_depth += 1
if href:
## must be inside an anchor tag, so accumulate the
## whole anchor
assert a_tag_depth > 0, (href, self.clean_html)
vals.append(val)
## figure out if start of an "A" anchor tag or close
## of a previous tag
idx, next_b1 = idx_bytes.next()
if next_b1.lower() == 'a':
## could be start of "A" tag
idx, next_b2 = idx_bytes.next()
if next_b2 == ' ':
a_tag_depth += 1
href = None
for idx, attr_name, attr_val in iter_attrs( idx_bytes ):
if attr_name.lower() == 'href':
href = attr_val
if idx is None:
## doc ended mid tag, so invalid HTML--> just bail
return
first = idx + 1
## if we got an href, then we want to keep the
## first byte idx of the anchor:
if href:
## Someone could nest an A tag inside another
## A tag, which is invalid (even in HTML5), so
## vals could be nonempty. We only generate
## the leaf-level A tags in these rare cases
## of nested A tags, so reset it:
vals = []
elif next_b1 == '/':
idx, next_b1 = idx_bytes.next()
if next_b1 == 'a':
## could be end of "A" tag
idx, next_b2 = idx_bytes.next()
if next_b2 == '>':
a_tag_depth -= 1
if href:
## join is much faster than using += above
anchor = b''.join(vals)
length = len(anchor)
## yield the data
yield href, first, len(anchor), anchor
## reset, no yield again in a nested A tag
href = None
else:
## the next_b was not part of </a> or a nested <a tag,
## so keep it in the output
vals.append(next_b) | byte-based state machine extractor of anchor tags, so we can
compute byte offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text) | Below is the the instruction that describes the task:
### Input:
byte-based state machine extractor of anchor tags, so we can
compute byte offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
### Response:
def byte_href_anchors_state_machine(self):
'''
byte-based state machine extractor of anchor tags, so we can
compute byte offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
'''
tag_depth = 0
a_tag_depth = 0
vals = []
href = None
idx_bytes = enumerate( self.clean_html )
while 1:
end_idx, val, next_b = read_to( idx_bytes, '<' )
tag_depth += 1
if href:
## must be inside an anchor tag, so accumulate the
## whole anchor
assert a_tag_depth > 0, (href, self.clean_html)
vals.append(val)
## figure out if start of an "A" anchor tag or close
## of a previous tag
idx, next_b1 = idx_bytes.next()
if next_b1.lower() == 'a':
## could be start of "A" tag
idx, next_b2 = idx_bytes.next()
if next_b2 == ' ':
a_tag_depth += 1
href = None
for idx, attr_name, attr_val in iter_attrs( idx_bytes ):
if attr_name.lower() == 'href':
href = attr_val
if idx is None:
## doc ended mid tag, so invalid HTML--> just bail
return
first = idx + 1
## if we got an href, then we want to keep the
## first byte idx of the anchor:
if href:
## Someone could nest an A tag inside another
## A tag, which is invalid (even in HTML5), so
## vals could be nonempty. We only generate
## the leaf-level A tags in these rare cases
## of nested A tags, so reset it:
vals = []
elif next_b1 == '/':
idx, next_b1 = idx_bytes.next()
if next_b1 == 'a':
## could be end of "A" tag
idx, next_b2 = idx_bytes.next()
if next_b2 == '>':
a_tag_depth -= 1
if href:
## join is much faster than using += above
anchor = b''.join(vals)
length = len(anchor)
## yield the data
yield href, first, len(anchor), anchor
## reset, no yield again in a nested A tag
href = None
else:
## the next_b was not part of </a> or a nested <a tag,
## so keep it in the output
vals.append(next_b) |
def store_lines(self, key, content):
'''like store_iter, but appends a newline to each chunk of
content'''
return self.store_iter(
key,
(data + '\n'.encode('utf-8') for data in content)) | like store_iter, but appends a newline to each chunk of
content | Below is the the instruction that describes the task:
### Input:
like store_iter, but appends a newline to each chunk of
content
### Response:
def store_lines(self, key, content):
'''like store_iter, but appends a newline to each chunk of
content'''
return self.store_iter(
key,
(data + '\n'.encode('utf-8') for data in content)) |
def _example_number_anywhere_for_type(num_type):
"""Gets a valid number for the specified number type (it may belong to any country).
Arguments:
num_type -- The type of number that is needed.
Returns a valid number for the specified type. Returns None when the
metadata does not contain such information. This should only happen when
no numbers of this type are allocated anywhere in the world anymore.
"""
for region_code in SUPPORTED_REGIONS:
example_numobj = example_number_for_type(region_code, num_type)
if example_numobj is not None:
return example_numobj
# If there wasn't an example number for a region, try the non-geographical entities.
for country_calling_code in COUNTRY_CODES_FOR_NON_GEO_REGIONS:
metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
desc = _number_desc_by_type(metadata, num_type)
if desc is not None and desc.example_number is not None:
try:
return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
except NumberParseException: # pragma no cover
pass
# There are no example numbers of this type for any country in the library.
return None | Gets a valid number for the specified number type (it may belong to any country).
Arguments:
num_type -- The type of number that is needed.
Returns a valid number for the specified type. Returns None when the
metadata does not contain such information. This should only happen when
no numbers of this type are allocated anywhere in the world anymore. | Below is the the instruction that describes the task:
### Input:
Gets a valid number for the specified number type (it may belong to any country).
Arguments:
num_type -- The type of number that is needed.
Returns a valid number for the specified type. Returns None when the
metadata does not contain such information. This should only happen when
no numbers of this type are allocated anywhere in the world anymore.
### Response:
def _example_number_anywhere_for_type(num_type):
"""Gets a valid number for the specified number type (it may belong to any country).
Arguments:
num_type -- The type of number that is needed.
Returns a valid number for the specified type. Returns None when the
metadata does not contain such information. This should only happen when
no numbers of this type are allocated anywhere in the world anymore.
"""
for region_code in SUPPORTED_REGIONS:
example_numobj = example_number_for_type(region_code, num_type)
if example_numobj is not None:
return example_numobj
# If there wasn't an example number for a region, try the non-geographical entities.
for country_calling_code in COUNTRY_CODES_FOR_NON_GEO_REGIONS:
metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
desc = _number_desc_by_type(metadata, num_type)
if desc is not None and desc.example_number is not None:
try:
return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
except NumberParseException: # pragma no cover
pass
# There are no example numbers of this type for any country in the library.
return None |
def state(self, state):
"""Set state."""
self._state = state
self._manager[ATTR_STATE] = state
_LOGGER.info('state changed to %s', state) | Set state. | Below is the the instruction that describes the task:
### Input:
Set state.
### Response:
def state(self, state):
"""Set state."""
self._state = state
self._manager[ATTR_STATE] = state
_LOGGER.info('state changed to %s', state) |
def triangle(self, params=None, query=None, extent=0.999,
**kwargs):
"""
Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
            Figure object containing corner plot.
"""
if triangle is None:
raise ImportError('please run "pip install triangle_plot".')
if params is None:
if self.fit_for_distance:
params = ['mass', 'age', 'feh', 'distance', 'AV']
else:
params = ['mass', 'age', 'feh']
df = self.samples
if query is not None:
df = df.query(query)
#convert extent to ranges, but making sure
# that truths are in range.
extents = []
remove = []
for i,par in enumerate(params):
m = re.search('delta_(\w+)$',par)
if m:
if type(self) == BinaryStarModel:
b = m.group(1)
values = (df['{}_mag_B'.format(b)] -
df['{}_mag_A'.format(b)])
df[par] = values
else:
remove.append(i)
continue
else:
values = df[par]
qs = np.array([0.5 - 0.5*extent, 0.5 + 0.5*extent])
minval, maxval = values.quantile(qs)
if 'truths' in kwargs:
datarange = maxval - minval
if kwargs['truths'][i] < minval:
minval = kwargs['truths'][i] - 0.05*datarange
if kwargs['truths'][i] > maxval:
maxval = kwargs['truths'][i] + 0.05*datarange
extents.append((minval,maxval))
[params.pop(i) for i in remove]
fig = triangle.corner(df[params], labels=params,
extents=extents, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig | Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
        Figure object containing corner plot. | Below is the the instruction that describes the task:
### Input:
Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
        Figure object containing corner plot.
### Response:
def triangle(self, params=None, query=None, extent=0.999,
**kwargs):
"""
Makes a nifty corner plot.
Uses :func:`triangle.corner`.
:param params: (optional)
Names of columns (from :attr:`StarModel.samples`)
to plot. If ``None``, then it will plot samples
of the parameters used in the MCMC fit-- that is,
mass, age, [Fe/H], and optionally distance and A_V.
:param query: (optional)
Optional query on samples.
:param extent: (optional)
Will be appropriately passed to :func:`triangle.corner`.
:param **kwargs:
Additional keyword arguments passed to :func:`triangle.corner`.
:return:
            Figure object containing corner plot.
"""
if triangle is None:
raise ImportError('please run "pip install triangle_plot".')
if params is None:
if self.fit_for_distance:
params = ['mass', 'age', 'feh', 'distance', 'AV']
else:
params = ['mass', 'age', 'feh']
df = self.samples
if query is not None:
df = df.query(query)
#convert extent to ranges, but making sure
# that truths are in range.
extents = []
remove = []
for i,par in enumerate(params):
m = re.search('delta_(\w+)$',par)
if m:
if type(self) == BinaryStarModel:
b = m.group(1)
values = (df['{}_mag_B'.format(b)] -
df['{}_mag_A'.format(b)])
df[par] = values
else:
remove.append(i)
continue
else:
values = df[par]
qs = np.array([0.5 - 0.5*extent, 0.5 + 0.5*extent])
minval, maxval = values.quantile(qs)
if 'truths' in kwargs:
datarange = maxval - minval
if kwargs['truths'][i] < minval:
minval = kwargs['truths'][i] - 0.05*datarange
if kwargs['truths'][i] > maxval:
maxval = kwargs['truths'][i] + 0.05*datarange
extents.append((minval,maxval))
[params.pop(i) for i in remove]
fig = triangle.corner(df[params], labels=params,
extents=extents, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig |
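The per-parameter plot ranges above come from symmetric quantiles around the median, widened so that any supplied truth value stays inside the plotted extent; a small standalone numpy sketch of that logic (the values and the truth below are made up for illustration):
import numpy as np
extent = 0.999
values = np.random.normal(loc=1.0, scale=0.1, size=10000)
qs = [0.5 - 0.5 * extent, 0.5 + 0.5 * extent]
minval, maxval = np.quantile(values, qs)
truth = 1.6                      # pretend the true value lies outside the sampled range
datarange = maxval - minval
if truth < minval:
    minval = truth - 0.05 * datarange
if truth > maxval:
    maxval = truth + 0.05 * datarange
print((minval, maxval))          # the extent handed to the corner plot for this parameter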
def parse_fasta_annotations(fastas, annot_tables, trans_table):
"""
parse gene call information from Prodigal fasta output
"""
if annot_tables is not False:
annots = {}
for table in annot_tables:
for cds in open(table):
ID, start, end, strand = cds.strip().split()
annots[ID] = [start, end, int(strand)]
for fasta in fastas:
for seq in parse_fasta(fasta):
if ('# ;gc_cont' not in seq[0] and '# ID=' not in seq[0]) and annot_tables is False:
print('# specify fasta from Prodigal or annotations table (-t)', file=sys.stderr)
exit()
if 'ID=' in seq[0]:
ID = seq[0].rsplit('ID=', 1)[1].split(';', 1)[0]
contig = seq[0].split()[0].split('>')[1].rsplit('_%s' % (ID), 1)[0]
else:
contig = seq[0].split()[0].split('>')[1].rsplit('_', 1)[0]
locus = seq[0].split()[0].split('>')[1]
# annotation info from Prodigal
if ('# ;gc_cont' in seq[0] or '# ID=' in seq[0]):
info = seq[0].split(' # ')
start, end, strand = int(info[1]), int(info[2]), info[3]
if strand == '1':
strand = 1
else:
strand = -1
product = [''.join(info[4].split()[1:])]
# annotation info from table
else:
start, end, strand = annots[locus]
product = seq[0].split(' ', 1)[1]
info = {'transl_table':[trans_table], \
'translation':[seq[1]], \
'product':product}
yield contig, [locus, [start, end, strand], info] | parse gene call information from Prodigal fasta output | Below is the instruction that describes the task:
### Input:
parse gene call information from Prodigal fasta output
### Response:
def parse_fasta_annotations(fastas, annot_tables, trans_table):
"""
parse gene call information from Prodigal fasta output
"""
if annot_tables is not False:
annots = {}
for table in annot_tables:
for cds in open(table):
ID, start, end, strand = cds.strip().split()
annots[ID] = [start, end, int(strand)]
for fasta in fastas:
for seq in parse_fasta(fasta):
if ('# ;gc_cont' not in seq[0] and '# ID=' not in seq[0]) and annot_tables is False:
print('# specify fasta from Prodigal or annotations table (-t)', file=sys.stderr)
exit()
if 'ID=' in seq[0]:
ID = seq[0].rsplit('ID=', 1)[1].split(';', 1)[0]
contig = seq[0].split()[0].split('>')[1].rsplit('_%s' % (ID), 1)[0]
else:
contig = seq[0].split()[0].split('>')[1].rsplit('_', 1)[0]
locus = seq[0].split()[0].split('>')[1]
# annotation info from Prodigal
if ('# ;gc_cont' in seq[0] or '# ID=' in seq[0]):
info = seq[0].split(' # ')
start, end, strand = int(info[1]), int(info[2]), info[3]
if strand == '1':
strand = 1
else:
strand = -1
product = [''.join(info[4].split()[1:])]
# annotation info from table
else:
start, end, strand = annots[locus]
product = seq[0].split(' ', 1)[1]
info = {'transl_table':[trans_table], \
'translation':[seq[1]], \
'product':product}
yield contig, [locus, [start, end, strand], info] |
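For orientation, here is a hedged, self-contained sketch of how the Prodigal-style header fields are pulled apart above; the header string is made up but follows the '>name # start # end # strand # key=value;...' layout the code expects:
header = '>NODE_1_3 # 339 # 781 # 1 # ID=1_3;partial=00;gc_cont=0.480'
info = header.split(' # ')
start, end, strand = int(info[1]), int(info[2]), info[3]
strand = 1 if strand == '1' else -1
gene_id = header.rsplit('ID=', 1)[1].split(';', 1)[0]
print(start, end, strand, gene_id)   # 339 781 1 1_3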
def get_type_string(self, data, type_string):
""" Gets type string.
Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string` overrides whatever type string is looked up.
The override makes it easier for subclasses to convert something
that the parent marshaller can write to disk but still put the
right type string in place).
Parameters
----------
data : type to be marshalled
The Python object that is being written to disk.
type_string : str or None
If it is a ``str``, it overrides any looked up type
string. ``None`` means don't override.
Returns
-------
str
The type string associated with 'data'. Will be
'type_string' if it is not ``None``.
Notes
-----
Subclasses probably do not need to override this method.
"""
if type_string is not None:
return type_string
else:
tp = type(data)
try:
return self.type_to_typestring[tp]
except KeyError:
return self.type_to_typestring[tp.__module__ + '.'
+ tp.__name__] | Gets type string.
Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string` overrides whatever type string is looked up.
The override makes it easier for subclasses to convert something
that the parent marshaller can write to disk but still put the
right type string in place).
Parameters
----------
data : type to be marshalled
The Python object that is being written to disk.
type_string : str or None
If it is a ``str``, it overrides any looked up type
string. ``None`` means don't override.
Returns
-------
str
The type string associated with 'data'. Will be
'type_string' if it is not ``None``.
Notes
-----
Subclasses probably do not need to override this method. | Below is the instruction that describes the task:
### Input:
Gets type string.
Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string` overrides whatever type string is looked up.
The override makes it easier for subclasses to convert something
that the parent marshaller can write to disk but still put the
right type string in place).
Parameters
----------
data : type to be marshalled
The Python object that is being written to disk.
type_string : str or None
If it is a ``str``, it overrides any looked up type
string. ``None`` means don't override.
Returns
-------
str
The type string associated with 'data'. Will be
'type_string' if it is not ``None``.
Notes
-----
Subclasses probably do not need to override this method.
### Response:
def get_type_string(self, data, type_string):
""" Gets type string.
Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string` overrides whatever type string is looked up.
The override makes it easier for subclasses to convert something
that the parent marshaller can write to disk but still put the
right type string in place).
Parameters
----------
data : type to be marshalled
The Python object that is being written to disk.
type_string : str or None
If it is a ``str``, it overrides any looked up type
string. ``None`` means don't override.
Returns
-------
str
The type string associated with 'data'. Will be
'type_string' if it is not ``None``.
Notes
-----
Subclasses probably do not need to override this method.
"""
if type_string is not None:
return type_string
else:
tp = type(data)
try:
return self.type_to_typestring[tp]
except KeyError:
return self.type_to_typestring[tp.__module__ + '.'
+ tp.__name__] |
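The lookup-with-fallback above can be exercised in isolation; the mapping below is hypothetical, and the point is only that the type object is tried first and the fully qualified 'module.name' string second:
import collections

type_to_typestring = {
    dict: 'dict',
    'collections.OrderedDict': 'collections.OrderedDict',
}

def lookup(data):
    tp = type(data)
    try:
        return type_to_typestring[tp]
    except KeyError:
        return type_to_typestring[tp.__module__ + '.' + tp.__name__]

print(lookup({}))                         # hit on the type object itself
print(lookup(collections.OrderedDict()))  # hit on the qualified-name fallback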
def clean(amount):
"""
Converts a number to a number with decimal point.
:param str amount: The input number.
:rtype: str
"""
# Return empty input immediately.
if not amount:
return amount
if re.search(r'[\. ][0-9]{3},[0-9]{1,2}$', amount):
# Assume amount is in 1.123,12 or 1 123,12 format (Dutch).
return amount.replace('.', '').replace(' ', '').replace(',', '.')
if re.search(r'[, ][0-9]{3}\.[0-9]{1,2}$', amount):
# Assume amount is in 1,123.12 format (English).
return amount.replace(',', '').replace(' ', '')
if re.search(r'[0-9](,[0-9]{1,2}$)', amount):
# Assume amount is in 123,12 or in 123,1 format (Dutch).
return amount.replace(',', '.')
# Format of amount is not recognized. Return amount.
return amount | Converts a number to a number with decimal point.
:param str amount: The input number.
:rtype: str | Below is the instruction that describes the task:
### Input:
Converts a number to a number with decimal point.
:param str amount: The input number.
:rtype: str
### Response:
def clean(amount):
"""
Converts a number to a number with decimal point.
:param str amount: The input number.
:rtype: str
"""
# Return empty input immediately.
if not amount:
return amount
if re.search(r'[\. ][0-9]{3},[0-9]{1,2}$', amount):
# Assume amount is in 1.123,12 or 1 123,12 format (Dutch).
return amount.replace('.', '').replace(' ', '').replace(',', '.')
if re.search(r'[, ][0-9]{3}\.[0-9]{1,2}$', amount):
# Assume amount is in 1,123.12 format (English).
return amount.replace(',', '').replace(' ', '')
if re.search(r'[0-9](,[0-9]{1,2}$)', amount):
# Assume amount is in 123,12 or in 123,1 format (Dutch).
return amount.replace(',', '.')
# Format of amount is not recognized. Return amount.
return amount |
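A short usage sketch for the helper above, assuming clean() is in scope (expected outputs traced from the regexes; illustrative, not authoritative):
print(clean('1.123,12'))   # '1123.12'  (Dutch thousands separator, decimal comma)
print(clean('1,123.12'))   # '1123.12'  (English thousands separator)
print(clean('123,12'))     # '123.12'   (plain decimal comma)
print(clean('123.45'))     # '123.45'   (already has a decimal point; unchanged)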
def save_existing_iam_env_vars(self):
"""Backup IAM environment variables for later restoration."""
for i in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'AWS_SESSION_TOKEN']:
if i in self.env_vars:
self.env_vars['OLD_' + i] = self.env_vars[i] | Backup IAM environment variables for later restoration. | Below is the instruction that describes the task:
### Input:
Backup IAM environment variables for later restoration.
### Response:
def save_existing_iam_env_vars(self):
"""Backup IAM environment variables for later restoration."""
for i in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
'AWS_SESSION_TOKEN']:
if i in self.env_vars:
self.env_vars['OLD_' + i] = self.env_vars[i] |
def InitiateEnrolment(self):
"""Initiate the enrollment process.
We do not send more than one enrollment request every 10 minutes. Note that
we still communicate to the server in fast poll mode, but these requests are
not carrying any payload.
"""
logging.debug("sending enrollment request")
now = time.time()
if now > self.last_enrollment_time + 10 * 60:
if not self.last_enrollment_time:
# This is the first enrollment request - we should enter fastpoll mode.
self.timer.FastPoll()
self.last_enrollment_time = now
# Send registration request:
self.client_worker.SendReply(
rdf_crypto.Certificate(
type=rdf_crypto.Certificate.Type.CSR,
pem=self.communicator.GetCSRAsPem()),
session_id=rdfvalue.SessionID(
queue=queues.ENROLLMENT, flow_name="Enrol")) | Initiate the enrollment process.
We do not send more than one enrollment request every 10 minutes. Note that
we still communicate to the server in fast poll mode, but these requests are
not carrying any payload. | Below is the instruction that describes the task:
### Input:
Initiate the enrollment process.
We do not send more than one enrollment request every 10 minutes. Note that
we still communicate to the server in fast poll mode, but these requests are
not carrying any payload.
### Response:
def InitiateEnrolment(self):
"""Initiate the enrollment process.
We do not send more than one enrollment request every 10 minutes. Note that
we still communicate to the server in fast poll mode, but these requests are
not carrying any payload.
"""
logging.debug("sending enrollment request")
now = time.time()
if now > self.last_enrollment_time + 10 * 60:
if not self.last_enrollment_time:
# This is the first enrollment request - we should enter fastpoll mode.
self.timer.FastPoll()
self.last_enrollment_time = now
# Send registration request:
self.client_worker.SendReply(
rdf_crypto.Certificate(
type=rdf_crypto.Certificate.Type.CSR,
pem=self.communicator.GetCSRAsPem()),
session_id=rdfvalue.SessionID(
queue=queues.ENROLLMENT, flow_name="Enrol")) |
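Stripped of the GRR-specific plumbing, the throttling above is a plain timestamp guard; a minimal sketch with illustrative names (not from the original client):
import time

last_enrollment_time = 0.0
MIN_INTERVAL = 10 * 60      # at most one enrollment request every 10 minutes

def should_send_now():
    global last_enrollment_time
    now = time.time()
    if now > last_enrollment_time + MIN_INTERVAL:
        last_enrollment_time = now
        return True         # send the request
    return False            # still inside the quiet period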
def getQuotes(symbols):
'''
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols
Unlike the python package 'yahoo-finance' (15 min delay), there is no delay for NYSE and NASDAQ stocks in the 'googlefinance' package.
example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}]
quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}, {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34', u'Yield': u'', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'', u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]
:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list
'''
if type(symbols) == type('str'):
symbols = [symbols]
content = json.loads(request(symbols))
return replaceKeys(content); | get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols
Unlike the python package 'yahoo-finance' (15 min delay), there is no delay for NYSE and NASDAQ stocks in the 'googlefinance' package.
example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}]
quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}, {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34', u'Yield': u'', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'', u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]
:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list | Below is the instruction that describes the task:
### Input:
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols
Unlike the python package 'yahoo-finance' (15 min delay), there is no delay for NYSE and NASDAQ stocks in the 'googlefinance' package.
example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}]
quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}, {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34', u'Yield': u'', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'', u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]
:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list
### Response:
def getQuotes(symbols):
'''
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols
Unlike the python package 'yahoo-finance' (15 min delay), there is no delay for NYSE and NASDAQ stocks in the 'googlefinance' package.
example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}]
quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}, {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34', u'Yield': u'', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'', u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]
:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list
'''
if type(symbols) == type('str'):
symbols = [symbols]
content = json.loads(request(symbols))
return replaceKeys(content); |
def train(self, data_iterator):
"""Train a keras model on a worker
"""
optimizer = get_optimizer(self.master_optimizer)
self.model = model_from_yaml(self.yaml, self.custom_objects)
self.model.compile(optimizer=optimizer,
loss=self.master_loss, metrics=self.master_metrics)
self.model.set_weights(self.parameters.value)
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
self.model.compile(optimizer=self.master_optimizer,
loss=self.master_loss,
metrics=self.master_metrics)
weights_before_training = self.model.get_weights()
if x_train.shape[0] > self.train_config.get('batch_size'):
self.model.fit(x_train, y_train, **self.train_config)
weights_after_training = self.model.get_weights()
deltas = subtract_params(
weights_before_training, weights_after_training)
yield deltas | Train a keras model on a worker | Below is the instruction that describes the task:
### Input:
Train a keras model on a worker
### Response:
def train(self, data_iterator):
"""Train a keras model on a worker
"""
optimizer = get_optimizer(self.master_optimizer)
self.model = model_from_yaml(self.yaml, self.custom_objects)
self.model.compile(optimizer=optimizer,
loss=self.master_loss, metrics=self.master_metrics)
self.model.set_weights(self.parameters.value)
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
self.model.compile(optimizer=self.master_optimizer,
loss=self.master_loss,
metrics=self.master_metrics)
weights_before_training = self.model.get_weights()
if x_train.shape[0] > self.train_config.get('batch_size'):
self.model.fit(x_train, y_train, **self.train_config)
weights_after_training = self.model.get_weights()
deltas = subtract_params(
weights_before_training, weights_after_training)
yield deltas |
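The worker reports weight deltas rather than raw weights; a small numpy sketch of that subtraction (assuming subtract_params behaves element-wise like this):
import numpy as np

weights_before = [np.ones((2, 2)), np.zeros(3)]
weights_after = [np.full((2, 2), 0.9), np.array([0.1, -0.2, 0.0])]
deltas = [b - a for b, a in zip(weights_before, weights_after)]
print(deltas[0])   # 0.1 everywhere: how far training moved the first layer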
def get_stddevs(self, mag, imt, stddev_types, num_sites):
"""
Returns the total standard deviation
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
sigma = self._get_total_sigma(imt, mag)
stddevs.append(sigma + np.zeros(num_sites))
return stddevs | Returns the total standard deviation | Below is the instruction that describes the task:
### Input:
Returns the total standard deviation
### Response:
def get_stddevs(self, mag, imt, stddev_types, num_sites):
"""
Returns the total standard deviation
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
sigma = self._get_total_sigma(imt, mag)
stddevs.append(sigma + np.zeros(num_sites))
return stddevs |
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _handleLegacyResult(result):
"""
make sure the result is backward compatible
"""
if not isinstance(result, dict):
warnings.warn('The Gerrit status callback uses the old way to '
'communicate results. The outcome might not be what is '
'expected.')
message, verified, reviewed = result
result = makeReviewResult(message,
(GERRIT_LABEL_VERIFIED, verified),
(GERRIT_LABEL_REVIEWED, reviewed))
return result | make sure the result is backward compatible | Below is the instruction that describes the task:
### Input:
make sure the result is backward compatible
### Response:
def _handleLegacyResult(result):
"""
make sure the result is backward compatible
"""
if not isinstance(result, dict):
warnings.warn('The Gerrit status callback uses the old way to '
'communicate results. The outcome might not be what is '
'expected.')
message, verified, reviewed = result
result = makeReviewResult(message,
(GERRIT_LABEL_VERIFIED, verified),
(GERRIT_LABEL_REVIEWED, reviewed))
return result |
def config(
state, host, key, value,
repo=None,
):
'''
Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
'''
existing_config = host.fact.git_config(repo)
if key not in existing_config or existing_config[key] != value:
if repo is None:
yield 'git config --global {0} "{1}"'.format(key, value)
else:
yield 'cd {0} && git config --local {1} "{2}"'.format(repo, key, value) | Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global) | Below is the instruction that describes the task:
### Input:
Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
### Response:
def config(
state, host, key, value,
repo=None,
):
'''
Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
'''
existing_config = host.fact.git_config(repo)
if key not in existing_config or existing_config[key] != value:
if repo is None:
yield 'git config --global {0} "{1}"'.format(key, value)
else:
yield 'cd {0} && git config --local {1} "{2}"'.format(repo, key, value) |
def extractPrintSaveIntermittens():
"""
This function will print out the intermittents onto the screen for casual viewing. It will also print out
where the giant summary dictionary is going to be stored.
:return: None
"""
# extract intermittents from collected failed tests
global g_summary_dict_intermittents
localtz = time.tzname[0]
for ind in range(len(g_summary_dict_all["TestName"])):
if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure:
addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind)
# save dict in file
if len(g_summary_dict_intermittents["TestName"]) > 0:
json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w'))
with open(g_summary_csv_filename, 'w') as summaryFile:
for ind in range(len(g_summary_dict_intermittents["TestName"])):
testName = g_summary_dict_intermittents["TestName"][ind]
numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"]
firstFailedTS = parser.parse(time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+
' '+localtz)
firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z")
recentFail = parser.parse(time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+
' '+localtz)
recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z")
eachTest = "{0}, {1}, {2}, {3}\n".format(testName, recentFailStr, numberFailure,
g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0])
summaryFile.write(eachTest)
print("Intermittent: {0}, Last failed: {1}, Failed {2} times since "
"{3}".format(testName, recentFailStr, numberFailure, firstFailedStr)) | This function will print out the intermittents onto the screen for casual viewing. It will also print out
where the giant summary dictionary is going to be stored.
:return: None | Below is the instruction that describes the task:
### Input:
This function will print out the intermittents onto the screen for casual viewing. It will also print out
where the giant summary dictionary is going to be stored.
:return: None
### Response:
def extractPrintSaveIntermittens():
"""
This function will print out the intermittents onto the screen for casual viewing. It will also print out
where the giant summary dictionary is going to be stored.
:return: None
"""
# extract intermittents from collected failed tests
global g_summary_dict_intermittents
localtz = time.tzname[0]
for ind in range(len(g_summary_dict_all["TestName"])):
if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure:
addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind)
# save dict in file
if len(g_summary_dict_intermittents["TestName"]) > 0:
json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w'))
with open(g_summary_csv_filename, 'w') as summaryFile:
for ind in range(len(g_summary_dict_intermittents["TestName"])):
testName = g_summary_dict_intermittents["TestName"][ind]
numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"]
firstFailedTS = parser.parse(time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+
' '+localtz)
firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z")
recentFail = parser.parse(time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))+
' '+localtz)
recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z")
eachTest = "{0}, {1}, {2}, {3}\n".format(testName, recentFailStr, numberFailure,
g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0])
summaryFile.write(eachTest)
print("Intermittent: {0}, Last failed: {1}, Failed {2} times since "
"{3}".format(testName, recentFailStr, numberFailure, firstFailedStr)) |
def padded(self, padding):
"""Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle.
"""
return Rect(pos=(self.pos[0]+padding, self.pos[1]+padding),
size=(self.size[0]-2*padding, self.size[1]-2*padding)) | Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle. | Below is the instruction that describes the task:
### Input:
Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle.
### Response:
def padded(self, padding):
"""Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle.
"""
return Rect(pos=(self.pos[0]+padding, self.pos[1]+padding),
size=(self.size[0]-2*padding, self.size[1]-2*padding)) |
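In plain numbers, the padding arithmetic above shifts the origin in by `padding` on each side and shrinks each dimension by twice that amount (no Rect class needed for the illustration):
pos, size, padding = (0.0, 0.0), (10.0, 6.0), 1.0
new_pos = (pos[0] + padding, pos[1] + padding)              # (1.0, 1.0)
new_size = (size[0] - 2 * padding, size[1] - 2 * padding)   # (8.0, 4.0)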
def enforce_timezone(self, value):
"""
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
"""
field_timezone = getattr(self, 'timezone', self.default_timezone())
if (field_timezone is not None) and not timezone.is_aware(value):
return timezone.make_aware(value, field_timezone)
elif (field_timezone is None) and timezone.is_aware(value):
return timezone.make_naive(value, timezone.UTC())
return value | When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes. | Below is the instruction that describes the task:
### Input:
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
### Response:
def enforce_timezone(self, value):
"""
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
"""
field_timezone = getattr(self, 'timezone', self.default_timezone())
if (field_timezone is not None) and not timezone.is_aware(value):
return timezone.make_aware(value, field_timezone)
elif (field_timezone is None) and timezone.is_aware(value):
return timezone.make_naive(value, timezone.UTC())
return value |
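For background, the naive/aware distinction the field enforces is simply whether tzinfo is attached to the datetime; a standard-library-only sketch:
from datetime import datetime, timezone

naive = datetime(2021, 6, 1, 12, 0)            # no tzinfo -> naive
aware = naive.replace(tzinfo=timezone.utc)     # tzinfo attached -> aware
print(naive.tzinfo is None, aware.tzinfo)      # True UTC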
def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug | Populate a slug field if needed. | Below is the instruction that describes the task:
### Input:
Populate a slug field if needed.
### Response:
def populate_slug(instance, field):
'''
Populate a slug field if needed.
'''
value = getattr(instance, field.db_field)
try:
previous = instance.__class__.objects.get(id=instance.id)
except Exception:
previous = None
# Field value has changed
changed = field.db_field in instance._get_changed_fields()
# Field initial value has been manually set
manual = not previous and value or changed
if not manual and field.populate_from:
# value to slugify is extracted from populate_from parameter
value = getattr(instance, field.populate_from)
if previous and value == getattr(previous, field.populate_from):
return value
if previous and getattr(previous, field.db_field) == value:
# value is unchanged from DB
return value
if previous and not changed and not field.update:
# Field is not manually set and slug should not update on change
return value
slug = field.slugify(value)
# This can happen when serializing an object which does not contain
# the properties used to generate the slug. Typically, when such
# an object is passed to one of the Celery workers (see issue #20).
if slug is None:
return
old_slug = getattr(previous, field.db_field, None)
if slug == old_slug:
return slug
# Ensure uniqueness
if field.unique:
base_slug = slug
index = 1
qs = instance.__class__.objects
if previous:
qs = qs(id__ne=previous.id)
def exists(s):
return qs(
class_check=False, **{field.db_field: s}
).limit(1).count(True) > 0
while exists(slug):
slug = '{0}-{1}'.format(base_slug, index)
index += 1
# Track old slugs for this class
if field.follow and old_slug != slug:
ns = instance.__class__.__name__
# Destroy redirections from this new slug
SlugFollow.objects(namespace=ns, old_slug=slug).delete()
if old_slug:
# Create a redirect for previous slug
slug_follower, created = SlugFollow.objects.get_or_create(
namespace=ns,
old_slug=old_slug,
auto_save=False,
)
slug_follower.new_slug = slug
slug_follower.save()
# Maintain previous redirects
SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)
setattr(instance, field.db_field, slug)
return slug |
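The uniqueness step above reduces to appending -1, -2, ... until the candidate stops colliding; a standalone sketch in which a set stands in for the database query:
taken = {'my-post', 'my-post-1'}

def unique_slug(base_slug):
    slug, index = base_slug, 1
    while slug in taken:
        slug = '{0}-{1}'.format(base_slug, index)
        index += 1
    return slug

print(unique_slug('my-post'))   # 'my-post-2'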
def reload_functions(self):
"""
Replace functions in namespace with functions from edited_source.
"""
with LiveExecution.lock:
if self.edited_source:
tree = ast.parse(self.edited_source)
for f in [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]:
self.ns[f.name].__code__ = meta.decompiler.compile_func(f, self.filename, self.ns).__code__ | Replace functions in namespace with functions from edited_source. | Below is the instruction that describes the task:
### Input:
Replace functions in namespace with functions from edited_source.
### Response:
def reload_functions(self):
"""
Replace functions in namespace with functions from edited_source.
"""
with LiveExecution.lock:
if self.edited_source:
tree = ast.parse(self.edited_source)
for f in [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]:
self.ns[f.name].__code__ = meta.decompiler.compile_func(f, self.filename, self.ns).__code__ |
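The function-finding half of the method uses only the standard library; the recompilation via meta.decompiler is project-specific, so this self-contained sketch stops at collecting the FunctionDef nodes:
import ast

source = "def foo():\n    return 1\n\ndef bar(x):\n    return x * 2\n"
tree = ast.parse(source)
names = [n.name for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]
print(names)   # ['foo', 'bar']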
def fetch_trades_since(self, since: int) -> List[Trade]:
"""Fetch trades since given timestamp."""
return self._fetch_since('trades', self.market.code)(self._trades_since)(since) | Fetch trades since given timestamp. | Below is the instruction that describes the task:
### Input:
Fetch trades since given timestamp.
### Response:
def fetch_trades_since(self, since: int) -> List[Trade]:
"""Fetch trades since given timestamp."""
return self._fetch_since('trades', self.market.code)(self._trades_since)(since) |
def renumber_atoms(self):
"""Reset the molecule's atoms :attr:`number` to be 1-indexed"""
if self.atoms:
# reset the mapping
self._anumb_to_atom = {}
for i,atom in enumerate(self.atoms):
atom.number = i+1 # starting from 1
else:
self.logger("the number of atoms is zero - no renumbering") | Reset the molecule's atoms :attr:`number` to be 1-indexed | Below is the the instruction that describes the task:
### Input:
Reset the molecule's atoms :attr:`number` to be 1-indexed
### Response:
def renumber_atoms(self):
"""Reset the molecule's atoms :attr:`number` to be 1-indexed"""
if self.atoms:
# reset the mapping
self._anumb_to_atom = {}
for i,atom in enumerate(self.atoms):
atom.number = i+1 # starting from 1
else:
self.logger("the number of atoms is zero - no renumbering") |
def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
if os.path.isdir(repo):
repo = os.path.abspath(repo)
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd)
except CalledProcessError:
self._complete_clone(ref, _git_cmd)
return self._new_repo(repo, ref, deps, clone_strategy) | Clone the given url and checkout the specific ref. | Below is the instruction that describes the task:
### Input:
Clone the given url and checkout the specific ref.
### Response:
def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
if os.path.isdir(repo):
repo = os.path.abspath(repo)
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd)
except CalledProcessError:
self._complete_clone(ref, _git_cmd)
return self._new_repo(repo, ref, deps, clone_strategy) |
def get_term_after(aterm):
"""
Returns a uw_sws.models.Term object,
for the term after the term given.
"""
next_year = aterm.year
if aterm.quarter == "autumn":
next_quarter = QUARTER_SEQ[0]
else:
next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]
if next_quarter == "winter":
next_year += 1
return get_term_by_year_and_quarter(next_year, next_quarter) | Returns a uw_sws.models.Term object,
for the term after the term given. | Below is the instruction that describes the task:
### Input:
Returns a uw_sws.models.Term object,
for the term after the term given.
### Response:
def get_term_after(aterm):
"""
Returns a uw_sws.models.Term object,
for the term after the term given.
"""
next_year = aterm.year
if aterm.quarter == "autumn":
next_quarter = QUARTER_SEQ[0]
else:
next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]
if next_quarter == "winter":
next_year += 1
return get_term_by_year_and_quarter(next_year, next_quarter) |
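The rollover logic above, as a standalone sketch; the winter -> spring -> summer -> autumn ordering of QUARTER_SEQ is an assumption made only for this illustration:
QUARTER_SEQ = ['winter', 'spring', 'summer', 'autumn']

def next_term(year, quarter):
    if quarter == 'autumn':
        nxt = QUARTER_SEQ[0]
    else:
        nxt = QUARTER_SEQ[QUARTER_SEQ.index(quarter) + 1]
    return (year + 1, nxt) if nxt == 'winter' else (year, nxt)

print(next_term(2023, 'autumn'))   # (2024, 'winter')
print(next_term(2023, 'spring'))   # (2023, 'summer')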
def _attempt_slice_retry(self, shard_state, tstate):
"""Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
"""
if (shard_state.slice_retries + 1 <
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
logging.warning(
"Slice %s %s failed for the %s of up to %s attempts "
"(%s of %s taskqueue execution attempts). "
"Will retry now.",
tstate.shard_id,
tstate.slice_id,
shard_state.slice_retries + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
self.task_retry_count() + 1,
parameters.config.TASK_MAX_ATTEMPTS)
# Clear info related to current exception. Otherwise, the real
# callstack that includes a frame for this method will show up
# in log.
sys.exc_clear()
self._try_free_lease(shard_state, slice_retry=True)
return self._TASK_DIRECTIVE.RETRY_SLICE
if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
logging.warning("Slice attempt %s exceeded %s max attempts.",
self.task_retry_count() + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
return self._TASK_DIRECTIVE.RETRY_SHARD | Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted. | Below is the instruction that describes the task:
### Input:
Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
### Response:
def _attempt_slice_retry(self, shard_state, tstate):
"""Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
"""
if (shard_state.slice_retries + 1 <
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
logging.warning(
"Slice %s %s failed for the %s of up to %s attempts "
"(%s of %s taskqueue execution attempts). "
"Will retry now.",
tstate.shard_id,
tstate.slice_id,
shard_state.slice_retries + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
self.task_retry_count() + 1,
parameters.config.TASK_MAX_ATTEMPTS)
# Clear info related to current exception. Otherwise, the real
# callstack that includes a frame for this method will show up
# in log.
sys.exc_clear()
self._try_free_lease(shard_state, slice_retry=True)
return self._TASK_DIRECTIVE.RETRY_SLICE
if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
logging.warning("Slice attempt %s exceeded %s max attempts.",
self.task_retry_count() + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
return self._TASK_DIRECTIVE.RETRY_SHARD |
def length(self,threshold=0.2,phys=False,ang=False,tdisrupt=None,
**kwargs):
"""
NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
coord - coordinate to return the density in ('apar' [default],
'll','ra','customra','phi')
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT)
"""
peak_dens= self.density_par(0.1,tdisrupt=tdisrupt,**kwargs) # assume that this is the peak
try:
result=\
optimize.brentq(lambda x: self.density_par(x,
tdisrupt=tdisrupt,
**kwargs)\
-peak_dens*threshold,
0.1,self._deltaAngleTrack)
except RuntimeError: #pragma: no cover
raise RuntimeError('Length could not be returned, because length method failed to find the threshold value')
except ValueError:
raise ValueError('Length could not be returned, because length method failed to initialize')
if phys:
# Need to now integrate length
dXda= self._interpTrackX.derivative()
dYda= self._interpTrackY.derivative()
dZda= self._interpTrackZ.derivative()
result= integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.\
+dYda(da)**2.\
+dZda(da)**2.),
0.,result)[0]*self._ro
elif ang:
# Need to now integrate length
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,0],-1)
-self._interpolatedObsTrackLB[:,0]) > 0.:
ll= dePeriod(self._interpolatedObsTrackLB[:,0][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
ll= dePeriod(self._interpolatedObsTrackLB[::-1,0][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,1],-1)
-self._interpolatedObsTrackLB[:,1]) > 0.:
bb= dePeriod(self._interpolatedObsTrackLB[:,1][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
bb= dePeriod(self._interpolatedObsTrackLB[::-1,1][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
dlda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,ll,k=3).derivative()
dbda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,bb,k=3).derivative()
result= integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.\
+dbda(da)**2.),
0.,result)[0]
return result | NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
coord - coordinate to return the density in ('apar' [default],
'll','ra','customra','phi')
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
coord - coordinate to return the density in ('apar' [default],
'll','ra','customra','phi')
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT)
### Response:
def length(self,threshold=0.2,phys=False,ang=False,tdisrupt=None,
**kwargs):
"""
NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
coord - coordinate to return the density in ('apar' [default],
'll','ra','customra','phi')
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT)
"""
peak_dens= self.density_par(0.1,tdisrupt=tdisrupt,**kwargs) # assume that this is the peak
try:
result=\
optimize.brentq(lambda x: self.density_par(x,
tdisrupt=tdisrupt,
**kwargs)\
-peak_dens*threshold,
0.1,self._deltaAngleTrack)
except RuntimeError: #pragma: no cover
raise RuntimeError('Length could not be returned, because length method failed to find the threshold value')
except ValueError:
raise ValueError('Length could not be returned, because length method failed to initialize')
if phys:
# Need to now integrate length
dXda= self._interpTrackX.derivative()
dYda= self._interpTrackY.derivative()
dZda= self._interpTrackZ.derivative()
result= integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.\
+dYda(da)**2.\
+dZda(da)**2.),
0.,result)[0]*self._ro
elif ang:
# Need to now integrate length
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,0],-1)
-self._interpolatedObsTrackLB[:,0]) > 0.:
ll= dePeriod(self._interpolatedObsTrackLB[:,0][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
ll= dePeriod(self._interpolatedObsTrackLB[::-1,0][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,1],-1)
-self._interpolatedObsTrackLB[:,1]) > 0.:
bb= dePeriod(self._interpolatedObsTrackLB[:,1][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
bb= dePeriod(self._interpolatedObsTrackLB[::-1,1][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
dlda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,ll,k=3).derivative()
dbda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,bb,k=3).derivative()
result= integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.\
+dbda(da)**2.),
0.,result)[0]
return result |
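The stream 'end' is located by root-bracketing where the density drops to a fraction of its near-peak value; a generic scipy sketch with a toy Gaussian density (not the actual stream model):
import numpy as np
from scipy import optimize

density = lambda x: np.exp(-x ** 2 / 2.)
peak = density(0.1)                  # density near the progenitor
threshold = 0.2
edge = optimize.brentq(lambda x: density(x) - peak * threshold, 0.1, 10.)
print(edge)                          # ~1.80, where the density falls to 20% of the peak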
def eigtransform(self, sequences, right=True, mode='clip'):
r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shortened. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
"""
result = []
for y in self.transform(sequences, mode=mode):
if right:
op = self.right_eigenvectors_[:, 1:]
else:
op = self.left_eigenvectors_[:, 1:]
is_finite = np.isfinite(y)
if not np.all(is_finite):
value = np.empty((y.shape[0], op.shape[1]))
value[is_finite, :] = np.take(op, y[is_finite].astype(np.int), axis=0)
value[~is_finite, :] = np.nan
else:
value = np.take(op, y, axis=0)
result.append(value)
return result | r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shortened. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105. | Below is the instruction that describes the task:
### Input:
r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shortened. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
### Response:
def eigtransform(self, sequences, right=True, mode='clip'):
r"""Transform a list of sequences by projecting the sequences onto
the first `n_timescales` dynamical eigenvectors.
Parameters
----------
sequences : list of array-like
List of sequences, or a single sequence. Each sequence should be a
1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
right : bool
Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
the right (:math:`\Psi`) eigenvectors of the transition matrix are
commonly used, and differ in their normalization. The two sets of
eigenvectors are related by the stationary distribution ::
\Phi_i(x) = \Psi_i(x) * \mu(x)
In the MSM literature, the right vectors (default here) are
approximations to the transfer operator eigenfunctions, whereas
the left eigenfunction are approximations to the propagator
eigenfunctions. For more details, refer to reference [1].
mode : {'clip', 'fill'}
Method by which to treat labels in `sequences` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shortened. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
transformed : list of 2d arrays
Each element of transformed is an array of shape ``(n_samples,
n_timescales)`` containing the transformed data.
References
----------
.. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics:
Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
"""
result = []
for y in self.transform(sequences, mode=mode):
if right:
op = self.right_eigenvectors_[:, 1:]
else:
op = self.left_eigenvectors_[:, 1:]
is_finite = np.isfinite(y)
if not np.all(is_finite):
value = np.empty((y.shape[0], op.shape[1]))
value[is_finite, :] = np.take(op, y[is_finite].astype(np.int), axis=0)
value[~is_finite, :] = np.nan
else:
value = np.take(op, y, axis=0)
result.append(value)
return result |
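The projection step itself is just row-indexing the eigenvector matrix by state label; a toy numpy sketch with made-up numbers (3 states, 2 retained eigenvectors):
import numpy as np

op = np.array([[0.1, -0.3],
               [0.0,  0.5],
               [-0.2, 0.2]])         # rows: states, columns: eigenvectors
labels = np.array([2, 0, 0, 1])      # a discrete trajectory of state labels
projected = np.take(op, labels, axis=0)
print(projected.shape)               # (4, 2): one coordinate pair per frame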
def cassist(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function from for recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
"""
return _cassist_any(self,dc,dt,dt2,"pij_cassist",nodiag=nodiag,memlimit=memlimit) | Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function from for recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5 | Below is the the instruction that describes the task:
### Input:
Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function from for recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
### Response:
def cassist(self,dc,dt,dt2,nodiag=False,memlimit=-1):
"""Calculates probability of gene i regulating gene j with continuous data assisted method,
with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A. This gives better
predictions when the number of secondary targets (dt2) is large. (Check program warnings.)
dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default)) Continuous anchor data.
Entry dc[i,j] is anchor i's value for sample j.
Anchor i is used to infer the probability of gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets require a larger memory, calculation will be split into smaller chunks. If the memory limit is smaller than minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function from for recommended combination of multiple tests.
For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
"""
return _cassist_any(self,dc,dt,dt2,"pij_cassist",nodiag=nodiag,memlimit=memlimit) |
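A hypothetical call sketch; the array shapes and the library handle `l` are assumptions, and the real worked example is findr.examples.geuvadis5:
import numpy as np
dc = np.random.rand(100, 500).astype('f4')    # continuous anchor data, 100 anchors x 500 samples
dt = np.random.rand(100, 500).astype('f4')    # expression of the 100 A genes
dt2 = np.random.rand(1000, 500).astype('f4')  # expression of the 1000 B genes
ans = l.cassist(dc, dt, dt2, nodiag=False)    # l: an initialized findr library object (assumed)
p = ans['p']                                  # (100, 1000) array of P(A -> B), valid if ans['ret'] == 0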
def _parse_template(self, code, label):
'''
Parse smart indented templates
Takes a template and returns a list of sub-templates, taking into account
the indentation of the original code based on the first line indentation(0)
Special treatment of whitespace: returns special Offset and INTERNAL_WHITESPACE, so the generation can be configurable
It auto detects the indentation width used, as the indent of the first indented line
>>> indented("""
def %<code>
e =
%<code2>
""")
['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
Offset(1),'e', INTERNAL_WHITESPACE, '=', NEWLINE,
Placeholder('code2', 1), NEWLINE]
'''
if isinstance(code, tuple):
return tuple(self._parse_template(c, label) for c in code)
elif isinstance(code, dict):
return {
k: self._parse_template(v, label) if k != '_key' else v
for k, v
in code.items()
}
elif not isinstance(code, str):
return []
lines = code.split('\n')
parsed = []
if len(lines) == 1:
i = re.match(r'^( +)', lines[0])
indent_size = len(i.group()) if i else 0
indent = 1 if i else 0
actual = lines
base = 0
else:
base = len(re.match(r'^( *)', lines[1]).group())
rebased = [line[base:] for line in lines]
for line in rebased:
i = re.match(r'^( +)', line)
if i:
indent_size = len(i.group())
break
else:
indent_size = 0
actual = rebased[1:]
for line in actual:
j = LINE_FIRS.match(line)
indent = len(j.group()) // indent_size if j else 0
if parsed:
parsed.append(Offset(indent))
in_placeholder = False
in_action = False
in_args = False
in_string_arg = False
in_double_arg = False
in_type = False
c = int(indent * indent_size)
m = c
placeholder = ''
while m < len(line):
# print(m, line[m], 'place:', in_placeholder, 'act:', in_action, 'a:', in_args, 's:', in_string_arg, yaml.dump(parsed))
f = line[m]
next_f = line[m + 1] if m < len(line) - 1 else None
if f == '%' and not in_placeholder and next_f == '<':
m += 2
in_placeholder = True
placeholder = ''
continue
elif f == ':' and in_placeholder:
m += 1
in_placeholder = False
in_action = True
action = ''
continue
elif f == ' ' and in_placeholder:
m += 1
continue
elif f == ' ' and in_action:
m += 1
in_action = False
in_args = True
args = ['']
continue
elif f == ' ' and (in_string_arg or in_double_arg):
args[-1] += f
m += 1
continue
elif f == ' ' and in_args:
m += 1
args.append('')
continue
elif f == '\'' and in_args:
m += 1
if in_string_arg:
in_string_arg = False
if args[-1] == '\\n':
args[-1] = '\n'
args[-1] += f
elif in_double_arg:
args[-1] += f
else:
in_string_arg = True
continue
elif f == '"' and in_args:
m += 1
if in_double_arg:
in_double_arg = False
if args[-1] == '\\n':
args[-1] = '\n'
args[-1] += f
elif in_string_arg:
args[-1] += f
else:
in_string_arg = True
continue
elif f == '>' and in_args and not in_string_arg and not in_double_arg:
m += 1
if args[-1] == '':
args = args[:-1]
args = [arg[:-1] if arg[-1] == '\'' else int(arg) for arg in args]
in_args = False
parsed.append(Action(placeholder, action, args))
continue
elif f == '>' and in_action:
m += 1
in_action = False
parsed.append(Action(placeholder, action, []))
elif f == '>' and in_placeholder:
m += 1
q = None
# if '.' in placeholder[1:]:
# input(placeholder)
if placeholder[0] == '#':
q = Function(placeholder[1:])
elif placeholder[0] == '@':
q = PseudoType(placeholder[1:].split('.'))
elif placeholder[0] == '.':
q = SubTemplate(label, placeholder[1:])
elif '.' in placeholder:
q = SubElement(placeholder.split('.'))
else:
q = Placeholder(placeholder)
in_placeholder = False
parsed.append(q)
elif f == ' ':
m += 1
parsed.append(INTERNAL_WHITESPACE)
elif in_placeholder:
m += 1
placeholder += f
elif in_action:
m += 1
action += f
elif in_args:
m += 1
args[-1] += f
else:
m += 1
if parsed and isinstance(parsed[-1], str):
parsed[-1] += f
else:
parsed.append(f)
if len(actual) > 1:
parsed.append(NEWLINE)
return parsed | Parse smart indented templates
Takes a template and returns a list of sub-templates, taking into account
the indentation of the original code based on the first line indentation(0)
Special treatment of whitespace: returns special Offset and INTERNAL_WHITESPACE, so the generation can be configurable
It auto detects the indentation width used, as the indent of the first indented line
>>> indented("""
def %<code>
e =
%<code2>
""")
['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
Offset(1),'e', INTERNAL_WHITESPACE, '=', NEWLINE,
Placeholder('code2', 1), NEWLINE] | Below is the the instruction that describes the task:
### Input:
Parse smart indented templates
Takes a template and returns a list of sub-templates, taking into account
the indentation of the original code based on the first line indentation(0)
Special treatment of whitespace: returns special Offset and INTERNAL_WHITESPACE, so the generation can be configurable
It auto detects the indentation width used, as the indent of the first indented line
>>> indented("""
def %<code>
e =
%<code2>
""")
['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
Offset(1),'e', INTERNAL_WHITESPACE, '=', NEWLINE,
Placeholder('code2', 1), NEWLINE]
### Response:
def _parse_template(self, code, label):
'''
Parse smart indented templates
Takes a template and returns a list of sub-templates, taking into account
the indentation of the original code based on the first line indentation(0)
Special treatment of whitespace: returns special Offset and INTERNAL_WHITESPACE, so the generation can be configurable
It auto detects the indentation width used, as the indent of the first indented line
>>> indented("""
def %<code>
e =
%<code2>
""")
['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
Offset(1),'e', INTERNAL_WHITESPACE, '=', NEWLINE,
Placeholder('code2', 1), NEWLINE]
'''
if isinstance(code, tuple):
return tuple(self._parse_template(c, label) for c in code)
elif isinstance(code, dict):
return {
k: self._parse_template(v, label) if k != '_key' else v
for k, v
in code.items()
}
elif not isinstance(code, str):
return []
lines = code.split('\n')
parsed = []
if len(lines) == 1:
i = re.match(r'^( +)', lines[0])
indent_size = len(i.group()) if i else 0
indent = 1 if i else 0
actual = lines
base = 0
else:
base = len(re.match(r'^( *)', lines[1]).group())
rebased = [line[base:] for line in lines]
for line in rebased:
i = re.match(r'^( +)', line)
if i:
indent_size = len(i.group())
break
else:
indent_size = 0
actual = rebased[1:]
for line in actual:
j = LINE_FIRS.match(line)
indent = len(j.group()) // indent_size if j else 0
if parsed:
parsed.append(Offset(indent))
in_placeholder = False
in_action = False
in_args = False
in_string_arg = False
in_double_arg = False
in_type = False
c = int(indent * indent_size)
m = c
placeholder = ''
while m < len(line):
# print(m, line[m], 'place:', in_placeholder, 'act:', in_action, 'a:', in_args, 's:', in_string_arg, yaml.dump(parsed))
f = line[m]
next_f = line[m + 1] if m < len(line) - 1 else None
if f == '%' and not in_placeholder and next_f == '<':
m += 2
in_placeholder = True
placeholder = ''
continue
elif f == ':' and in_placeholder:
m += 1
in_placeholder = False
in_action = True
action = ''
continue
elif f == ' ' and in_placeholder:
m += 1
continue
elif f == ' ' and in_action:
m += 1
in_action = False
in_args = True
args = ['']
continue
elif f == ' ' and (in_string_arg or in_double_arg):
args[-1] += f
m += 1
continue
elif f == ' ' and in_args:
m += 1
args.append('')
continue
elif f == '\'' and in_args:
m += 1
if in_string_arg:
in_string_arg = False
if args[-1] == '\\n':
args[-1] = '\n'
args[-1] += f
elif in_double_arg:
args[-1] += f
else:
in_string_arg = True
continue
elif f == '"' and in_args:
m += 1
if in_double_arg:
in_double_arg = False
if args[-1] == '\\n':
args[-1] = '\n'
args[-1] += f
elif in_string_arg:
args[-1] += f
else:
in_string_arg = True
continue
elif f == '>' and in_args and not in_string_arg and not in_double_arg:
m += 1
if args[-1] == '':
args = args[:-1]
args = [arg[:-1] if arg[-1] == '\'' else int(arg) for arg in args]
in_args = False
parsed.append(Action(placeholder, action, args))
continue
elif f == '>' and in_action:
m += 1
in_action = False
parsed.append(Action(placeholder, action, []))
elif f == '>' and in_placeholder:
m += 1
q = None
# if '.' in placeholder[1:]:
# input(placeholder)
if placeholder[0] == '#':
q = Function(placeholder[1:])
elif placeholder[0] == '@':
q = PseudoType(placeholder[1:].split('.'))
elif placeholder[0] == '.':
q = SubTemplate(label, placeholder[1:])
elif '.' in placeholder:
q = SubElement(placeholder.split('.'))
else:
q = Placeholder(placeholder)
in_placeholder = False
parsed.append(q)
elif f == ' ':
m += 1
parsed.append(INTERNAL_WHITESPACE)
elif in_placeholder:
m += 1
placeholder += f
elif in_action:
m += 1
action += f
elif in_args:
m += 1
args[-1] += f
else:
m += 1
if parsed and isinstance(parsed[-1], str):
parsed[-1] += f
else:
parsed.append(f)
if len(actual) > 1:
parsed.append(NEWLINE)
return parsed |
def _redirect_with_params(url_name, *args, **kwargs):
"""Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string param and their values to build.
Returns:
A properly formatted redirect string.
"""
url = urlresolvers.reverse(url_name, args=args)
params = parse.urlencode(kwargs, True)
return "{0}?{1}".format(url, params) | Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string param and their values to build.
Returns:
A properly formatted redirect string. | Below is the the instruction that describes the task:
### Input:
Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string param and their values to build.
Returns:
A properly formatted redirect string.
### Response:
def _redirect_with_params(url_name, *args, **kwargs):
"""Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
kwargs: the query string param and their values to build.
Returns:
A properly formatted redirect string.
"""
url = urlresolvers.reverse(url_name, args=args)
params = parse.urlencode(kwargs, True)
return "{0}?{1}".format(url, params) |
def _reverse_annotations(old_record, new_record):
"""
Copy annotations from old_record to new_record, reversing any
lists / tuples / strings.
"""
# Copy the annotations over
for k, v in list(old_record.annotations.items()):
# Trim if appropriate
if isinstance(v, (tuple, list)) and len(v) == len(old_record):
assert len(v) == len(old_record)
v = v[::-1]
new_record.annotations[k] = v
# Letter annotations must be lists / tuples / strings of the same
# length as the sequence
for k, v in list(old_record.letter_annotations.items()):
assert len(v) == len(old_record)
new_record.letter_annotations[k] = v[::-1] | Copy annotations from old_record to new_record, reversing any
lists / tuples / strings. | Below is the the instruction that describes the task:
### Input:
Copy annotations from old_record to new_record, reversing any
lists / tuples / strings.
### Response:
def _reverse_annotations(old_record, new_record):
"""
Copy annotations from old_record to new_record, reversing any
lists / tuples / strings.
"""
# Copy the annotations over
for k, v in list(old_record.annotations.items()):
# Trim if appropriate
if isinstance(v, (tuple, list)) and len(v) == len(old_record):
assert len(v) == len(old_record)
v = v[::-1]
new_record.annotations[k] = v
# Letter annotations must be lists / tuples / strings of the same
# length as the sequence
for k, v in list(old_record.letter_annotations.items()):
assert len(v) == len(old_record)
new_record.letter_annotations[k] = v[::-1] |
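A minimal sketch with Biopython `SeqRecord` objects; the records and quality scores are illustrative:
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

old = SeqRecord(Seq("ACGT"), id="read1")
old.letter_annotations["phred_quality"] = [30, 31, 32, 33]
new = SeqRecord(old.seq.reverse_complement(), id="read1")
_reverse_annotations(old, new)
# new.letter_annotations["phred_quality"] is now [33, 32, 31, 30]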
def get_messages(self, locale=None):
""" Get a dictionary of translated messages """
if locale is None:
locale = self.locale
if self.translator:
def translate(error):
return self.translator.translate(error, locale)
else:
def translate(error):
return error
errors = deepcopy(self.errors)
errors = self._translate_errors(errors, translate)
return errors | Get a dictionary of translated messages | Below is the the instruction that describes the task:
### Input:
Get a dictionary of translated messages
### Response:
def get_messages(self, locale=None):
""" Get a dictionary of translated messages """
if locale is None:
locale = self.locale
if self.translator:
def translate(error):
return self.translator.translate(error, locale)
else:
def translate(error):
return error
errors = deepcopy(self.errors)
errors = self._translate_errors(errors, translate)
return errors |
def _euler_to_q(self, euler):
"""
Create q array from euler angles
:param euler: array [roll, pitch, yaw] in rad
:returns: array q which represents a quaternion [w, x, y, z]
"""
assert(len(euler) == 3)
phi = euler[0]
theta = euler[1]
psi = euler[2]
c_phi_2 = np.cos(phi / 2)
s_phi_2 = np.sin(phi / 2)
c_theta_2 = np.cos(theta / 2)
s_theta_2 = np.sin(theta / 2)
c_psi_2 = np.cos(psi / 2)
s_psi_2 = np.sin(psi / 2)
q = np.zeros(4)
q[0] = (c_phi_2 * c_theta_2 * c_psi_2 +
s_phi_2 * s_theta_2 * s_psi_2)
q[1] = (s_phi_2 * c_theta_2 * c_psi_2 -
c_phi_2 * s_theta_2 * s_psi_2)
q[2] = (c_phi_2 * s_theta_2 * c_psi_2 +
s_phi_2 * c_theta_2 * s_psi_2)
q[3] = (c_phi_2 * c_theta_2 * s_psi_2 -
s_phi_2 * s_theta_2 * c_psi_2)
return q | Create q array from euler angles
:param euler: array [roll, pitch, yaw] in rad
:returns: array q which represents a quaternion [w, x, y, z] | Below is the the instruction that describes the task:
### Input:
Create q array from euler angles
:param euler: array [roll, pitch, yaw] in rad
:returns: array q which represents a quaternion [w, x, y, z]
### Response:
def _euler_to_q(self, euler):
"""
Create q array from euler angles
:param euler: array [roll, pitch, yaw] in rad
:returns: array q which represents a quaternion [w, x, y, z]
"""
assert(len(euler) == 3)
phi = euler[0]
theta = euler[1]
psi = euler[2]
c_phi_2 = np.cos(phi / 2)
s_phi_2 = np.sin(phi / 2)
c_theta_2 = np.cos(theta / 2)
s_theta_2 = np.sin(theta / 2)
c_psi_2 = np.cos(psi / 2)
s_psi_2 = np.sin(psi / 2)
q = np.zeros(4)
q[0] = (c_phi_2 * c_theta_2 * c_psi_2 +
s_phi_2 * s_theta_2 * s_psi_2)
q[1] = (s_phi_2 * c_theta_2 * c_psi_2 -
c_phi_2 * s_theta_2 * s_psi_2)
q[2] = (c_phi_2 * s_theta_2 * c_psi_2 +
s_phi_2 * c_theta_2 * s_psi_2)
q[3] = (c_phi_2 * c_theta_2 * s_psi_2 -
s_phi_2 * s_theta_2 * c_psi_2)
return q |
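A small numeric check, assuming `est` is an instance of the class defining this method (the name is an assumption): a pure 90-degree yaw should rotate only about the z-axis.
import numpy as np

q = est._euler_to_q(np.array([0.0, 0.0, np.pi / 2]))
# q is approximately [0.7071, 0.0, 0.0, 0.7071] in (w, x, y, z) order,
# and is a unit quaternion by construction:
assert np.allclose(np.linalg.norm(q), 1.0)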
def direction_cossim(layer, vec, batch=None):
"""Visualize a direction (cossine similarity)"""
def inner(T):
act_mags = tf.sqrt(tf.reduce_sum(T(layer)**2, -1, keepdims=True))
vec_mag = tf.sqrt(tf.reduce_sum(vec**2))
mags = act_mags * vec_mag
if batch is None:
return tf.reduce_mean(T(layer) * vec.reshape([1, 1, 1, -1]) / mags)
else:
return tf.reduce_mean(T(layer)[batch] * vec.reshape([1, 1, -1]) / mags)
return inner | Visualize a direction (cossine similarity) | Below is the the instruction that describes the task:
### Input:
Visualize a direction (cossine similarity)
### Response:
def direction_cossim(layer, vec, batch=None):
"""Visualize a direction (cossine similarity)"""
def inner(T):
act_mags = tf.sqrt(tf.reduce_sum(T(layer)**2, -1, keepdims=True))
vec_mag = tf.sqrt(tf.reduce_sum(vec**2))
mags = act_mags * vec_mag
if batch is None:
return tf.reduce_mean(T(layer) * vec.reshape([1, 1, 1, -1]) / mags)
else:
return tf.reduce_mean(T(layer)[batch] * vec.reshape([1, 1, -1]) / mags)
return inner |
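A hypothetical way to build the objective; the layer name, vector size, and the downstream rendering step are assumptions in the style of lucid-like visualization code:
import numpy as np

vec = np.random.randn(512).astype(np.float32)   # a direction in activation space
objective = direction_cossim("mixed4d", vec)    # layer name is illustrative
# `objective` is a function of T (a tensor lookup) and would be handed to whatever
# rendering loop constructs T, e.g. a feature-visualization optimizer.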
def astype(self, dtype):
"""Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
"""
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True) | Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32) | Below is the the instruction that describes the task:
### Input:
Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
### Response:
def astype(self, dtype):
"""Returns a view that does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
"""
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True) |
def fit(self, X, y=None):
'''
Learn the linear transformation to flipped eigenvalues.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
'''
n = X.shape[0]
if X.shape != (n, n):
raise TypeError("Input must be a square matrix.")
# TODO: only get negative eigs somehow?
memory = get_memory(self.memory)
vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
X, overwrite_a=not self.copy)
vals = vals[:, None]
self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
return self | Learn the linear transformation to flipped eigenvalues.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part. | Below is the the instruction that describes the task:
### Input:
Learn the linear transformation to flipped eigenvalues.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
### Response:
def fit(self, X, y=None):
'''
Learn the linear transformation to flipped eigenvalues.
Parameters
----------
X : array, shape [n, n]
The *symmetric* input similarities. If X is asymmetric, it will be
treated as if it were symmetric based on its lower-triangular part.
'''
n = X.shape[0]
if X.shape != (n, n):
raise TypeError("Input must be a square matrix.")
# TODO: only get negative eigs somehow?
memory = get_memory(self.memory)
vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
X, overwrite_a=not self.copy)
vals = vals[:, None]
self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
return self |
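A minimal sketch, assuming `flipper` is an instance of this transformer and `S` is a symmetric but indefinite similarity matrix; both names are illustrative:
import numpy as np

S = np.random.rand(50, 50)
S = (S + S.T) / 2                 # symmetrize
flipper.fit(S)
# flipper.flip_ is V sign(Lambda) V^T; applying it to S gives V |Lambda| V^T,
# i.e. the same similarities with negative eigenvalues flipped to positive.
S_flipped = flipper.flip_.dot(S)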
def all(self):
""" Returns list with vids of all indexed partitions. """
partitions = []
query = text("""
SELECT dataset_vid, vid
FROM partition_index;""")
for result in self.execute(query):
dataset_vid, vid = result
partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
return partitions | Returns list with vids of all indexed partitions. | Below is the the instruction that describes the task:
### Input:
Returns list with vids of all indexed partitions.
### Response:
def all(self):
""" Returns list with vids of all indexed partitions. """
partitions = []
query = text("""
SELECT dataset_vid, vid
FROM partition_index;""")
for result in self.execute(query):
dataset_vid, vid = result
partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
return partitions |
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]: # noqa: F811
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
return value.encode("utf-8") | Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8. | Below is the the instruction that describes the task:
### Input:
Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
### Response:
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]: # noqa: F811
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
return value.encode("utf-8") |
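A few illustrative round trips, following the behavior described in the docstring:
utf8("café")        # -> b'caf\xc3\xa9'
utf8(b"already")    # -> b'already' (byte strings pass through unchanged)
utf8(None)          # -> None
utf8(3.14)          # -> raises TypeError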
def eigenvalues_(self):
"""The eigenvalues associated with each principal component."""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.s_).tolist() | The eigenvalues associated with each principal component. | Below is the the instruction that describes the task:
### Input:
The eigenvalues associated with each principal component.
### Response:
def eigenvalues_(self):
"""The eigenvalues associated with each principal component."""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.s_).tolist() |
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) | A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True. | Below is the the instruction that describes the task:
### Input:
A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
### Response:
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
retry=9, use_exception=True):
"""A high-level wrapper to map_port() that requests a mapping for
a public UDP port on the NAT to a private UDP port on this host.
Returns the complete response on success.
public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds.
Defaults to 3600, per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway.
Defaults to using auto-detection function
get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
Defaults to 9 as per specification.
use_exception - throw an exception if an error result is
received from the gateway. Defaults to True.
"""
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
gateway_ip=gateway_ip, retry=retry,
use_exception=use_exception) |
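A minimal usage sketch; the port numbers are illustrative and the gateway is auto-detected:
# Ask the NAT gateway to forward public UDP 6881 to this host's UDP 6881 for one hour.
response = map_udp_port(6881, 6881, lifetime=3600)
# On success `response` describes the granted mapping; with use_exception=True
# (the default) an error result from the gateway raises instead of returning.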
def unsign_filters_and_actions(sign, dotted_model_name):
"""Return the list of filters and actions for dotted_model_name."""
permissions = signing.loads(sign)
return permissions.get(dotted_model_name, []) | Return the list of filters and actions for dotted_model_name. | Below is the the instruction that describes the task:
### Input:
Return the list of filters and actions for dotted_model_name.
### Response:
def unsign_filters_and_actions(sign, dotted_model_name):
"""Return the list of filters and actions for dotted_model_name."""
permissions = signing.loads(sign)
return permissions.get(dotted_model_name, []) |
def find_by_fields(self, table, queryset={}):
'''
Query the database for records that match multiple field conditions
Args:
    table: table name, str
    queryset : dict mapping field names (keys) to values
return:
    success: [dict] the matching records
    failure: -1, and the error message is printed
'''
querys = ""
for k, v in queryset.items():
querys += "{} = '{}' and ".format(k, v)
sql = "select * from {} where {} ".format(
table, querys[:-4])
res = self.query(sql)
return res | Query the database for records that match multiple field conditions
Args:
    table: table name, str
    queryset : dict mapping field names (keys) to values
return:
    success: [dict] the matching records
    failure: -1, and the error message is printed | Below is the the instruction that describes the task:
### Input:
Query the database for records that match multiple field conditions
Args:
    table: table name, str
    queryset : dict mapping field names (keys) to values
return:
    success: [dict] the matching records
    failure: -1, and the error message is printed
### Response:
def find_by_fields(self, table, queryset={}):
'''
Query the database for records that match multiple field conditions
Args:
    table: table name, str
    queryset : dict mapping field names (keys) to values
return:
    success: [dict] the matching records
    failure: -1, and the error message is printed
'''
querys = ""
for k, v in queryset.items():
querys += "{} = '{}' and ".format(k, v)
sql = "select * from {} where {} ".format(
table, querys[:-4])
res = self.query(sql)
return res |
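A minimal usage sketch, assuming `db` is an instance of the wrapper class and a `users` table exists; both are assumptions:
rows = db.find_by_fields('users', {'name': 'alice', 'status': 'active'})
# Executes roughly: select * from users where name = 'alice' and status = 'active'
Note that the query is built by string interpolation, so values containing quotes will break it and untrusted input opens the door to SQL injection; a parameterized query would be the safer design.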
def migrate_all():
"""
Create schema migrations for all apps specified in INSTALLED_APPS,
then run a migrate command.
"""
if 'south' in settings.INSTALLED_APPS:
return _south_migrate_all()
from django.core.management.commands import makemigrations, migrate
schema_args = [sys.executable, 'makemigrations']
for app in settings.INSTALLED_APPS:
if not app.startswith('django'):
schema_args += [app]
schema_cmd = makemigrations.Command()
schema_cmd.run_from_argv(schema_args)
migrate_cmd = migrate.Command()
sys.stderr.write("MIGRATE ALL!\n")
return migrate_cmd.run_from_argv([sys.executable, 'migrate']) | Create schema migrations for all apps specified in INSTALLED_APPS,
then run a migrate command. | Below is the the instruction that describes the task:
### Input:
Create schema migrations for all apps specified in INSTALLED_APPS,
then run a migrate command.
### Response:
def migrate_all():
"""
Create schema migrations for all apps specified in INSTALLED_APPS,
then run a migrate command.
"""
if 'south' in settings.INSTALLED_APPS:
return _south_migrate_all()
from django.core.management.commands import makemigrations, migrate
schema_args = [sys.executable, 'makemigrations']
for app in settings.INSTALLED_APPS:
if not app.startswith('django'):
schema_args += [app]
schema_cmd = makemigrations.Command()
schema_cmd.run_from_argv(schema_args)
migrate_cmd = migrate.Command()
sys.stderr.write("MIGRATE ALL!\n")
return migrate_cmd.run_from_argv([sys.executable, 'migrate']) |
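A minimal invocation sketch from a standalone script; the settings module name is an assumption:
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
django.setup()
migrate_all()   # makemigrations for each non-django app, then migrate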
def classical_risk(riskinputs, riskmodel, param, monitor):
"""
Compute and return the average losses for each asset.
:param riskinputs:
:class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
dictionary of extra parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
"""
result = dict(loss_curves=[], stat_curves=[])
weights = [w['default'] for w in param['weights']]
statnames, stats = zip(*param['stats'])
for ri in riskinputs:
A = len(ri.assets)
L = len(riskmodel.lti)
R = ri.hazard_getter.num_rlzs
loss_curves = numpy.zeros((R, L, A), object)
avg_losses = numpy.zeros((R, L, A))
for out in riskmodel.gen_outputs(ri, monitor):
r = out.rlzi
for l, loss_type in enumerate(riskmodel.loss_types):
# loss_curves has shape (A, C)
for i, asset in enumerate(ri.assets):
loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
aid = asset['ordinal']
avg = scientific.average_loss(lc)
avg_losses[r, l, i] = avg
lcurve = (lc['loss'], lc['poe'], avg)
result['loss_curves'].append((l, r, aid, lcurve))
# compute statistics
for l, loss_type in enumerate(riskmodel.loss_types):
for i, asset in enumerate(ri.assets):
avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
losses = loss_curves[0, l, i]['loss']
all_poes = numpy.array(
[loss_curves[r, l, i]['poe'] for r in range(R)])
poes_stats = compute_stats(all_poes, stats, weights)
result['stat_curves'].append(
(l, asset['ordinal'], losses, poes_stats, avg_stats))
if R == 1: # the realization is the same as the mean
del result['loss_curves']
return result | Compute and return the average losses for each asset.
:param riskinputs:
:class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
dictionary of extra parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance | Below is the the instruction that describes the task:
### Input:
Compute and return the average losses for each asset.
:param riskinputs:
:class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
dictionary of extra parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
### Response:
def classical_risk(riskinputs, riskmodel, param, monitor):
"""
Compute and return the average losses for each asset.
:param riskinputs:
:class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
dictionary of extra parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
"""
result = dict(loss_curves=[], stat_curves=[])
weights = [w['default'] for w in param['weights']]
statnames, stats = zip(*param['stats'])
for ri in riskinputs:
A = len(ri.assets)
L = len(riskmodel.lti)
R = ri.hazard_getter.num_rlzs
loss_curves = numpy.zeros((R, L, A), object)
avg_losses = numpy.zeros((R, L, A))
for out in riskmodel.gen_outputs(ri, monitor):
r = out.rlzi
for l, loss_type in enumerate(riskmodel.loss_types):
# loss_curves has shape (A, C)
for i, asset in enumerate(ri.assets):
loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
aid = asset['ordinal']
avg = scientific.average_loss(lc)
avg_losses[r, l, i] = avg
lcurve = (lc['loss'], lc['poe'], avg)
result['loss_curves'].append((l, r, aid, lcurve))
# compute statistics
for l, loss_type in enumerate(riskmodel.loss_types):
for i, asset in enumerate(ri.assets):
avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
losses = loss_curves[0, l, i]['loss']
all_poes = numpy.array(
[loss_curves[r, l, i]['poe'] for r in range(R)])
poes_stats = compute_stats(all_poes, stats, weights)
result['stat_curves'].append(
(l, asset['ordinal'], losses, poes_stats, avg_stats))
if R == 1: # the realization is the same as the mean
del result['loss_curves']
return result |
def find_resources(library, session, query):
"""Queries a VISA system to locate the resources associated with a specified interface.
Corresponds to viFindRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session (unused, just to uniform signatures).
:param query: A regular expression followed by an optional logical expression. Use '?*' for all.
:return: find_list, return_counter, instrument_description, return value of the library call.
:rtype: ViFindList, int, unicode (Py2) or str (Py3), :class:`pyvisa.constants.StatusCode`
"""
find_list = ViFindList()
return_counter = ViUInt32()
instrument_description = create_string_buffer(constants.VI_FIND_BUFLEN)
# [ViSession, ViString, ViPFindList, ViPUInt32, ViAChar]
# ViString converts from (str, unicode, bytes) to bytes
ret = library.viFindRsrc(session, query,
byref(find_list), byref(return_counter),
instrument_description)
return find_list, return_counter.value, buffer_to_text(instrument_description), ret | Queries a VISA system to locate the resources associated with a specified interface.
Corresponds to viFindRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session (unused, just to uniform signatures).
:param query: A regular expression followed by an optional logical expression. Use '?*' for all.
:return: find_list, return_counter, instrument_description, return value of the library call.
:rtype: ViFindList, int, unicode (Py2) or str (Py3), :class:`pyvisa.constants.StatusCode` | Below is the the instruction that describes the task:
### Input:
Queries a VISA system to locate the resources associated with a specified interface.
Corresponds to viFindRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session (unused, just to uniform signatures).
:param query: A regular expression followed by an optional logical expression. Use '?*' for all.
:return: find_list, return_counter, instrument_description, return value of the library call.
:rtype: ViFindList, int, unicode (Py2) or str (Py3), :class:`pyvisa.constants.StatusCode`
### Response:
def find_resources(library, session, query):
"""Queries a VISA system to locate the resources associated with a specified interface.
Corresponds to viFindRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session (unused, just to uniform signatures).
:param query: A regular expression followed by an optional logical expression. Use '?*' for all.
:return: find_list, return_counter, instrument_description, return value of the library call.
:rtype: ViFindList, int, unicode (Py2) or str (Py3), :class:`pyvisa.constants.StatusCode`
"""
find_list = ViFindList()
return_counter = ViUInt32()
instrument_description = create_string_buffer(constants.VI_FIND_BUFLEN)
# [ViSession, ViString, ViPFindList, ViPUInt32, ViAChar]
# ViString converts from (str, unicode, bytes) to bytes
ret = library.viFindRsrc(session, query,
byref(find_list), byref(return_counter),
instrument_description)
return find_list, return_counter.value, buffer_to_text(instrument_description), ret |
def _set_ipv6_ve_intf_cmds(self, v, load=False):
"""
Setter method for ipv6_ve_intf_cmds, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_ve_intf_cmds() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_ve_intf_cmds must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)""",
})
self.__ipv6_ve_intf_cmds = t
if hasattr(self, '_set'):
self._set() | Setter method for ipv6_ve_intf_cmds, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_ve_intf_cmds() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for ipv6_ve_intf_cmds, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_ve_intf_cmds() directly.
### Response:
def _set_ipv6_ve_intf_cmds(self, v, load=False):
"""
Setter method for ipv6_ve_intf_cmds, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_ve_intf_cmds() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_ve_intf_cmds must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)""",
})
self.__ipv6_ve_intf_cmds = t
if hasattr(self, '_set'):
self._set() |