code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def setupWorkerTransition():
"""Hook Twisted deprecation machinery to use custom warning class
for Worker API deprecation warnings."""
default_warn_method = getWarningMethod()
def custom_warn_method(message, category, stacklevel):
if stacklevel is not None:
stacklevel += 1
if _WORKER_WARNING_MARK in message:
# Message contains our mark - it's Worker API Renaming warning,
# issue it appropriately.
message = message.replace(_WORKER_WARNING_MARK, "")
warnings.warn(
DeprecatedWorkerNameWarning(message), message, stacklevel)
else:
# Other's warning message
default_warn_method(message, category, stacklevel)
setWarningMethod(custom_warn_method) | Hook Twisted deprecation machinery to use custom warning class
for Worker API deprecation warnings. | Below is the instruction that describes the task:
### Input:
Hook Twisted deprecation machinery to use custom warning class
for Worker API deprecation warnings.
### Response:
def setupWorkerTransition():
"""Hook Twisted deprecation machinery to use custom warning class
for Worker API deprecation warnings."""
default_warn_method = getWarningMethod()
def custom_warn_method(message, category, stacklevel):
if stacklevel is not None:
stacklevel += 1
if _WORKER_WARNING_MARK in message:
# Message contains our mark - it's Worker API Renaming warning,
# issue it appropriately.
message = message.replace(_WORKER_WARNING_MARK, "")
warnings.warn(
DeprecatedWorkerNameWarning(message), message, stacklevel)
else:
# Other's warning message
default_warn_method(message, category, stacklevel)
setWarningMethod(custom_warn_method) |
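The marker-dispatch idea above can be sketched with only the standard-library `warnings` module; the marker string and warning class below are hypothetical stand-ins for `_WORKER_WARNING_MARK` and the Twisted `getWarningMethod`/`setWarningMethod` plumbing:
```python
import warnings

_MARK = "[worker-rename]"          # hypothetical stand-in for _WORKER_WARNING_MARK

class DeprecatedWorkerNameWarning(DeprecationWarning):
    """Stand-in for the dedicated warning class used above."""

_default_warn = warnings.warn      # keep a reference to the original warn method

def custom_warn(message, category=UserWarning, stacklevel=1):
    if isinstance(message, str) and _MARK in message:
        # Marked message: strip the marker and re-issue under the custom class.
        _default_warn(DeprecatedWorkerNameWarning(message.replace(_MARK, "").strip()),
                      stacklevel=stacklevel + 1)
    else:
        # Anything else passes straight through to the original method.
        _default_warn(message, category, stacklevel=stacklevel + 1)

warnings.warn = custom_warn        # crude analogue of Twisted's setWarningMethod()
warnings.warn(_MARK + " 'buildslave' has been renamed to 'worker'")
```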
def get_string_from_view(self, request, view_name, url_kwargs,
render_type='string'):
"""
Returns a string that is a rendering of the view given a
request, view_name, and the original url_kwargs. Makes the
following changes to the view before rendering:
* Sets can_submit to False.
* Adds action_url to the context. This is the url where \
this view actually lives.
* Sets the default base_template to be 'cms/partial.html'
This will always call GET and never POST as any actions
that modify data should take place on the original
url and not like this.
:param request: The request object.
:param view_name: The name of the view that you want.
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
to remove arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param render_type: The render type to use. Defaults to \
'string'.
"""
response = ""
try:
view, name = self.get_initialized_view_and_name(view_name,
render_type=render_type,
can_submit=False,
base_template='cms/partial.html',
request=request, kwargs=url_kwargs)
if isinstance(view, URLAlias):
view_name = view.get_view_name(view_name)
bundle = view.get_bundle(self, url_kwargs, {})
if bundle and isinstance(bundle, Bundle):
return bundle.get_string_from_view(request, view_name,
url_kwargs,
render_type=render_type)
elif view:
if view and name and view.can_view(request.user):
response = self._render_view_as_string(view, name, request,
url_kwargs)
except http.Http404:
pass
return response | Returns a string that is a rendering of the view given a
request, view_name, and the original url_kwargs. Makes the
following changes to the view before rendering:
* Sets can_submit to False.
* Adds action_url to the context. This is the url where \
this view actually lives.
* Sets the default base_template to be 'cms/partial.html'
This will always call GET and never POST as any actions
that modify data should take place on the original
url and not like this.
:param request: The request object.
:param view_name: The name of the view that you want.
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
to remove arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param render_type: The render type to use. Defaults to \
'string'. | Below is the instruction that describes the task:
### Input:
Returns a string that is a rendering of the view given a
request, view_name, and the original url_kwargs. Makes the
following changes to the view before rendering:
* Sets can_submit to False.
* Adds action_url to the context. This is the url where \
this view actually lives.
* Sets the default base_template to be 'cms/partial.html'
This will always call GET and never POST as any actions
that modify data should take place on the original
url and not like this.
:param request: The request object.
:param view_name: The name of the view that you want.
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
to remove arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param render_type: The render type to use. Defaults to \
'string'.
### Response:
def get_string_from_view(self, request, view_name, url_kwargs,
render_type='string'):
"""
Returns a string that is a rendering of the view given a
request, view_name, and the original url_kwargs. Makes the
following changes to the view before rendering:
* Sets can_submit to False.
* Adds action_url to the context. This is the url where \
this view actually lives.
* Sets the default base_template to be 'cms/partial.html'
This will always call GET and never POST as any actions
that modify data should take place on the original
url and not like this.
:param request: The request object.
:param view_name: The name of the view that you want.
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
to remove arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param render_type: The render type to use. Defaults to \
'string'.
"""
response = ""
try:
view, name = self.get_initialized_view_and_name(view_name,
render_type=render_type,
can_submit=False,
base_template='cms/partial.html',
request=request, kwargs=url_kwargs)
if isinstance(view, URLAlias):
view_name = view.get_view_name(view_name)
bundle = view.get_bundle(self, url_kwargs, {})
if bundle and isinstance(bundle, Bundle):
return bundle.get_string_from_view(request, view_name,
url_kwargs,
render_type=render_type)
elif view:
if view and name and view.can_view(request.user):
response = self._render_view_as_string(view, name, request,
url_kwargs)
except http.Http404:
pass
return response |
def teff(cluster):
"""
Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use
[Fe/H] of the cluster, if available.
Returns a list of Teff values.
"""
b_vs, _ = cluster.stars()
teffs = []
for b_v in b_vs:
b_v -= cluster.eb_v
if b_v > -0.04:
x = (14.551 - b_v) / 3.684
else:
x = (3.402 - math.sqrt(0.515 + 1.376 * b_v)) / 0.688
teffs.append(math.pow(10, x))
return teffs | Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use
[Fe/H] of the cluster, if available.
Returns a list of Teff values. | Below is the instruction that describes the task:
### Input:
Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use
[Fe/H] of the cluster, if available.
Returns a list of Teff values.
### Response:
def teff(cluster):
"""
Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use
[Fe/H] of the cluster, if available.
Returns a list of Teff values.
"""
b_vs, _ = cluster.stars()
teffs = []
for b_v in b_vs:
b_v -= cluster.eb_v
if b_v > -0.04:
x = (14.551 - b_v) / 3.684
else:
x = (3.402 - math.sqrt(0.515 + 1.376 * b_v)) / 0.688
teffs.append(math.pow(10, x))
return teffs |
def loadSignal(self, name, start=None, end=None):
"""
Loads the named entry from the upload cache as a signal.
:param name: the name.
:param start: the time to start from in HH:mm:ss.SSS format
:param end: the time to end at in HH:mm:ss.SSS format.
:return: the signal if the named upload exists.
"""
entry = self._getCacheEntry(name)
if entry is not None:
from analyser.common.signal import loadSignalFromWav
return loadSignalFromWav(entry['path'], start=start, end=end)
else:
return None | Loads the named entry from the upload cache as a signal.
:param name: the name.
:param start: the time to start from in HH:mm:ss.SSS format
:param end: the time to end at in HH:mm:ss.SSS format.
:return: the signal if the named upload exists. | Below is the instruction that describes the task:
### Input:
Loads the named entry from the upload cache as a signal.
:param name: the name.
:param start: the time to start from in HH:mm:ss.SSS format
:param end: the time to end at in HH:mm:ss.SSS format.
:return: the signal if the named upload exists.
### Response:
def loadSignal(self, name, start=None, end=None):
"""
Loads the named entry from the upload cache as a signal.
:param name: the name.
:param start: the time to start from in HH:mm:ss.SSS format
:param end: the time to end at in HH:mm:ss.SSS format.
:return: the signal if the named upload exists.
"""
entry = self._getCacheEntry(name)
if entry is not None:
from analyser.common.signal import loadSignalFromWav
return loadSignalFromWav(entry['path'], start=start, end=end)
else:
return None |
def get_obj(app_label, model_name, object_id):
"""
Function used to get an object
:param app_label: A valid Django Model or a string with format: <app_label>.<model_name>
:param model_name: Key into kwargs that contains the data: new_person
:param object_id:
:return: instance
"""
try:
model = apps.get_model("{}.{}".format(app_label, model_name))
assert is_valid_django_model(model), ("Model {}.{} do not exist.").format(
app_label, model_name
)
obj = get_Object_or_None(model, pk=object_id)
return obj
except model.DoesNotExist:
return None
except LookupError:
pass
except ValidationError as e:
raise ValidationError(e.__str__())
except TypeError as e:
raise TypeError(e.__str__())
except Exception as e:
raise Exception(e.__str__()) | Function used to get an object
:param app_label: A valid Django Model or a string with format: <app_label>.<model_name>
:param model_name: Key into kwargs that contains the data: new_person
:param object_id:
:return: instance | Below is the instruction that describes the task:
### Input:
Function used to get an object
:param app_label: A valid Django Model or a string with format: <app_label>.<model_name>
:param model_name: Key into kwargs that contains the data: new_person
:param object_id:
:return: instance
### Response:
def get_obj(app_label, model_name, object_id):
"""
Function used to get an object
:param app_label: A valid Django Model or a string with format: <app_label>.<model_name>
:param model_name: Key into kwargs that contains the data: new_person
:param object_id:
:return: instance
"""
try:
model = apps.get_model("{}.{}".format(app_label, model_name))
assert is_valid_django_model(model), ("Model {}.{} do not exist.").format(
app_label, model_name
)
obj = get_Object_or_None(model, pk=object_id)
return obj
except model.DoesNotExist:
return None
except LookupError:
pass
except ValidationError as e:
raise ValidationError(e.__str__())
except TypeError as e:
raise TypeError(e.__str__())
except Exception as e:
raise Exception(e.__str__()) |
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out | Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0. | Below is the instruction that describes the task:
### Input:
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
### Response:
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out |
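As a quick illustration of the cosine annealing helper in the row above, here is a minimal sketch (the helper is restated so the snippet runs on its own; it assumes only `numpy`):
```python
import numpy as np

def annealing_cos(start, end, pct):
    "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    cos_out = np.cos(np.pi * pct) + 1
    return end + (start - end) / 2 * cos_out

# Anneal a learning rate from 0.1 down to 0.001 across five evenly spaced points.
for pct in np.linspace(0.0, 1.0, 5):
    print(round(float(annealing_cos(0.1, 0.001, pct)), 4))
# prints 0.1, 0.0855, 0.0505, 0.0155, 0.001
```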
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None,
instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None,
depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None,
ignore_reuse=None, ignore_reuse_stages=None, extra_args=None, **kwargs):
'''
:param executable_input: Hash of the executable's input arguments
:type executable_input: dict
:param project: Project ID of the project context
:type project: string
:param folder: Folder in which executable's outputs will be placed in *project*
:type folder: string
:param name: Name for the new job (default is "<name of the executable>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:param allow_ssh: List of hostname or IP masks to allow SSH connections from
:type allow_ssh: list
:param debug: Configuration options for job debugging
:type debug: dict
:param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails
:type delay_workspace_destruction: boolean
:param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high"
:type priority: string
:param ignore_reuse: Disable job reuse for this execution
:type ignore_reuse: boolean
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call
:type extra_args: dict
:returns: Object handler of the newly created job
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates a new job that executes the function "main" of this executable with
the given input *executable_input*.
'''
# stage_instance_types, stage_folders, and rerun_stages are
# only supported for workflows, but we include them
# here. Applet-based executables should detect when they
# receive a truthy workflow-specific value and raise an error.
run_input = self._get_run_input(executable_input,
project=project,
folder=folder,
name=name,
tags=tags,
properties=properties,
details=details,
instance_type=instance_type,
stage_instance_types=stage_instance_types,
stage_folders=stage_folders,
rerun_stages=rerun_stages,
cluster_spec=cluster_spec,
depends_on=depends_on,
allow_ssh=allow_ssh,
ignore_reuse=ignore_reuse,
ignore_reuse_stages=ignore_reuse_stages,
debug=debug,
delay_workspace_destruction=delay_workspace_destruction,
priority=priority,
extra_args=extra_args)
return self._run_impl(run_input, **kwargs) | :param executable_input: Hash of the executable's input arguments
:type executable_input: dict
:param project: Project ID of the project context
:type project: string
:param folder: Folder in which executable's outputs will be placed in *project*
:type folder: string
:param name: Name for the new job (default is "<name of the executable>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:param allow_ssh: List of hostname or IP masks to allow SSH connections from
:type allow_ssh: list
:param debug: Configuration options for job debugging
:type debug: dict
:param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails
:type delay_workspace_destruction: boolean
:param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high"
:type priority: string
:param ignore_reuse: Disable job reuse for this execution
:type ignore_reuse: boolean
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call
:type extra_args: dict
:returns: Object handler of the newly created job
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates a new job that executes the function "main" of this executable with
the given input *executable_input*. | Below is the instruction that describes the task:
### Input:
:param executable_input: Hash of the executable's input arguments
:type executable_input: dict
:param project: Project ID of the project context
:type project: string
:param folder: Folder in which executable's outputs will be placed in *project*
:type folder: string
:param name: Name for the new job (default is "<name of the executable>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:param allow_ssh: List of hostname or IP masks to allow SSH connections from
:type allow_ssh: list
:param debug: Configuration options for job debugging
:type debug: dict
:param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails
:type delay_workspace_destruction: boolean
:param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high"
:type priority: string
:param ignore_reuse: Disable job reuse for this execution
:type ignore_reuse: boolean
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call
:type extra_args: dict
:returns: Object handler of the newly created job
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates a new job that executes the function "main" of this executable with
the given input *executable_input*.
### Response:
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None,
instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None,
depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None,
ignore_reuse=None, ignore_reuse_stages=None, extra_args=None, **kwargs):
'''
:param executable_input: Hash of the executable's input arguments
:type executable_input: dict
:param project: Project ID of the project context
:type project: string
:param folder: Folder in which executable's outputs will be placed in *project*
:type folder: string
:param name: Name for the new job (default is "<name of the executable>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
:param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:param allow_ssh: List of hostname or IP masks to allow SSH connections from
:type allow_ssh: list
:param debug: Configuration options for job debugging
:type debug: dict
:param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails
:type delay_workspace_destruction: boolean
:param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high"
:type priority: string
:param ignore_reuse: Disable job reuse for this execution
:type ignore_reuse: boolean
:param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
:type ignore_reuse_stages: list
:param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call
:type extra_args: dict
:returns: Object handler of the newly created job
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates a new job that executes the function "main" of this executable with
the given input *executable_input*.
'''
# stage_instance_types, stage_folders, and rerun_stages are
# only supported for workflows, but we include them
# here. Applet-based executables should detect when they
# receive a truthy workflow-specific value and raise an error.
run_input = self._get_run_input(executable_input,
project=project,
folder=folder,
name=name,
tags=tags,
properties=properties,
details=details,
instance_type=instance_type,
stage_instance_types=stage_instance_types,
stage_folders=stage_folders,
rerun_stages=rerun_stages,
cluster_spec=cluster_spec,
depends_on=depends_on,
allow_ssh=allow_ssh,
ignore_reuse=ignore_reuse,
ignore_reuse_stages=ignore_reuse_stages,
debug=debug,
delay_workspace_destruction=delay_workspace_destruction,
priority=priority,
extra_args=extra_args)
return self._run_impl(run_input, **kwargs) |
def append_string(t, string):
"""Append a string to a node, as text or tail of last child."""
node = t.tree
if string:
if len(node) == 0:
if node.text is not None:
node.text += string
else:
node.text = string
else: # Get last child
child = list(node)[-1]
if child.tail is not None:
child.tail += string
else:
child.tail = string | Append a string to a node, as text or tail of last child. | Below is the instruction that describes the task:
### Input:
Append a string to a node, as text or tail of last child.
### Response:
def append_string(t, string):
"""Append a string to a node, as text or tail of last child."""
node = t.tree
if string:
if len(node) == 0:
if node.text is not None:
node.text += string
else:
node.text = string
else: # Get last child
child = list(node)[-1]
if child.tail is not None:
child.tail += string
else:
child.tail = string |
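A small usage sketch for `append_string` above, assuming the function is in scope and that `t` may be any object exposing the target element as `t.tree` (a `SimpleNamespace` stands in for the real tree wrapper):
```python
import xml.etree.ElementTree as ET
from types import SimpleNamespace

root = ET.Element("p")
t = SimpleNamespace(tree=root)        # anything with a .tree element works here

append_string(t, "Hello ")            # no children yet -> stored as root.text
ET.SubElement(root, "b").text = "bold"
append_string(t, " world")            # a child exists -> appended to that child's tail

print(ET.tostring(root, encoding="unicode"))
# <p>Hello <b>bold</b> world</p>
```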
def getNorthSouthClone(self, i):
"""
Returns the adjacent clone name from both sides.
"""
north = self.getAdjacentClone(i, south=False)
south = self.getAdjacentClone(i)
return north, south | Returns the adjacent clone name from both sides. | Below is the instruction that describes the task:
### Input:
Returns the adjacent clone name from both sides.
### Response:
def getNorthSouthClone(self, i):
"""
Returns the adjacent clone name from both sides.
"""
north = self.getAdjacentClone(i, south=False)
south = self.getAdjacentClone(i)
return north, south |
def in_virtual_env():
"""
returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result)
"""
import sys
has_venv = False
if hasattr(sys, 'real_prefix'):
# For virtualenv module
has_venv = True
elif hasattr(sys, 'base_prefix'):
# For venv module
has_venv = sys.base_prefix != sys.prefix
return has_venv | returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result) | Below is the instruction that describes the task:
### Input:
returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result)
### Response:
def in_virtual_env():
"""
returns True if you are running inside a python virtual environment.
(DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV)
sys.prefix gives the location of the virtualenv
Notes:
It seems IPython does not respect virtual environments properly.
TODO: find a solution
http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages
References:
http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
CommandLine:
python -m utool.util_sysreq in_virtual_env
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> result = in_virtual_env()
>>> print(result)
"""
import sys
has_venv = False
if hasattr(sys, 'real_prefix'):
# For virtualenv module
has_venv = True
elif hasattr(sys, 'base_prefix'):
# For venv module
has_venv = sys.base_prefix != sys.prefix
return has_venv |
def get_string():
"""A better str(_get_keycodes()) method"""
keycodes = _get_keycodes()
initial_code, codes = keycodes[0], keycodes[1:]
initial_char = chr(initial_code)
if initial_code == 27:
initial_char = '\\e'
elif not ascii.isgraph(initial_char):
initial_char = '\\x%x' % initial_code
chars = ''.join([chr(c) for c in codes])
return ''.join((initial_char, chars)) | A better str(_get_keycodes()) method | Below is the instruction that describes the task:
### Input:
A better str(_get_keycodes()) method
### Response:
def get_string():
"""A better str(_get_keycodes()) method"""
keycodes = _get_keycodes()
initial_code, codes = keycodes[0], keycodes[1:]
initial_char = chr(initial_code)
if initial_code == 27:
initial_char = '\\e'
elif not ascii.isgraph(initial_char):
initial_char = '\\x%x' % initial_code
chars = ''.join([chr(c) for c in codes])
return ''.join((initial_char, chars)) |
def ConsumeRange(self, start, end):
"""Consumes an entire range, or part thereof.
If the finger has no ranges left, or the current range start is higher
than the end of the consumed block, nothing happens. Otherwise,
the current range is adjusted for the consumed block, or removed,
if the entire block is consumed. For things to work, the consumed
range and the current finger starts must be equal, and the length
of the consumed range may not exceed the length of the current range.
Args:
start: Beginning of range to be consumed.
end: First offset after the consumed range (end + 1).
Raises:
RuntimeError: if the start position of the consumed range is
higher than the start of the current range in the finger, or if
the consumed range cuts across block boundaries.
"""
old = self.CurrentRange()
if old is None:
return
if old.start > start:
if old.start < end:
raise RuntimeError('Block end too high.')
return
if old.start < start:
raise RuntimeError('Block start too high.')
if old.end == end:
del self.ranges[0]
elif old.end > end:
self.ranges[0] = Range(end, old.end)
else:
raise RuntimeError('Block length exceeds range.') | Consumes an entire range, or part thereof.
If the finger has no ranges left, or the current range start is higher
than the end of the consumed block, nothing happens. Otherwise,
the current range is adjusted for the consumed block, or removed,
if the entire block is consumed. For things to work, the consumed
range and the current finger starts must be equal, and the length
of the consumed range may not exceed the length of the current range.
Args:
start: Beginning of range to be consumed.
end: First offset after the consumed range (end + 1).
Raises:
RuntimeError: if the start position of the consumed range is
higher than the start of the current range in the finger, or if
the consumed range cuts across block boundaries. | Below is the instruction that describes the task:
### Input:
Consumes an entire range, or part thereof.
If the finger has no ranges left, or the current range start is higher
than the end of the consumed block, nothing happens. Otherwise,
the current range is adjusted for the consumed block, or removed,
if the entire block is consumed. For things to work, the consumed
range and the current finger starts must be equal, and the length
of the consumed range may not exceed the length of the current range.
Args:
start: Beginning of range to be consumed.
end: First offset after the consumed range (end + 1).
Raises:
RuntimeError: if the start position of the consumed range is
higher than the start of the current range in the finger, or if
the consumed range cuts across block boundaries.
### Response:
def ConsumeRange(self, start, end):
"""Consumes an entire range, or part thereof.
If the finger has no ranges left, or the current range start is higher
than the end of the consumed block, nothing happens. Otherwise,
the current range is adjusted for the consumed block, or removed,
if the entire block is consumed. For things to work, the consumed
range and the current finger starts must be equal, and the length
of the consumed range may not exceed the length of the current range.
Args:
start: Beginning of range to be consumed.
end: First offset after the consumed range (end + 1).
Raises:
RuntimeError: if the start position of the consumed range is
higher than the start of the current range in the finger, or if
the consumed range cuts across block boundaries.
"""
old = self.CurrentRange()
if old is None:
return
if old.start > start:
if old.start < end:
raise RuntimeError('Block end too high.')
return
if old.start < start:
raise RuntimeError('Block start too high.')
if old.end == end:
del self.ranges[0]
elif old.end > end:
self.ranges[0] = Range(end, old.end)
else:
raise RuntimeError('Block length exceeds range.') |
def get_query(self, show=True, proxy=None, timeout=0):
"""
GET MediaWiki:API action=query selected data
https://en.wikipedia.org/w/api.php?action=help&modules=query
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- description: <str> Wikidata description (via pageterms)
- extext: <str> plain text (Markdown) extract
- extract: <str> HTML extract from Extension:TextExtract
- image: <dict> {query-pageimage, query-thumbnail}
- label: <str> Wikidata label (via pageterms)
- modified (page): <str> ISO8601 date and time
- pageid: <int> Wikipedia database ID
- random: <str> a random article title with every request!
- requests: list of request actions made
- url: <str> the canonical wiki URL
- url_raw: <str> ostensible raw wikitext URL
- watchers: <int> number of people watching this page
"""
if not self.params.get('title') and not self.params.get('pageid'):
raise ValueError("get_query needs title or pageid")
self._get('query', show, proxy, timeout)
while self.data.get('continue'):
self._get('query', show, proxy, timeout)
return self | GET MediaWiki:API action=query selected data
https://en.wikipedia.org/w/api.php?action=help&modules=query
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- description: <str> Wikidata description (via pageterms)
- extext: <str> plain text (Markdown) extract
- extract: <str> HTML extract from Extension:TextExtract
- image: <dict> {query-pageimage, query-thumbnail}
- label: <str> Wikidata label (via pageterms)
- modified (page): <str> ISO8601 date and time
- pageid: <int> Wikipedia database ID
- random: <str> a random article title with every request!
- requests: list of request actions made
- url: <str> the canonical wiki URL
- url_raw: <str> ostensible raw wikitext URL
- watchers: <int> number of people watching this page | Below is the instruction that describes the task:
### Input:
GET MediaWiki:API action=query selected data
https://en.wikipedia.org/w/api.php?action=help&modules=query
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- description: <str> Wikidata description (via pageterms)
- extext: <str> plain text (Markdown) extract
- extract: <str> HTML extract from Extension:TextExtract
- image: <dict> {query-pageimage, query-thumbnail}
- label: <str> Wikidata label (via pageterms)
- modified (page): <str> ISO8601 date and time
- pageid: <int> Wikipedia database ID
- random: <str> a random article title with every request!
- requests: list of request actions made
- url: <str> the canonical wiki URL
- url_raw: <str> ostensible raw wikitext URL
- watchers: <int> number of people watching this page
### Response:
def get_query(self, show=True, proxy=None, timeout=0):
"""
GET MediaWiki:API action=query selected data
https://en.wikipedia.org/w/api.php?action=help&modules=query
Required {params}: title OR pageid
- title: <str> article title
- pageid: <int> Wikipedia database ID
Optional arguments:
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- description: <str> Wikidata description (via pageterms)
- extext: <str> plain text (Markdown) extract
- extract: <str> HTML extract from Extension:TextExtract
- image: <dict> {query-pageimage, query-thumbnail}
- label: <str> Wikidata label (via pageterms)
- modified (page): <str> ISO8601 date and time
- pageid: <int> Wikipedia database ID
- random: <str> a random article title with every request!
- requests: list of request actions made
- url: <str> the canonical wiki URL
- url_raw: <str> ostensible raw wikitext URL
- watchers: <int> number of people watching this page
"""
if not self.params.get('title') and not self.params.get('pageid'):
raise ValueError("get_query needs title or pageid")
self._get('query', show, proxy, timeout)
while self.data.get('continue'):
self._get('query', show, proxy, timeout)
return self |
def _update(qs):
"""
Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints.
"""
try:
with transaction.atomic():
qs.update(sort_order=models.F('sort_order') + 1)
except IntegrityError:
for obj in qs.order_by('-sort_order'):
qs.filter(pk=obj.pk).update(sort_order=models.F('sort_order') + 1) | Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints. | Below is the instruction that describes the task:
### Input:
Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints.
### Response:
def _update(qs):
"""
Increment the sort_order in a queryset.
Handle IntegrityErrors caused by unique constraints.
"""
try:
with transaction.atomic():
qs.update(sort_order=models.F('sort_order') + 1)
except IntegrityError:
for obj in qs.order_by('-sort_order'):
qs.filter(pk=obj.pk).update(sort_order=models.F('sort_order') + 1) |
def initialize_openstack(func):
'''
Initialize and refresh openstack connection
'''
async def wrap(self, *args, **kwargs):
if not hasattr(self, 'auth') or not self.auth.is_token_valid():
self.auth = AuthPassword(auth_url=self.config['auth_url'],
username=self.config['username'],
password=self.config['password'],
project_name=self.config['project_name'],
user_domain_name=self.config['user_domain_name'],
project_domain_name=self.config['project_domain_name'])
self.nova = NovaClient(session=self.auth)
self.glance = GlanceClient(session=self.auth)
await self.nova.init_api(timeout=self.config.get('http_timeout', 10))
await self.glance.init_api(timeout=self.config.get('http_timeout', 10))
if not hasattr(self, 'last_init') or self.last_init < (time.time() - 60):
await self.initialize()
self.last_init = time.time()
return await func(self, *args, **kwargs)
return wrap | Initialize and refresh openstack connection | Below is the instruction that describes the task:
### Input:
Initialize and refresh openstack connection
### Response:
def initialize_openstack(func):
'''
Initialize and refresh openstack connection
'''
async def wrap(self, *args, **kwargs):
if not hasattr(self, 'auth') or not self.auth.is_token_valid():
self.auth = AuthPassword(auth_url=self.config['auth_url'],
username=self.config['username'],
password=self.config['password'],
project_name=self.config['project_name'],
user_domain_name=self.config['user_domain_name'],
project_domain_name=self.config['project_domain_name'])
self.nova = NovaClient(session=self.auth)
self.glance = GlanceClient(session=self.auth)
await self.nova.init_api(timeout=self.config.get('http_timeout', 10))
await self.glance.init_api(timeout=self.config.get('http_timeout', 10))
if not hasattr(self, 'last_init') or self.last_init < (time.time() - 60):
await self.initialize()
self.last_init = time.time()
return await func(self, *args, **kwargs)
return wrap |
def run(self):
"""Start the recurring task."""
if self.init_sec:
sleep(self.init_sec)
self._functime = time()
while self._running:
start = time()
self._func()
self._functime += self.interval_sec
if self._functime - start > 0:
sleep(self._functime - start) | Start the recurring task. | Below is the instruction that describes the task:
### Input:
Start the recurring task.
### Response:
def run(self):
"""Start the recurring task."""
if self.init_sec:
sleep(self.init_sec)
self._functime = time()
while self._running:
start = time()
self._func()
self._functime += self.interval_sec
if self._functime - start > 0:
sleep(self._functime - start) |
def uniqueId(self, prefix=""):
"""
Generate a unique integer id (unique within the entire client session).
Useful for temporary DOM ids.
"""
_IdCounter.count += 1
id = _IdCounter.count
if prefix:
return self._wrap(prefix + str(id))
else:
return self._wrap(id) | Generate a unique integer id (unique within the entire client session).
Useful for temporary DOM ids. | Below is the instruction that describes the task:
### Input:
Generate a unique integer id (unique within the entire client session).
Useful for temporary DOM ids.
### Response:
def uniqueId(self, prefix=""):
"""
Generate a unique integer id (unique within the entire client session).
Useful for temporary DOM ids.
"""
_IdCounter.count += 1
id = _IdCounter.count
if prefix:
return self._wrap(prefix + str(id))
else:
return self._wrap(id) |
def _write_arg_to_bytes(builder, arg, args, name=None):
"""
Writes the .__bytes__() code for the given argument
:param builder: The source code builder
:param arg: The argument to write
:param args: All the other arguments in TLObject same __bytes__.
This is required to determine the flags value
:param name: The name of the argument. Defaults to "self.argname"
This argument is an option because it's required when
writing Vectors<>
"""
if arg.generic_definition:
return # Do nothing, this only specifies a later type
if name is None:
name = 'self.{}'.format(arg.name)
# The argument may be a flag, only write if it's not None AND
# if it's not a True type.
# True types are not actually sent, but instead only used to
# determine the flags.
if arg.is_flag:
if arg.type == 'true':
return # Exit, since True type is never written
elif arg.is_vector:
# Vector flags are special since they consist of 3 values,
# so we need an extra join here. Note that empty vector flags
# should NOT be sent either!
builder.write("b'' if {0} is None or {0} is False "
"else b''.join((", name)
else:
builder.write("b'' if {0} is None or {0} is False "
"else (", name)
if arg.is_vector:
if arg.use_vector_id:
# vector code, unsigned 0x1cb5c415 as little endian
builder.write(r"b'\x15\xc4\xb5\x1c',")
builder.write("struct.pack('<i', len({})),", name)
# Cannot unpack the values for the outer tuple through *[(
# since that's a Python >3.5 feature, so add another join.
builder.write("b''.join(")
# Temporary disable .is_vector, not to enter this if again
# Also disable .is_flag since it's not needed per element
old_flag = arg.is_flag
arg.is_vector = arg.is_flag = False
_write_arg_to_bytes(builder, arg, args, name='x')
arg.is_vector = True
arg.is_flag = old_flag
builder.write(' for x in {})', name)
elif arg.flag_indicator:
# Calculate the flags with those items which are not None
if not any(f.is_flag for f in args):
# There's a flag indicator, but no flag arguments so it's 0
builder.write(r"b'\0\0\0\0'")
else:
builder.write("struct.pack('<I', ")
builder.write(
' | '.join('(0 if {0} is None or {0} is False else {1})'
.format('self.{}'.format(flag.name),
1 << flag.flag_index)
for flag in args if flag.is_flag)
)
builder.write(')')
elif 'int' == arg.type:
# struct.pack is around 4 times faster than int.to_bytes
builder.write("struct.pack('<i', {})", name)
elif 'long' == arg.type:
builder.write("struct.pack('<q', {})", name)
elif 'int128' == arg.type:
builder.write("{}.to_bytes(16, 'little', signed=True)", name)
elif 'int256' == arg.type:
builder.write("{}.to_bytes(32, 'little', signed=True)", name)
elif 'double' == arg.type:
builder.write("struct.pack('<d', {})", name)
elif 'string' == arg.type:
builder.write('self.serialize_bytes({})', name)
elif 'Bool' == arg.type:
# 0x997275b5 if boolean else 0xbc799737
builder.write(r"b'\xb5ur\x99' if {} else b'7\x97y\xbc'", name)
elif 'true' == arg.type:
pass # These are actually NOT written! Only used for flags
elif 'bytes' == arg.type:
builder.write('self.serialize_bytes({})', name)
elif 'date' == arg.type: # Custom format
builder.write('self.serialize_datetime({})', name)
else:
# Else it may be a custom type
builder.write('bytes({})', name)
# If the type is not boxed (i.e. starts with lowercase) we should
# not serialize the constructor ID (so remove its first 4 bytes).
boxed = arg.type[arg.type.find('.') + 1].isupper()
if not boxed:
builder.write('[4:]')
if arg.is_flag:
builder.write(')')
if arg.is_vector:
builder.write(')') # We were using a tuple
return True | Writes the .__bytes__() code for the given argument
:param builder: The source code builder
:param arg: The argument to write
:param args: All the other arguments in TLObject same __bytes__.
This is required to determine the flags value
:param name: The name of the argument. Defaults to "self.argname"
This argument is an option because it's required when
writing Vectors<> | Below is the instruction that describes the task:
### Input:
Writes the .__bytes__() code for the given argument
:param builder: The source code builder
:param arg: The argument to write
:param args: All the other arguments in TLObject same __bytes__.
This is required to determine the flags value
:param name: The name of the argument. Defaults to "self.argname"
This argument is an option because it's required when
writing Vectors<>
### Response:
def _write_arg_to_bytes(builder, arg, args, name=None):
"""
Writes the .__bytes__() code for the given argument
:param builder: The source code builder
:param arg: The argument to write
:param args: All the other arguments in TLObject same __bytes__.
This is required to determine the flags value
:param name: The name of the argument. Defaults to "self.argname"
This argument is an option because it's required when
writing Vectors<>
"""
if arg.generic_definition:
return # Do nothing, this only specifies a later type
if name is None:
name = 'self.{}'.format(arg.name)
# The argument may be a flag, only write if it's not None AND
# if it's not a True type.
# True types are not actually sent, but instead only used to
# determine the flags.
if arg.is_flag:
if arg.type == 'true':
return # Exit, since True type is never written
elif arg.is_vector:
# Vector flags are special since they consist of 3 values,
# so we need an extra join here. Note that empty vector flags
# should NOT be sent either!
builder.write("b'' if {0} is None or {0} is False "
"else b''.join((", name)
else:
builder.write("b'' if {0} is None or {0} is False "
"else (", name)
if arg.is_vector:
if arg.use_vector_id:
# vector code, unsigned 0x1cb5c415 as little endian
builder.write(r"b'\x15\xc4\xb5\x1c',")
builder.write("struct.pack('<i', len({})),", name)
# Cannot unpack the values for the outer tuple through *[(
# since that's a Python >3.5 feature, so add another join.
builder.write("b''.join(")
# Temporary disable .is_vector, not to enter this if again
# Also disable .is_flag since it's not needed per element
old_flag = arg.is_flag
arg.is_vector = arg.is_flag = False
_write_arg_to_bytes(builder, arg, args, name='x')
arg.is_vector = True
arg.is_flag = old_flag
builder.write(' for x in {})', name)
elif arg.flag_indicator:
# Calculate the flags with those items which are not None
if not any(f.is_flag for f in args):
# There's a flag indicator, but no flag arguments so it's 0
builder.write(r"b'\0\0\0\0'")
else:
builder.write("struct.pack('<I', ")
builder.write(
' | '.join('(0 if {0} is None or {0} is False else {1})'
.format('self.{}'.format(flag.name),
1 << flag.flag_index)
for flag in args if flag.is_flag)
)
builder.write(')')
elif 'int' == arg.type:
# struct.pack is around 4 times faster than int.to_bytes
builder.write("struct.pack('<i', {})", name)
elif 'long' == arg.type:
builder.write("struct.pack('<q', {})", name)
elif 'int128' == arg.type:
builder.write("{}.to_bytes(16, 'little', signed=True)", name)
elif 'int256' == arg.type:
builder.write("{}.to_bytes(32, 'little', signed=True)", name)
elif 'double' == arg.type:
builder.write("struct.pack('<d', {})", name)
elif 'string' == arg.type:
builder.write('self.serialize_bytes({})', name)
elif 'Bool' == arg.type:
# 0x997275b5 if boolean else 0xbc799737
builder.write(r"b'\xb5ur\x99' if {} else b'7\x97y\xbc'", name)
elif 'true' == arg.type:
pass # These are actually NOT written! Only used for flags
elif 'bytes' == arg.type:
builder.write('self.serialize_bytes({})', name)
elif 'date' == arg.type: # Custom format
builder.write('self.serialize_datetime({})', name)
else:
# Else it may be a custom type
builder.write('bytes({})', name)
# If the type is not boxed (i.e. starts with lowercase) we should
# not serialize the constructor ID (so remove its first 4 bytes).
boxed = arg.type[arg.type.find('.') + 1].isupper()
if not boxed:
builder.write('[4:]')
if arg.is_flag:
builder.write(')')
if arg.is_vector:
builder.write(')') # We were using a tuple
return True |
def extended_help_option(extended_help=None, *param_decls, **attrs):
"""
Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
"""
def decorator(f):
def callback(ctx, param, value):
if value and not ctx.resilient_parsing:
if not extended_help:
ctx.command.help = ctx.command.callback.__doc__
click.echo(ctx.get_help(), color=ctx.color)
else:
ctx.command.help = extended_help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('help', 'Show extended help content, similar to manpage, and exit.')
attrs.setdefault('is_eager', True)
attrs['callback'] = callback
return click.option(*(param_decls or ('--extended-help',)), **attrs)(f)
return decorator | Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`. | Below is the instruction that describes the task:
### Input:
Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
### Response:
def extended_help_option(extended_help=None, *param_decls, **attrs):
"""
Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
"""
def decorator(f):
def callback(ctx, param, value):
if value and not ctx.resilient_parsing:
if not extended_help:
ctx.command.help = ctx.command.callback.__doc__
click.echo(ctx.get_help(), color=ctx.color)
else:
ctx.command.help = extended_help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('help', 'Show extended help content, similar to manpage, and exit.')
attrs.setdefault('is_eager', True)
attrs['callback'] = callback
return click.option(*(param_decls or ('--extended-help',)), **attrs)(f)
return decorator |
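A usage sketch for the decorator above on a plain `click` command (the command, option, and help texts are made up for illustration):
```python
import click

@click.command()
@extended_help_option(
    extended_help="Long-form reference text, shown only for --extended-help."
)
@click.option("--count", default=1, help="Number of greetings.")
def hello(count):
    """Print a short greeting."""
    for _ in range(count):
        click.echo("Hello!")

if __name__ == "__main__":
    hello()   # compare `hello --help` with `hello --extended-help`
```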
def find_cycle(graph):
"""
Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes.
"""
if (isinstance(graph, graph_class)):
directed = False
elif (isinstance(graph, digraph_class)):
directed = True
else:
raise InvalidGraphType
def find_cycle_to_ancestor(node, ancestor):
"""
Find a cycle containing both node and ancestor.
"""
path = []
while (node != ancestor):
if (node is None):
return []
path.append(node)
node = spanning_tree[node]
path.append(node)
path.reverse()
return path
def dfs(node):
"""
Depth-first search subfunction.
"""
visited[node] = 1
# Explore recursively the connected component
for each in graph[node]:
if (cycle):
return
if (each not in visited):
spanning_tree[each] = node
dfs(each)
else:
if (directed or spanning_tree[node] != each):
cycle.extend(find_cycle_to_ancestor(node, each))
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
visited = {} # List for marking visited and non-visited nodes
spanning_tree = {} # Spanning tree
cycle = []
# Algorithm outer-loop
for each in graph:
# Select a non-visited node
if (each not in visited):
spanning_tree[each] = None
# Explore node's connected component
dfs(each)
if (cycle):
setrecursionlimit(recursionlimit)
return cycle
setrecursionlimit(recursionlimit)
return [] | Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes. | Below is the instruction that describes the task:
### Input:
Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes.
### Response:
def find_cycle(graph):
"""
Find a cycle in the given graph.
This function will return a list of nodes which form a cycle in the graph or an empty list if
no cycle exists.
@type graph: graph, digraph
@param graph: Graph.
@rtype: list
@return: List of nodes.
"""
if (isinstance(graph, graph_class)):
directed = False
elif (isinstance(graph, digraph_class)):
directed = True
else:
raise InvalidGraphType
def find_cycle_to_ancestor(node, ancestor):
"""
Find a cycle containing both node and ancestor.
"""
path = []
while (node != ancestor):
if (node is None):
return []
path.append(node)
node = spanning_tree[node]
path.append(node)
path.reverse()
return path
def dfs(node):
"""
Depth-first search subfunction.
"""
visited[node] = 1
# Explore recursively the connected component
for each in graph[node]:
if (cycle):
return
if (each not in visited):
spanning_tree[each] = node
dfs(each)
else:
if (directed or spanning_tree[node] != each):
cycle.extend(find_cycle_to_ancestor(node, each))
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
visited = {} # List for marking visited and non-visited nodes
spanning_tree = {} # Spanning tree
cycle = []
# Algorithm outer-loop
for each in graph:
# Select a non-visited node
if (each not in visited):
spanning_tree[each] = None
# Explore node's connected component
dfs(each)
if (cycle):
setrecursionlimit(recursionlimit)
return cycle
setrecursionlimit(recursionlimit)
return [] |
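A usage sketch, assuming the `python-graph` (`pygraph`) package that this function appears to come from; the import paths and API calls below are taken on that assumption:
```python
from pygraph.classes.digraph import digraph
from pygraph.algorithms.cycles import find_cycle

gr = digraph()
gr.add_nodes(["a", "b", "c", "d"])
gr.add_edge(("a", "b"))
gr.add_edge(("b", "c"))
gr.add_edge(("c", "a"))   # closes the directed cycle a -> b -> c -> a
gr.add_edge(("c", "d"))   # extra edge that is not on any cycle

print(find_cycle(gr))     # expected: some rotation of ['a', 'b', 'c']
```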
def parse_string(xml):
""" Returns a slash-formatted string from the given XML representation.
The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
"""
string = ""
# Traverse all the <sentence> elements in the XML.
dom = XML(xml)
for sentence in dom(XML_SENTENCE):
_anchors.clear() # Populated by calling _parse_tokens().
_attachments.clear() # Populated by calling _parse_tokens().
# Parse the language from <sentence language="">.
language = sentence.get(XML_LANGUAGE, "en")
# Parse the token tag format from <sentence token="">.
# This information is returned in TokenString.tags,
# so the format and order of the token tags is retained when exporting/importing as XML.
format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
format = not isinstance(format, basestring) and format or format.replace(" ","").split(",")
# Traverse all <chunk> and <chink> elements in the sentence.
# Find the <word> elements inside and create tokens.
tokens = []
for chunk in sentence:
tokens.extend(_parse_tokens(chunk, format))
# Attach PNP's to their anchors.
# Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).
# The keys correspond to the keys in _attachments, which have linked PNP chunks.
if ANCHOR in format:
A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)
for id in sorted(A.keys()):
for token in A[id]:
token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))])
token[i] = token[i].strip("O-")
for p, pnp in enumerate(P[id]):
for token in pnp:
token[i] += "-"+"P"+str(a+p)
token[i] = token[i].strip("O-")
a += len(P[id])
# Collapse the tokens to string.
# Separate multiple sentences with a new line.
tokens = ["/".join([tag for tag in token]) for token in tokens]
tokens = " ".join(tokens)
string += tokens + "\n"
# Return a TokenString, which is a unicode string that transforms easily
# into a plain str, a list of tokens, or a Sentence.
try:
if MBSP: from mbsp import TokenString
return TokenString(string.strip(), tags=format, language=language)
except:
return TaggedString(string.strip(), tags=format, language=language) | Returns a slash-formatted string from the given XML representation.
The return value is a TokenString (for MBSP) or TaggedString (for Pattern). | Below is the the instruction that describes the task:
### Input:
Returns a slash-formatted string from the given XML representation.
The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
### Response:
def parse_string(xml):
""" Returns a slash-formatted string from the given XML representation.
The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
"""
string = ""
# Traverse all the <sentence> elements in the XML.
dom = XML(xml)
for sentence in dom(XML_SENTENCE):
_anchors.clear() # Populated by calling _parse_tokens().
_attachments.clear() # Populated by calling _parse_tokens().
# Parse the language from <sentence language="">.
language = sentence.get(XML_LANGUAGE, "en")
# Parse the token tag format from <sentence token="">.
# This information is returned in TokenString.tags,
# so the format and order of the token tags is retained when exporting/importing as XML.
format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
format = not isinstance(format, basestring) and format or format.replace(" ","").split(",")
# Traverse all <chunk> and <chink> elements in the sentence.
# Find the <word> elements inside and create tokens.
tokens = []
for chunk in sentence:
tokens.extend(_parse_tokens(chunk, format))
# Attach PNP's to their anchors.
# Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).
# The keys correspond to the keys in _attachments, which have linked PNP chunks.
if ANCHOR in format:
A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)
for id in sorted(A.keys()):
for token in A[id]:
token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))])
token[i] = token[i].strip("O-")
for p, pnp in enumerate(P[id]):
for token in pnp:
token[i] += "-"+"P"+str(a+p)
token[i] = token[i].strip("O-")
a += len(P[id])
# Collapse the tokens to string.
# Separate multiple sentences with a new line.
tokens = ["/".join([tag for tag in token]) for token in tokens]
tokens = " ".join(tokens)
string += tokens + "\n"
# Return a TokenString, which is a unicode string that transforms easily
# into a plain str, a list of tokens, or a Sentence.
try:
if MBSP: from mbsp import TokenString
return TokenString(string.strip(), tags=format, language=language)
except:
return TaggedString(string.strip(), tags=format, language=language) |
def _GetNextLogCountPerToken(token):
"""Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
"""
global _log_counter_per_token # pylint: disable=global-variable-not-assigned
_log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
return _log_counter_per_token[token] | Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0) | Below is the the instruction that describes the task:
### Input:
Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
### Response:
def _GetNextLogCountPerToken(token):
"""Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
"""
global _log_counter_per_token # pylint: disable=global-variable-not-assigned
_log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
return _log_counter_per_token[token] |
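A hedged illustration of how such a per-token counter is typically consumed; the _counter dict and the log_every_n helper below are stand-ins for this sketch, not objects from the original logging module.
_counter = {}
def _next_count(token):
    # Same logic as above: returns 0 on the first call for a token, then 1, 2, ...
    _counter[token] = 1 + _counter.get(token, -1)
    return _counter[token]
def log_every_n(token, n, message):
    # Emit the message only on calls 0, n, 2n, ... for this token.
    if _next_count(token) % n == 0:
        print(message)
for _ in range(5):
    log_every_n("retry", 2, "still retrying")  # prints on calls 0, 2 and 4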
def set_perplexity(self, new_perplexity):
"""Change the perplexity of the affinity matrix.
Note that we only allow lowering the perplexity or restoring it to its
original value. This restriction exists because setting a higher
perplexity value requires recomputing all the nearest neighbors, which
can take a long time. To avoid potential confusion as to why execution
time is slow, this is not allowed. If you would like to increase the
perplexity above the initial value, simply create a new instance.
Parameters
----------
new_perplexity: float
The new perplexity.
"""
# If the value hasn't changed, there's nothing to do
if new_perplexity == self.perplexity:
return
# Verify that the perplexity isn't too large
new_perplexity = self.check_perplexity(new_perplexity)
# Recompute the affinity matrix
k_neighbors = min(self.n_samples - 1, int(3 * new_perplexity))
if k_neighbors > self.__neighbors.shape[1]:
raise RuntimeError(
"The desired perplexity `%.2f` is larger than the initial one "
"used. This would need to recompute the nearest neighbors, "
"which is not efficient. Please create a new `%s` instance "
"with the increased perplexity."
% (new_perplexity, self.__class__.__name__)
)
self.perplexity = new_perplexity
self.P = joint_probabilities_nn(
self.__neighbors[:, :k_neighbors],
self.__distances[:, :k_neighbors],
[self.perplexity],
symmetrize=True,
n_jobs=self.n_jobs,
) | Change the perplexity of the affinity matrix.
Note that we only allow lowering the perplexity or restoring it to its
original value. This restriction exists because setting a higher
perplexity value requires recomputing all the nearest neighbors, which
can take a long time. To avoid potential confusion as to why execution
time is slow, this is not allowed. If you would like to increase the
perplexity above the initial value, simply create a new instance.
Parameters
----------
new_perplexity: float
The new perplexity. | Below is the the instruction that describes the task:
### Input:
Change the perplexity of the affinity matrix.
Note that we only allow lowering the perplexity or restoring it to its
original value. This restriction exists because setting a higher
perplexity value requires recomputing all the nearest neighbors, which
can take a long time. To avoid potential confusion as to why execution
time is slow, this is not allowed. If you would like to increase the
perplexity above the initial value, simply create a new instance.
Parameters
----------
new_perplexity: float
The new perplexity.
### Response:
def set_perplexity(self, new_perplexity):
"""Change the perplexity of the affinity matrix.
Note that we only allow lowering the perplexity or restoring it to its
original value. This restriction exists because setting a higher
perplexity value requires recomputing all the nearest neighbors, which
can take a long time. To avoid potential confusion as to why execution
time is slow, this is not allowed. If you would like to increase the
perplexity above the initial value, simply create a new instance.
Parameters
----------
new_perplexity: float
The new perplexity.
"""
# If the value hasn't changed, there's nothing to do
if new_perplexity == self.perplexity:
return
# Verify that the perplexity isn't too large
new_perplexity = self.check_perplexity(new_perplexity)
# Recompute the affinity matrix
k_neighbors = min(self.n_samples - 1, int(3 * new_perplexity))
if k_neighbors > self.__neighbors.shape[1]:
raise RuntimeError(
"The desired perplexity `%.2f` is larger than the initial one "
"used. This would need to recompute the nearest neighbors, "
"which is not efficient. Please create a new `%s` instance "
"with the increased perplexity."
% (new_perplexity, self.__class__.__name__)
)
self.perplexity = new_perplexity
self.P = joint_probabilities_nn(
self.__neighbors[:, :k_neighbors],
self.__distances[:, :k_neighbors],
[self.perplexity],
symmetrize=True,
n_jobs=self.n_jobs,
) |
def getValue(self, key):
"""
Some devices allow to directly get values for specific parameters.
"""
LOG.debug("HMGeneric.getValue: address = '%s', key = '%s'" % (self._ADDRESS, key))
try:
returnvalue = self._proxy.getValue(self._ADDRESS, key)
self._VALUES[key] = returnvalue
return returnvalue
except Exception as err:
LOG.warning("HMGeneric.getValue: %s on %s Exception: %s", key,
self._ADDRESS, err)
return False | Some devices allow to directly get values for specific parameters. | Below is the the instruction that describes the task:
### Input:
Some devices allow to directly get values for specific parameters.
### Response:
def getValue(self, key):
"""
Some devices allow to directly get values for specific parameters.
"""
LOG.debug("HMGeneric.getValue: address = '%s', key = '%s'" % (self._ADDRESS, key))
try:
returnvalue = self._proxy.getValue(self._ADDRESS, key)
self._VALUES[key] = returnvalue
return returnvalue
except Exception as err:
LOG.warning("HMGeneric.getValue: %s on %s Exception: %s", key,
self._ADDRESS, err)
return False |
def validate_email_domain(email):
""" Validates email domain by blacklist. """
try:
domain = email.split('@', 1)[1].lower().strip()
except IndexError:
return
if domain in dju_settings.DJU_EMAIL_DOMAIN_BLACK_LIST:
raise ValidationError(_(u'Email with domain "%(domain)s" is disallowed.'),
code='banned_domain', params={'domain': domain}) | Validates email domain by blacklist. | Below is the the instruction that describes the task:
### Input:
Validates email domain by blacklist.
### Response:
def validate_email_domain(email):
""" Validates email domain by blacklist. """
try:
domain = email.split('@', 1)[1].lower().strip()
except IndexError:
return
if domain in dju_settings.DJU_EMAIL_DOMAIN_BLACK_LIST:
raise ValidationError(_(u'Email with domain "%(domain)s" is disallowed.'),
code='banned_domain', params={'domain': domain}) |
def setCollectors(self, collectors):
"""
Sets the collector methods that will be used for this schema.
:param collectors | [<orb.Collectors>, ..]
"""
self.__collectors = {}
for name, collector in collectors.items():
self.__collectors[name] = collector
collector.setSchema(self) | Sets the collector methods that will be used for this schema.
:param collectors | [<orb.Collectors>, ..] | Below is the the instruction that describes the task:
### Input:
Sets the collector methods that will be used for this schema.
:param collectors | [<orb.Collectors>, ..]
### Response:
def setCollectors(self, collectors):
"""
Sets the collector methods that will be used for this schema.
:param collectors | [<orb.Collectors>, ..]
"""
self.__collectors = {}
for name, collector in collectors.items():
self.__collectors[name] = collector
collector.setSchema(self) |
def get_domain(self):
"""
:returns: opposite vertices of the bounding prism for this
object in the form of ndarray([min], [max])
.. note:: This method automatically stores the solution in order
to do not repeat calculations if the user needs to call it
more than once.
"""
points = ([poly.points for poly in self]+
[holes.points for holes in self.holes])
points = np.concatenate(points, axis=0)
return np.array([points.min(axis=0), points.max(axis=0)]) | :returns: opposite vertices of the bounding prism for this
object in the form of ndarray([min], [max])
.. note:: This method automatically stores the solution in order
to do not repeat calculations if the user needs to call it
more than once. | Below is the the instruction that describes the task:
### Input:
:returns: opposite vertices of the bounding prism for this
object in the form of ndarray([min], [max])
.. note:: This method automatically stores the solution in order
to do not repeat calculations if the user needs to call it
more than once.
### Response:
def get_domain(self):
"""
:returns: opposite vertices of the bounding prism for this
object in the form of ndarray([min], [max])
.. note:: This method automatically stores the solution in order
to do not repeat calculations if the user needs to call it
more than once.
"""
points = ([poly.points for poly in self]+
[holes.points for holes in self.holes])
points = np.concatenate(points, axis=0)
return np.array([points.min(axis=0), points.max(axis=0)]) |
def compute_distance(x_ori, x_pert, constraint='l2'):
""" Compute the distance between two images. """
if constraint == 'l2':
dist = np.linalg.norm(x_ori - x_pert)
elif constraint == 'linf':
dist = np.max(abs(x_ori - x_pert))
return dist | Compute the distance between two images. | Below is the the instruction that describes the task:
### Input:
Compute the distance between two images.
### Response:
def compute_distance(x_ori, x_pert, constraint='l2'):
""" Compute the distance between two images. """
if constraint == 'l2':
dist = np.linalg.norm(x_ori - x_pert)
elif constraint == 'linf':
dist = np.max(abs(x_ori - x_pert))
return dist |
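A quick numeric check, assuming the compute_distance above is in scope; the arrays are toy stand-ins for images.
import numpy as np
x = np.zeros((2, 2))
y = np.array([[3.0, 0.0], [0.0, 4.0]])
print(compute_distance(x, y, constraint='l2'))    # 5.0, L2 norm of the difference
print(compute_distance(x, y, constraint='linf'))  # 4.0, largest absolute per-pixel difference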
def sortByTotal(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
"""
return list(sorted(seriesList, key=safeSum, reverse=True)) | Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified. | Below is the the instruction that describes the task:
### Input:
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
### Response:
def sortByTotal(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
"""
return list(sorted(seriesList, key=safeSum, reverse=True)) |
def seek_end(fileobj, offset):
"""Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError
"""
if offset < 0:
raise ValueError
if get_size(fileobj) < offset:
fileobj.seek(0, 0)
else:
fileobj.seek(-offset, 2) | Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError | Below is the the instruction that describes the task:
### Input:
Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError
### Response:
def seek_end(fileobj, offset):
"""Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError
"""
if offset < 0:
raise ValueError
if get_size(fileobj) < offset:
fileobj.seek(0, 0)
else:
fileobj.seek(-offset, 2) |
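A small demonstration with an in-memory file, assuming the seek_end above is in scope; get_size here is a stand-in for the module's own helper and simply returns the total size of the stream.
import io
def get_size(fileobj):
    # Remember the position, seek to the end to read the size, then restore.
    pos = fileobj.tell()
    fileobj.seek(0, 2)
    size = fileobj.tell()
    fileobj.seek(pos, 0)
    return size
buf = io.BytesIO(b"abcdef")
seek_end(buf, 4)
print(buf.read())  # b'cdef'
buf = io.BytesIO(b"ab")
seek_end(buf, 10)  # offset larger than the file: clamps to the start
print(buf.read())  # b'ab'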
def prepare_bam(bam_in, precursors):
"""
Clean BAM file to keep only position inside the bigger cluster
"""
# use pybedtools to keep valid positions
# intersect option with -b bigger_cluster_loci
a = pybedtools.BedTool(bam_in)
b = pybedtools.BedTool(precursors)
c = a.intersect(b, u=True)
out_file = utils.splitext_plus(op.basename(bam_in))[0] + "_clean.bam"
c.saveas(out_file)
return op.abspath(out_file) | Clean BAM file to keep only position inside the bigger cluster | Below is the the instruction that describes the task:
### Input:
Clean BAM file to keep only position inside the bigger cluster
### Response:
def prepare_bam(bam_in, precursors):
"""
Clean BAM file to keep only position inside the bigger cluster
"""
# use pybedtools to keep valid positions
# intersect option with -b bigger_cluster_loci
a = pybedtools.BedTool(bam_in)
b = pybedtools.BedTool(precursors)
c = a.intersect(b, u=True)
out_file = utils.splitext_plus(op.basename(bam_in))[0] + "_clean.bam"
c.saveas(out_file)
return op.abspath(out_file) |
async def main(self):
"""
Main coroutine
"""
try:
lastkeys = set()
dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED)
startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK)
self.subroutine(self._flowupdater(), False, '_flowupdateroutine')
# Cache updated objects
presave_update = set()
while True:
self._restartwalk = False
presave_update.update(self._updatedset)
self._updatedset.clear()
_initialkeys = set(self._initialkeys)
try:
walk_result = await call_api(self, 'objectdb', 'walk',
{'keys': self._initialkeys, 'walkerdict': self._walkerdict,
'requestid': (self._requstid, self._requestindex)})
except Exception:
self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection,
exc_info=True)
# Cleanup
await call_api(self, 'objectdb', 'unwatchall',
{'requestid': (self._requstid, self._requestindex)})
await self.wait_with_timeout(2)
self._requestindex += 1
if self._restartwalk:
continue
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
# During walk, there are other initial keys that are updated
# To make sure we get the latest result, restart the walk
continue
lastkeys = set(self._savedkeys)
_savedkeys, _savedresult = walk_result
removekeys = tuple(lastkeys.difference(_savedkeys))
self.reset_initialkeys(_savedkeys, _savedresult)
_initialkeys = set(self._initialkeys)
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
# Start detecting updates
self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine")
# Set the updates back (potentially merged with newly updated objects)
self._updatedset.update(v for v in presave_update)
presave_update.clear()
await self.walkcomplete(_savedkeys, _savedresult)
if removekeys:
await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys,
'requestid': (self._requstid, self._requestindex)})
# Transfer updated objects to updatedset2 before a flow update notification
                # This helps ensure `walkcomplete` executes before `updateflow`
#
# But notice that since there is only a single data object copy in all the program,
# it is impossible to hide the change completely during `updateflow`
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self._savedkeys = _savedkeys
self._savedresult = _savedresult
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
while not self._restartwalk:
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
break
else:
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
await M_(dataupdate, startwalk)
except Exception:
self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection)
raise
finally:
self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}),
False)
if self._flowupdateroutine:
self.terminate(self._flowupdateroutine)
self._flowupdateroutine = None
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
self._dataupdateroutine = None | Main coroutine | Below is the the instruction that describes the task:
### Input:
Main coroutine
### Response:
async def main(self):
"""
Main coroutine
"""
try:
lastkeys = set()
dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED)
startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK)
self.subroutine(self._flowupdater(), False, '_flowupdateroutine')
# Cache updated objects
presave_update = set()
while True:
self._restartwalk = False
presave_update.update(self._updatedset)
self._updatedset.clear()
_initialkeys = set(self._initialkeys)
try:
walk_result = await call_api(self, 'objectdb', 'walk',
{'keys': self._initialkeys, 'walkerdict': self._walkerdict,
'requestid': (self._requstid, self._requestindex)})
except Exception:
self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection,
exc_info=True)
# Cleanup
await call_api(self, 'objectdb', 'unwatchall',
{'requestid': (self._requstid, self._requestindex)})
await self.wait_with_timeout(2)
self._requestindex += 1
if self._restartwalk:
continue
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
# During walk, there are other initial keys that are updated
# To make sure we get the latest result, restart the walk
continue
lastkeys = set(self._savedkeys)
_savedkeys, _savedresult = walk_result
removekeys = tuple(lastkeys.difference(_savedkeys))
self.reset_initialkeys(_savedkeys, _savedresult)
_initialkeys = set(self._initialkeys)
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
# Start detecting updates
self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine")
# Set the updates back (potentially merged with newly updated objects)
self._updatedset.update(v for v in presave_update)
presave_update.clear()
await self.walkcomplete(_savedkeys, _savedresult)
if removekeys:
await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys,
'requestid': (self._requstid, self._requestindex)})
# Transfer updated objects to updatedset2 before a flow update notification
                # This helps ensure `walkcomplete` executes before `updateflow`
#
# But notice that since there is only a single data object copy in all the program,
# it is impossible to hide the change completely during `updateflow`
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self._savedkeys = _savedkeys
self._savedresult = _savedresult
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
while not self._restartwalk:
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
break
else:
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
await M_(dataupdate, startwalk)
except Exception:
self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection)
raise
finally:
self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}),
False)
if self._flowupdateroutine:
self.terminate(self._flowupdateroutine)
self._flowupdateroutine = None
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
self._dataupdateroutine = None |
async def get_proxies(self):
"""Receive proxies from the provider and return them.
:return: :attr:`.proxies`
"""
log.debug('Try to get proxies from %s' % self.domain)
async with aiohttp.ClientSession(
headers=get_headers(), cookies=self._cookies, loop=self._loop
) as self._session:
await self._pipe()
log.debug(
'%d proxies received from %s: %s'
% (len(self.proxies), self.domain, self.proxies)
)
return self.proxies | Receive proxies from the provider and return them.
:return: :attr:`.proxies` | Below is the the instruction that describes the task:
### Input:
Receive proxies from the provider and return them.
:return: :attr:`.proxies`
### Response:
async def get_proxies(self):
"""Receive proxies from the provider and return them.
:return: :attr:`.proxies`
"""
log.debug('Try to get proxies from %s' % self.domain)
async with aiohttp.ClientSession(
headers=get_headers(), cookies=self._cookies, loop=self._loop
) as self._session:
await self._pipe()
log.debug(
'%d proxies received from %s: %s'
% (len(self.proxies), self.domain, self.proxies)
)
return self.proxies |
def catch_error(response):
'''
Checks for Errors in a Response.
401 or 403 - Security Rules Violation.
404 or 417 - Firebase NOT Found.
response - (Request.Response) - response from a request.
'''
status = response.status_code
if status == 401 or status == 403:
raise EnvironmentError("Forbidden")
elif status == 417 or status == 404:
raise EnvironmentError("NotFound") | Checks for Errors in a Response.
401 or 403 - Security Rules Violation.
404 or 417 - Firebase NOT Found.
response - (Request.Response) - response from a request. | Below is the the instruction that describes the task:
### Input:
Checks for Errors in a Response.
401 or 403 - Security Rules Violation.
404 or 417 - Firebase NOT Found.
response - (Request.Response) - response from a request.
### Response:
def catch_error(response):
'''
Checks for Errors in a Response.
401 or 403 - Security Rules Violation.
404 or 417 - Firebase NOT Found.
response - (Request.Response) - response from a request.
'''
status = response.status_code
if status == 401 or status == 403:
raise EnvironmentError("Forbidden")
elif status == 417 or status == 404:
raise EnvironmentError("NotFound") |
def extendedMeasurementOrder():
"""EXTENDED MEASUREMENT ORDER Section 9.1.51"""
a = L2PseudoLength(l2pLength=0x12)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x37) # 00110111
d = ExtendedMeasurementFrequencyList()
packet = a / b / c / d
return packet | EXTENDED MEASUREMENT ORDER Section 9.1.51 | Below is the the instruction that describes the task:
### Input:
EXTENDED MEASUREMENT ORDER Section 9.1.51
### Response:
def extendedMeasurementOrder():
"""EXTENDED MEASUREMENT ORDER Section 9.1.51"""
a = L2PseudoLength(l2pLength=0x12)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x37) # 00110111
d = ExtendedMeasurementFrequencyList()
packet = a / b / c / d
return packet |
def off(self, event):
'Remove an event handler'
try:
self._once_events.remove(event)
except KeyError:
pass
self._callback_by_event.pop(event, None) | Remove an event handler | Below is the the instruction that describes the task:
### Input:
Remove an event handler
### Response:
def off(self, event):
'Remove an event handler'
try:
self._once_events.remove(event)
except KeyError:
pass
self._callback_by_event.pop(event, None) |
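A stand-in illustration of the two attributes off() touches; the emitter object below is hypothetical, holding a set of one-shot event names and a dict of handlers, as the code suggests.
from types import SimpleNamespace, MethodType
emitter = SimpleNamespace(_once_events={'connect'},
                          _callback_by_event={'connect': print, 'message': print})
emitter.off = MethodType(off, emitter)
emitter.off('connect')
print(emitter._once_events, sorted(emitter._callback_by_event))  # set() ['message']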
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
"""Build command-line for invoking ffmpeg.
    The :meth:`run` function uses this to build the command line
arguments and should work in most cases, but calling this function
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.
This is the same as calling :meth:`get_args` except that it also
includes the ``ffmpeg`` command as the first argument.
"""
if isinstance(cmd, basestring):
cmd = [cmd]
elif type(cmd) != list:
cmd = list(cmd)
return cmd + get_args(stream_spec, overwrite_output=overwrite_output) | Build command-line for invoking ffmpeg.
The :meth:`run` function uses this to build the command line
arguments and should work in most cases, but calling this function
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.
This is the same as calling :meth:`get_args` except that it also
includes the ``ffmpeg`` command as the first argument. | Below is the the instruction that describes the task:
### Input:
Build command-line for invoking ffmpeg.
The :meth:`run` function uses this to build the command line
arguments and should work in most cases, but calling this function
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.
This is the same as calling :meth:`get_args` except that it also
includes the ``ffmpeg`` command as the first argument.
### Response:
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
"""Build command-line for invoking ffmpeg.
    The :meth:`run` function uses this to build the command line
arguments and should work in most cases, but calling this function
directly is useful for debugging or if you need to invoke ffmpeg
manually for whatever reason.
This is the same as calling :meth:`get_args` except that it also
includes the ``ffmpeg`` command as the first argument.
"""
if isinstance(cmd, basestring):
cmd = [cmd]
elif type(cmd) != list:
cmd = list(cmd)
return cmd + get_args(stream_spec, overwrite_output=overwrite_output) |
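This appears to be ffmpeg-python's compile(); assuming that package is installed, a typical call looks like the sketch below (the filenames are placeholders).
import ffmpeg
args = ffmpeg.compile(ffmpeg.input('in.mp4').output('out.mp4'), overwrite_output=True)
print(args)  # e.g. ['ffmpeg', '-i', 'in.mp4', 'out.mp4', '-y']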
def modprobe(state, host, name, present=True, force=False):
'''
Load/unload kernel modules.
+ name: name of the module to manage
+ present: whether the module should be loaded or not
+ force: whether to force any add/remove modules
'''
modules = host.fact.kernel_modules
is_present = name in modules
args = ''
if force:
args = ' -f'
# Module is loaded and we don't want it?
if not present and is_present:
yield 'modprobe{0} -r {1}'.format(args, name)
# Module isn't loaded and we want it?
elif present and not is_present:
yield 'modprobe{0} {1}'.format(args, name) | Load/unload kernel modules.
+ name: name of the module to manage
+ present: whether the module should be loaded or not
+ force: whether to force any add/remove modules | Below is the the instruction that describes the task:
### Input:
Load/unload kernel modules.
+ name: name of the module to manage
+ present: whether the module should be loaded or not
+ force: whether to force any add/remove modules
### Response:
def modprobe(state, host, name, present=True, force=False):
'''
Load/unload kernel modules.
+ name: name of the module to manage
+ present: whether the module should be loaded or not
+ force: whether to force any add/remove modules
'''
modules = host.fact.kernel_modules
is_present = name in modules
args = ''
if force:
args = ' -f'
# Module is loaded and we don't want it?
if not present and is_present:
yield 'modprobe{0} -r {1}'.format(args, name)
# Module isn't loaded and we want it?
elif present and not is_present:
yield 'modprobe{0} {1}'.format(args, name) |
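A sketch of what this operation yields, using hypothetical stand-ins for the pyinfra state and host objects; only host.fact.kernel_modules is consulted, modelled here as a set of loaded module names.
from types import SimpleNamespace
fake_host = SimpleNamespace(fact=SimpleNamespace(kernel_modules={'loop'}))
print(list(modprobe(None, fake_host, 'dummy', present=True)))              # ['modprobe dummy']
print(list(modprobe(None, fake_host, 'loop', present=False, force=True)))  # ['modprobe -f -r loop']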
def walk(self, action, user_data=None):
"""
Walk the hierarchy, applying action to each filename.
Args:
action: callable, the callable to invoke for each filename,
will be invoked with the filename, the subfiles, and
the level in the sitemap.
"""
action(self.index_file, self.__root, 0, user_data)
self.__do_walk(self.__root, 1, action, user_data) | Walk the hierarchy, applying action to each filename.
Args:
action: callable, the callable to invoke for each filename,
will be invoked with the filename, the subfiles, and
the level in the sitemap. | Below is the the instruction that describes the task:
### Input:
Walk the hierarchy, applying action to each filename.
Args:
action: callable, the callable to invoke for each filename,
will be invoked with the filename, the subfiles, and
the level in the sitemap.
### Response:
def walk(self, action, user_data=None):
"""
Walk the hierarchy, applying action to each filename.
Args:
action: callable, the callable to invoke for each filename,
will be invoked with the filename, the subfiles, and
the level in the sitemap.
"""
action(self.index_file, self.__root, 0, user_data)
self.__do_walk(self.__root, 1, action, user_data) |
def get_gnupg_components(sp=subprocess):
"""Parse GnuPG components' paths."""
args = [util.which('gpgconf'), '--list-components']
output = check_output(args=args, sp=sp)
components = dict(re.findall('(.*):.*:(.*)', output.decode('utf-8')))
log.debug('gpgconf --list-components: %s', components)
return components | Parse GnuPG components' paths. | Below is the the instruction that describes the task:
### Input:
Parse GnuPG components' paths.
### Response:
def get_gnupg_components(sp=subprocess):
"""Parse GnuPG components' paths."""
args = [util.which('gpgconf'), '--list-components']
output = check_output(args=args, sp=sp)
components = dict(re.findall('(.*):.*:(.*)', output.decode('utf-8')))
log.debug('gpgconf --list-components: %s', components)
return components |
def load_settings(self, settings):
"""Load settings from file"""
with open(settings) as settings_file:
settings_dict = simplejson.load(settings_file)
for key, value in settings_dict.items():
self.__setattr__(key, value) | Load settings from file | Below is the the instruction that describes the task:
### Input:
Load settings from file
### Response:
def load_settings(self, settings):
"""Load settings from file"""
with open(settings) as settings_file:
settings_dict = simplejson.load(settings_file)
for key, value in settings_dict.items():
self.__setattr__(key, value) |
def get_full_durable_object(arn, event_time, durable_model):
"""
Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param record:
:param durable_model:
:return:
"""
LOG.debug(f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Durable table...')
item = list(durable_model.query(arn, durable_model.eventTime == event_time))
# It is not clear if this would ever be the case... We will consider this an error condition for now.
if not item:
LOG.error(f'[?] Item with ARN/Event Time: {arn}/{event_time} was NOT found in the Durable table...'
f' This is odd.')
raise DurableItemIsMissingException({"item_arn": arn, "event_time": event_time})
# We need to place the real configuration data into the record so it can be deserialized into
# the durable model correctly:
return item[0] | Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param record:
:param durable_model:
:return: | Below is the the instruction that describes the task:
### Input:
Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param record:
:param durable_model:
:return:
### Response:
def get_full_durable_object(arn, event_time, durable_model):
"""
Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param record:
:param durable_model:
:return:
"""
LOG.debug(f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Durable table...')
item = list(durable_model.query(arn, durable_model.eventTime == event_time))
# It is not clear if this would ever be the case... We will consider this an error condition for now.
if not item:
LOG.error(f'[?] Item with ARN/Event Time: {arn}/{event_time} was NOT found in the Durable table...'
f' This is odd.')
raise DurableItemIsMissingException({"item_arn": arn, "event_time": event_time})
# We need to place the real configuration data into the record so it can be deserialized into
# the durable model correctly:
return item[0] |
def find_keyword_in_context(tokens, keyword, contextsize=1):
"""Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list"""
if isinstance(keyword,tuple) and isinstance(keyword,list):
l = len(keyword)
else:
keyword = (keyword,)
l = 1
n = l + contextsize*2
focuspos = contextsize + 1
for ngram in Windower(tokens,n,None,None):
if ngram[focuspos:focuspos+l] == keyword:
            yield ngram[:focuspos], ngram[focuspos:focuspos+l],ngram[focuspos+l+1:] | Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple words, in which case it should be passed as a tuple or list | Below is the the instruction that describes the task:
### Input:
Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple words, in which case it should be passed as a tuple or list
### Response:
def find_keyword_in_context(tokens, keyword, contextsize=1):
"""Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list"""
if isinstance(keyword,tuple) and isinstance(keyword,list):
l = len(keyword)
else:
keyword = (keyword,)
l = 1
n = l + contextsize*2
focuspos = contextsize + 1
for ngram in Windower(tokens,n,None,None):
if ngram[focuspos:focuspos+l] == keyword:
yield ngram[:focuspos], ngram[focuspos:focuspos+l],ngram[focuspos+l+1:] |
def delete_nic(self, instance_id, port_id):
"""Delete a Network Interface Controller"""
self.client.servers.interface_detach(instance_id, port_id)
return True | Delete a Network Interface Controller | Below is the the instruction that describes the task:
### Input:
Delete a Network Interface Controller
### Response:
def delete_nic(self, instance_id, port_id):
"""Delete a Network Interface Controller"""
self.client.servers.interface_detach(instance_id, port_id)
return True |
def init_glance_consumer(self, mq):
"""
Init openstack glance mq
1. Check if enable listening glance notification
2. Create consumer
:param mq: class ternya.mq.MQ
"""
if not self.enable_component_notification(Openstack.Glance):
log.debug("disable listening glance notification")
return
for i in range(self.config.glance_mq_consumer_count):
mq.create_consumer(self.config.glance_mq_exchange,
self.config.glance_mq_queue,
ProcessFactory.process(Openstack.Glance))
log.debug("enable listening openstack glance notification.") | Init openstack glance mq
1. Check if enable listening glance notification
2. Create consumer
:param mq: class ternya.mq.MQ | Below is the the instruction that describes the task:
### Input:
Init openstack glance mq
1. Check if enable listening glance notification
2. Create consumer
:param mq: class ternya.mq.MQ
### Response:
def init_glance_consumer(self, mq):
"""
Init openstack glance mq
1. Check if enable listening glance notification
2. Create consumer
:param mq: class ternya.mq.MQ
"""
if not self.enable_component_notification(Openstack.Glance):
log.debug("disable listening glance notification")
return
for i in range(self.config.glance_mq_consumer_count):
mq.create_consumer(self.config.glance_mq_exchange,
self.config.glance_mq_queue,
ProcessFactory.process(Openstack.Glance))
log.debug("enable listening openstack glance notification.") |
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
if "data" in d and not isinstance(d["data"], DatetimeIndex):
# Avoid need to verify integrity by calling simple_new directly
data = d.pop("data")
result = cls._simple_new(data, **d)
else:
with warnings.catch_warnings():
# we ignore warnings from passing verify_integrity=False
# TODO: If we knew what was going in to **d, we might be able to
# go through _simple_new instead
warnings.simplefilter("ignore")
result = cls.__new__(cls, verify_integrity=False, **d)
return result | This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ | Below is the the instruction that describes the task:
### Input:
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
### Response:
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
if "data" in d and not isinstance(d["data"], DatetimeIndex):
# Avoid need to verify integrity by calling simple_new directly
data = d.pop("data")
result = cls._simple_new(data, **d)
else:
with warnings.catch_warnings():
# we ignore warnings from passing verify_integrity=False
# TODO: If we knew what was going in to **d, we might be able to
# go through _simple_new instead
warnings.simplefilter("ignore")
result = cls.__new__(cls, verify_integrity=False, **d)
return result |
def regression():
"""
Run regression testing - lint and then run all tests.
"""
# HACK: Start using hitchbuildpy to get around this.
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook = _storybook({}).only_uninherited()
#storybook.with_params(**{"python version": "2.7.10"})\
#.ordered_by_name().play()
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
lint() | Run regression testing - lint and then run all tests. | Below is the the instruction that describes the task:
### Input:
Run regression testing - lint and then run all tests.
### Response:
def regression():
"""
Run regression testing - lint and then run all tests.
"""
# HACK: Start using hitchbuildpy to get around this.
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook = _storybook({}).only_uninherited()
#storybook.with_params(**{"python version": "2.7.10"})\
#.ordered_by_name().play()
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
lint() |
def parse_header_line(self, line):
"""docstring for parse_header_line"""
self.header = line[1:].rstrip().split('\t')
if len(self.header) < 9:
self.header = line[1:].rstrip().split()
self.individuals = self.header[9:] | docstring for parse_header_line | Below is the the instruction that describes the task:
### Input:
docstring for parse_header_line
### Response:
def parse_header_line(self, line):
"""docstring for parse_header_line"""
self.header = line[1:].rstrip().split('\t')
if len(self.header) < 9:
self.header = line[1:].rstrip().split()
self.individuals = self.header[9:] |
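A hypothetical usage sketch on a VCF-style header line; the _Demo holder class is a stand-in for the parser object that owns this method.
class _Demo:
    parse_header_line = parse_header_line
p = _Demo()
p.parse_header_line("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tNA001\tNA002\n")
print(p.header[:2], p.individuals)  # ['CHROM', 'POS'] ['NA001', 'NA002']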
def fetch_suvi_l1b(self, product, correct=True, median_kernel=5):
"""
Given a product keyword, downloads the SUVI l1b image into the current directory.
NOTE: the suvi_l1b_url must be properly set for the Fetcher object
:param product: the keyword for the product, e.g. suvi-l1b-fe094
:param correct: remove nans and negatives
:return: tuple of product name, fits header, and data object
the header and data object will be None if the request failed
"""
if self.date < datetime(2018, 5, 23) and not (self.date >= datetime(2017, 9, 6) \
and self.date <= datetime(2017, 9, 10, 23, 59)):
print("SUVI data is only available after 2018-5-23")
return product, None, None
url = self.suvi_base_url + product + "/{}/{:02d}/{:02d}".format(self.date.year, self.date.month, self.date.day)
if self.verbose:
print("Requesting from {}".format(url))
try:
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
page = response.read()
except (URLError, HTTPError):
msg = "The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser."
raise RuntimeError(msg.format(url))
soup = BeautifulSoup(page, 'html.parser')
links = [link['href'] for link in soup.find_all('a', href=True)]
links = [link for link in links if "SUVI" in link]
meta = [self.parse_filename_meta(fn) for fn in links if ".fits" in fn]
links = sorted(meta, key=lambda m: np.abs((m[2] - self.date).total_seconds()))[:10]
links = [fn for fn, _, _, _, _ in links]
i = 0
def download_and_check(i):
try:
urllib.request.urlretrieve(url + "/" + links[i], "{}.fits".format(product))
except (URLError, HTTPError):
msg = "THE SUVI file you requested, {}, appears to be unvailable. Check if the website is correct."
raise RuntimeError(msg.format(url + "/" + links[i]))
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
return head['exptime'] > 0.5
while not download_and_check(i):
i += 1
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
data = hdu[0].data
os.remove("{}.fits".format(product))
if correct:
data[np.isnan(data)] = 0
data[data < 0] = 0
if median_kernel:
data = medfilt(data, median_kernel)
data, head = self.align_solar_fov(head, data, 2.5, 2.0, rotate=True, scale=False)
if self.verbose:
print(product, " is using ", head['date-obs'])
return product, head, data | Given a product keyword, downloads the SUVI l1b image into the current directory.
NOTE: the suvi_l1b_url must be properly set for the Fetcher object
:param product: the keyword for the product, e.g. suvi-l1b-fe094
:param correct: remove nans and negatives
:return: tuple of product name, fits header, and data object
the header and data object will be None if the request failed | Below is the the instruction that describes the task:
### Input:
Given a product keyword, downloads the SUVI l1b image into the current directory.
NOTE: the suvi_l1b_url must be properly set for the Fetcher object
:param product: the keyword for the product, e.g. suvi-l1b-fe094
:param correct: remove nans and negatives
:return: tuple of product name, fits header, and data object
the header and data object will be None if the request failed
### Response:
def fetch_suvi_l1b(self, product, correct=True, median_kernel=5):
"""
Given a product keyword, downloads the SUVI l1b image into the current directory.
NOTE: the suvi_l1b_url must be properly set for the Fetcher object
:param product: the keyword for the product, e.g. suvi-l1b-fe094
:param correct: remove nans and negatives
:return: tuple of product name, fits header, and data object
the header and data object will be None if the request failed
"""
if self.date < datetime(2018, 5, 23) and not (self.date >= datetime(2017, 9, 6) \
and self.date <= datetime(2017, 9, 10, 23, 59)):
print("SUVI data is only available after 2018-5-23")
return product, None, None
url = self.suvi_base_url + product + "/{}/{:02d}/{:02d}".format(self.date.year, self.date.month, self.date.day)
if self.verbose:
print("Requesting from {}".format(url))
try:
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
page = response.read()
except (URLError, HTTPError):
msg = "The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser."
raise RuntimeError(msg.format(url))
soup = BeautifulSoup(page, 'html.parser')
links = [link['href'] for link in soup.find_all('a', href=True)]
links = [link for link in links if "SUVI" in link]
meta = [self.parse_filename_meta(fn) for fn in links if ".fits" in fn]
links = sorted(meta, key=lambda m: np.abs((m[2] - self.date).total_seconds()))[:10]
links = [fn for fn, _, _, _, _ in links]
i = 0
def download_and_check(i):
try:
urllib.request.urlretrieve(url + "/" + links[i], "{}.fits".format(product))
except (URLError, HTTPError):
msg = "THE SUVI file you requested, {}, appears to be unvailable. Check if the website is correct."
raise RuntimeError(msg.format(url + "/" + links[i]))
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
return head['exptime'] > 0.5
while not download_and_check(i):
i += 1
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
data = hdu[0].data
os.remove("{}.fits".format(product))
if correct:
data[np.isnan(data)] = 0
data[data < 0] = 0
if median_kernel:
data = medfilt(data, median_kernel)
data, head = self.align_solar_fov(head, data, 2.5, 2.0, rotate=True, scale=False)
if self.verbose:
print(product, " is using ", head['date-obs'])
return product, head, data |
def current(sam=False):
'''
Get the username that salt-minion is running under. If salt-minion is
running as a service it should return the Local System account. If salt is
running from a command prompt it should return the username that started the
command prompt.
.. versionadded:: 2015.5.6
Args:
sam (bool, optional): False returns just the username without any domain
notation. True returns the domain with the username in the SAM
format. Ie: ``domain\\username``
Returns:
str: Returns username
CLI Example:
.. code-block:: bash
salt '*' user.current
'''
try:
if sam:
user_name = win32api.GetUserNameEx(win32con.NameSamCompatible)
else:
user_name = win32api.GetUserName()
except pywintypes.error as exc:
log.error('Failed to get current user')
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
log.error('msg: %s', exc.strerror)
raise CommandExecutionError('Failed to get current user', info=exc)
if not user_name:
raise CommandExecutionError('Failed to get current user')
return user_name | Get the username that salt-minion is running under. If salt-minion is
running as a service it should return the Local System account. If salt is
running from a command prompt it should return the username that started the
command prompt.
.. versionadded:: 2015.5.6
Args:
sam (bool, optional): False returns just the username without any domain
notation. True returns the domain with the username in the SAM
format. Ie: ``domain\\username``
Returns:
str: Returns username
CLI Example:
.. code-block:: bash
salt '*' user.current | Below is the the instruction that describes the task:
### Input:
Get the username that salt-minion is running under. If salt-minion is
running as a service it should return the Local System account. If salt is
running from a command prompt it should return the username that started the
command prompt.
.. versionadded:: 2015.5.6
Args:
sam (bool, optional): False returns just the username without any domain
notation. True returns the domain with the username in the SAM
format. Ie: ``domain\\username``
Returns:
str: Returns username
CLI Example:
.. code-block:: bash
salt '*' user.current
### Response:
def current(sam=False):
'''
Get the username that salt-minion is running under. If salt-minion is
running as a service it should return the Local System account. If salt is
running from a command prompt it should return the username that started the
command prompt.
.. versionadded:: 2015.5.6
Args:
sam (bool, optional): False returns just the username without any domain
notation. True returns the domain with the username in the SAM
format. Ie: ``domain\\username``
Returns:
str: Returns username
CLI Example:
.. code-block:: bash
salt '*' user.current
'''
try:
if sam:
user_name = win32api.GetUserNameEx(win32con.NameSamCompatible)
else:
user_name = win32api.GetUserName()
except pywintypes.error as exc:
log.error('Failed to get current user')
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
log.error('msg: %s', exc.strerror)
raise CommandExecutionError('Failed to get current user', info=exc)
if not user_name:
raise CommandExecutionError('Failed to get current user')
return user_name |
def json(self):
"""
Return JSON representation of object.
"""
data = {}
for item in self._data:
if isinstance(self._data[item], filetree):
data[item] = self._data[item].json()
else:
data[item] = self._data[item]
return data | Return JSON representation of object. | Below is the the instruction that describes the task:
### Input:
Return JSON representation of object.
### Response:
def json(self):
"""
Return JSON representation of object.
"""
data = {}
for item in self._data:
if isinstance(self._data[item], filetree):
data[item] = self._data[item].json()
else:
data[item] = self._data[item]
return data |
def submit(self):
"""
Partitions the file into chunks and submits them into group of 4
for upload on the api upload pool.
:return: Futures
"""
futures = []
while self.submitted < 4 and not self.done():
part = self.parts.pop(0)
part_number = part['part']
part_read_offset = part['offset']
part_read_limit = part['limit']
self.fp.seek(part_read_offset)
part_data = self.fp.read(part_read_limit - part_read_offset)
futures.append(
self.pool.submit(
_upload_part, self.api, self.session,
self._URL['upload_part'], self.upload_id,
part_number, part_data, self.retry, self.timeout
)
)
self.submitted += 1
self.total_submitted += 1
return futures | Partitions the file into chunks and submits them into group of 4
for upload on the api upload pool.
:return: Futures | Below is the the instruction that describes the task:
### Input:
Partitions the file into chunks and submits them into group of 4
for upload on the api upload pool.
:return: Futures
### Response:
def submit(self):
"""
Partitions the file into chunks and submits them into group of 4
for upload on the api upload pool.
:return: Futures
"""
futures = []
while self.submitted < 4 and not self.done():
part = self.parts.pop(0)
part_number = part['part']
part_read_offset = part['offset']
part_read_limit = part['limit']
self.fp.seek(part_read_offset)
part_data = self.fp.read(part_read_limit - part_read_offset)
futures.append(
self.pool.submit(
_upload_part, self.api, self.session,
self._URL['upload_part'], self.upload_id,
part_number, part_data, self.retry, self.timeout
)
)
self.submitted += 1
self.total_submitted += 1
return futures |
def install_dir(self):
"""Returns application installation path.
.. note::
            If this fails, it falls back to a restricted interface, which can only be used by approved apps.
:rtype: str
"""
max_len = 500
directory = self._get_str(self._iface.get_install_dir, [self.app_id], max_len=max_len)
if not directory:
# Fallback to restricted interface (can only be used by approved apps).
directory = self._get_str(self._iface_list.get_install_dir, [self.app_id], max_len=max_len)
return directory | Returns application installation path.
.. note::
If this fails, it falls back to a restricted interface, which can only be used by approved apps.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Returns application installation path.
.. note::
If this fails, it falls back to a restricted interface, which can only be used by approved apps.
:rtype: str
### Response:
def install_dir(self):
"""Returns application installation path.
.. note::
            If this fails, it falls back to a restricted interface, which can only be used by approved apps.
:rtype: str
"""
max_len = 500
directory = self._get_str(self._iface.get_install_dir, [self.app_id], max_len=max_len)
if not directory:
# Fallback to restricted interface (can only be used by approved apps).
directory = self._get_str(self._iface_list.get_install_dir, [self.app_id], max_len=max_len)
return directory |
def startall(self, wait=False, **kwdargs):
"""Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
"""
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done") | Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor. | Below is the the instruction that describes the task:
### Input:
Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
### Response:
def startall(self, wait=False, **kwdargs):
"""Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
"""
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done") |
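A minimal, self-contained sketch of the registration pattern used above, with stdlib threading only: each worker registers itself under a shared Condition and the last one to arrive flips the status to 'up', which is what lets startall(wait=True) return. The SimplePool and _worker names are hypothetical illustrations, not part of the original thread-pool API.

import threading

class SimplePool(object):
    # Hypothetical miniature of the register-and-wait idea in startall().
    def __init__(self, numthreads):
        self.numthreads = numthreads
        self.runningcount = 0
        self.status = 'down'
        self.regcond = threading.Condition()

    def _worker(self):
        # Each worker registers; the last one up marks the pool 'up'.
        with self.regcond:
            self.runningcount += 1
            if self.runningcount == self.numthreads:
                self.status = 'up'
            self.regcond.notify_all()

    def startall(self, wait=False):
        with self.regcond:
            self.status = 'start'
        for _ in range(self.numthreads):
            threading.Thread(target=self._worker).start()
        if wait:
            with self.regcond:
                while self.status != 'up':
                    self.regcond.wait()

pool = SimplePool(4)
pool.startall(wait=True)
print(pool.status)  # 'up' only after every worker has registered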
def input_validate_aead(aead, name='aead', expected_len=None, max_aead_len = pyhsm.defines.YSM_AEAD_MAX_SIZE):
""" Input validation for YHSM_GeneratedAEAD or string. """
if isinstance(aead, pyhsm.aead_cmd.YHSM_GeneratedAEAD):
aead = aead.data
if expected_len != None:
return input_validate_str(aead, name, exact_len = expected_len)
else:
return input_validate_str(aead, name, max_len=max_aead_len) | Input validation for YHSM_GeneratedAEAD or string. | Below is the instruction that describes the task:
### Input:
Input validation for YHSM_GeneratedAEAD or string.
### Response:
def input_validate_aead(aead, name='aead', expected_len=None, max_aead_len = pyhsm.defines.YSM_AEAD_MAX_SIZE):
""" Input validation for YHSM_GeneratedAEAD or string. """
if isinstance(aead, pyhsm.aead_cmd.YHSM_GeneratedAEAD):
aead = aead.data
if expected_len != None:
return input_validate_str(aead, name, exact_len = expected_len)
else:
return input_validate_str(aead, name, max_len=max_aead_len) |
def compute_cost(A2, Y):
"""
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
"""
m = Y.shape[1] # number of example
# Compute the cross-entropy cost
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
cost = -np.sum(logprobs) / m
cost = np.squeeze(cost) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert (isinstance(cost, float))
return cost | Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13) | Below is the instruction that describes the task:
### Input:
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
### Response:
def compute_cost(A2, Y):
"""
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
Returns:
cost -- cross-entropy cost given equation (13)
"""
m = Y.shape[1] # number of example
# Compute the cross-entropy cost
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
cost = -np.sum(logprobs) / m
cost = np.squeeze(cost) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert (isinstance(cost, float))
return cost |
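A quick numeric illustration of the same cross-entropy formula on a toy batch (numpy only); the activations and labels below are made up for the example.

import numpy as np

A2 = np.array([[0.9, 0.2, 0.8, 0.4]])   # sigmoid outputs for 4 examples
Y = np.array([[1, 0, 1, 0]])            # "true" labels
m = Y.shape[1]
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
print(-np.sum(logprobs) / m)            # ~0.27 for this toy batch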
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path)) | Returns the length of the array JSON value under ``path`` at key
``name`` | Below is the instruction that describes the task:
### Input:
Returns the length of the array JSON value under ``path`` at key
``name``
### Response:
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path)) |
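This is the shape of the JSON.ARRLEN wrapper in the rejson-py client; a hedged usage sketch, assuming a local Redis instance with the ReJSON module loaded (host, port, and key names are placeholders).

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('doc', Path.rootPath(), {'colors': ['red', 'green', 'blue']})
print(rj.jsonarrlen('doc', Path('.colors')))  # -> 3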
def _model_to_sbml(cobra_model, f_replace=None, units=True):
"""Convert Cobra model to SBMLDocument.
Parameters
----------
cobra_model : cobra.core.Model
Cobra model instance
f_replace : dict of replacement functions
Replacement to apply on identifiers.
units : boolean
Should the FLUX_UNITS be written in the SBMLDocument.
Returns
-------
libsbml.SBMLDocument
"""
if f_replace is None:
f_replace = {}
sbml_ns = libsbml.SBMLNamespaces(3, 1) # SBML L3V1
sbml_ns.addPackageNamespace("fbc", 2) # fbc-v2
doc = libsbml.SBMLDocument(sbml_ns) # noqa: E501 type: libsbml.SBMLDocument
doc.setPackageRequired("fbc", False)
doc.setSBOTerm(SBO_FBA_FRAMEWORK)
model = doc.createModel() # type: libsbml.Model
model_fbc = model.getPlugin("fbc") # type: libsbml.FbcModelPlugin
model_fbc.setStrict(True)
if cobra_model.id is not None:
model.setId(cobra_model.id)
model.setMetaId("meta_" + cobra_model.id)
else:
model.setMetaId("meta_model")
if cobra_model.name is not None:
model.setName(cobra_model.name)
_sbase_annotations(model, cobra_model.annotation)
# Meta information (ModelHistory)
if hasattr(cobra_model, "_sbml"):
meta = cobra_model._sbml
if "annotation" in meta:
_sbase_annotations(doc, meta["annotation"])
if "notes" in meta:
_sbase_notes_dict(doc, meta["notes"])
history = libsbml.ModelHistory() # type: libsbml.ModelHistory
if "created" in meta and meta["created"]:
history.setCreatedDate(meta["created"])
else:
time = datetime.datetime.now()
timestr = time.strftime('%Y-%m-%dT%H:%M:%S')
date = libsbml.Date(timestr)
_check(history.setCreatedDate(date), 'set creation date')
_check(history.setModifiedDate(date), 'set modified date')
if "creators" in meta:
for cobra_creator in meta["creators"]:
creator = libsbml.ModelCreator() # noqa: E501 type: libsbml.ModelCreator
if cobra_creator.get("familyName", None):
creator.setFamilyName(cobra_creator["familyName"])
if cobra_creator.get("givenName", None):
creator.setGivenName(cobra_creator["givenName"])
if cobra_creator.get("organisation", None):
creator.setOrganisation(cobra_creator["organisation"])
if cobra_creator.get("email", None):
creator.setEmail(cobra_creator["email"])
_check(history.addCreator(creator),
"adding creator to ModelHistory.")
_check(model.setModelHistory(history), 'set model history')
# Units
if units:
flux_udef = model.createUnitDefinition() # noqa: E501 type: libsbml.UnitDefinition
flux_udef.setId(UNITS_FLUX[0])
for u in UNITS_FLUX[1]:
unit = flux_udef.createUnit() # type: libsbml.Unit
unit.setKind(u.kind)
unit.setExponent(u.exponent)
unit.setScale(u.scale)
unit.setMultiplier(u.multiplier)
# minimum and maximum value from model
if len(cobra_model.reactions) > 0:
min_value = min(cobra_model.reactions.list_attr("lower_bound"))
max_value = max(cobra_model.reactions.list_attr("upper_bound"))
else:
min_value = config.lower_bound
max_value = config.upper_bound
_create_parameter(model, pid=LOWER_BOUND_ID,
value=min_value, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=UPPER_BOUND_ID,
value=max_value, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=ZERO_BOUND_ID,
value=0, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=BOUND_MINUS_INF,
value=-float("Inf"), sbo=SBO_FLUX_BOUND)
_create_parameter(model, pid=BOUND_PLUS_INF,
value=float("Inf"), sbo=SBO_FLUX_BOUND)
# Compartments
# FIXME: use first class compartment model (and write notes & annotations)
# (https://github.com/opencobra/cobrapy/issues/811)
for cid, name in iteritems(cobra_model.compartments):
compartment = model.createCompartment() # type: libsbml.Compartment
compartment.setId(cid)
compartment.setName(name)
compartment.setConstant(True)
# FIXME: write annotations and notes
# _sbase_notes(c, com.notes)
# _sbase_annotations(c, com.annotation)
# Species
for metabolite in cobra_model.metabolites:
specie = model.createSpecies() # type: libsbml.Species
mid = metabolite.id
if f_replace and F_SPECIE_REV in f_replace:
mid = f_replace[F_SPECIE_REV](mid)
specie.setId(mid)
specie.setConstant(False)
specie.setBoundaryCondition(False)
specie.setHasOnlySubstanceUnits(False)
specie.setName(metabolite.name)
specie.setCompartment(metabolite.compartment)
s_fbc = specie.getPlugin("fbc") # type: libsbml.FbcSpeciesPlugin
if metabolite.charge is not None:
s_fbc.setCharge(metabolite.charge)
if metabolite.formula is not None:
s_fbc.setChemicalFormula(metabolite.formula)
_sbase_annotations(specie, metabolite.annotation)
_sbase_notes_dict(specie, metabolite.notes)
# Genes
for cobra_gene in cobra_model.genes:
gp = model_fbc.createGeneProduct() # type: libsbml.GeneProduct
gid = cobra_gene.id
if f_replace and F_GENE_REV in f_replace:
gid = f_replace[F_GENE_REV](gid)
gp.setId(gid)
gname = cobra_gene.name
if gname is None or len(gname) == 0:
gname = gid
gp.setName(gname)
gp.setLabel(gid)
_sbase_annotations(gp, cobra_gene.annotation)
_sbase_notes_dict(gp, cobra_gene.notes)
# Objective
objective = model_fbc.createObjective() # type: libsbml.Objective
objective.setId("obj")
objective.setType(SHORT_LONG_DIRECTION[cobra_model.objective.direction])
model_fbc.setActiveObjectiveId("obj")
# Reactions
reaction_coefficients = linear_reaction_coefficients(cobra_model)
for cobra_reaction in cobra_model.reactions:
rid = cobra_reaction.id
if f_replace and F_REACTION_REV in f_replace:
rid = f_replace[F_REACTION_REV](rid)
reaction = model.createReaction() # type: libsbml.Reaction
reaction.setId(rid)
reaction.setName(cobra_reaction.name)
reaction.setFast(False)
reaction.setReversible((cobra_reaction.lower_bound < 0))
_sbase_annotations(reaction, cobra_reaction.annotation)
_sbase_notes_dict(reaction, cobra_reaction.notes)
# stoichiometry
for metabolite, stoichiometry in iteritems(cobra_reaction._metabolites): # noqa: E501
sid = metabolite.id
if f_replace and F_SPECIE_REV in f_replace:
sid = f_replace[F_SPECIE_REV](sid)
if stoichiometry < 0:
sref = reaction.createReactant() # noqa: E501 type: libsbml.SpeciesReference
sref.setSpecies(sid)
sref.setStoichiometry(-stoichiometry)
sref.setConstant(True)
else:
sref = reaction.createProduct() # noqa: E501 type: libsbml.SpeciesReference
sref.setSpecies(sid)
sref.setStoichiometry(stoichiometry)
sref.setConstant(True)
# bounds
r_fbc = reaction.getPlugin("fbc") # type: libsbml.FbcReactionPlugin
r_fbc.setLowerFluxBound(_create_bound(model, cobra_reaction,
"lower_bound",
f_replace=f_replace, units=units,
flux_udef=flux_udef))
r_fbc.setUpperFluxBound(_create_bound(model, cobra_reaction,
"upper_bound",
f_replace=f_replace, units=units,
flux_udef=flux_udef))
# GPR
gpr = cobra_reaction.gene_reaction_rule
if gpr is not None and len(gpr) > 0:
# replace ids in string
if f_replace and F_GENE_REV in f_replace:
gpr = gpr.replace('(', '( ')
gpr = gpr.replace(')', ' )')
tokens = gpr.split(' ')
for k in range(len(tokens)):
if tokens[k] not in [' ', 'and', 'or', '(', ')']:
tokens[k] = f_replace[F_GENE_REV](tokens[k])
gpr_new = " ".join(tokens)
gpa = r_fbc.createGeneProductAssociation() # noqa: E501 type: libsbml.GeneProductAssociation
gpa.setAssociation(gpr_new)
# objective coefficients
if reaction_coefficients.get(cobra_reaction, 0) != 0:
flux_obj = objective.createFluxObjective() # noqa: E501 type: libsbml.FluxObjective
flux_obj.setReaction(rid)
flux_obj.setCoefficient(cobra_reaction.objective_coefficient)
# write groups
if len(cobra_model.groups) > 0:
doc.enablePackage(
"http://www.sbml.org/sbml/level3/version1/groups/version1",
"groups", True)
doc.setPackageRequired("groups", False)
model_group = model.getPlugin("groups") # noqa: E501 type: libsbml.GroupsModelPlugin
for cobra_group in cobra_model.groups:
group = model_group.createGroup() # type: libsbml.Group
group.setId(cobra_group.id)
group.setName(cobra_group.name)
group.setKind(cobra_group.kind)
_sbase_notes_dict(group, cobra_group.notes)
_sbase_annotations(group, cobra_group.annotation)
for cobra_member in cobra_group.members:
member = group.createMember() # type: libsbml.Member
mid = cobra_member.id
m_type = str(type(cobra_member))
# id replacements
if "Reaction" in m_type:
if f_replace and F_REACTION_REV in f_replace:
mid = f_replace[F_REACTION_REV](mid)
if "Metabolite" in m_type:
if f_replace and F_SPECIE_REV in f_replace:
mid = f_replace[F_SPECIE_REV](mid)
if "Gene" in m_type:
if f_replace and F_GENE_REV in f_replace:
mid = f_replace[F_GENE_REV](mid)
member.setIdRef(mid)
if cobra_member.name and len(cobra_member.name) > 0:
member.setName(cobra_member.name)
return doc | Convert Cobra model to SBMLDocument.
Parameters
----------
cobra_model : cobra.core.Model
Cobra model instance
f_replace : dict of replacement functions
Replacement to apply on identifiers.
units : boolean
Should the FLUX_UNITS be written in the SBMLDocument.
Returns
-------
libsbml.SBMLDocument | Below is the instruction that describes the task:
### Input:
Convert Cobra model to SBMLDocument.
Parameters
----------
cobra_model : cobra.core.Model
Cobra model instance
f_replace : dict of replacement functions
Replacement to apply on identifiers.
units : boolean
Should the FLUX_UNITS be written in the SBMLDocument.
Returns
-------
libsbml.SBMLDocument
### Response:
def _model_to_sbml(cobra_model, f_replace=None, units=True):
"""Convert Cobra model to SBMLDocument.
Parameters
----------
cobra_model : cobra.core.Model
Cobra model instance
f_replace : dict of replacement functions
Replacement to apply on identifiers.
units : boolean
Should the FLUX_UNITS be written in the SBMLDocument.
Returns
-------
libsbml.SBMLDocument
"""
if f_replace is None:
f_replace = {}
sbml_ns = libsbml.SBMLNamespaces(3, 1) # SBML L3V1
sbml_ns.addPackageNamespace("fbc", 2) # fbc-v2
doc = libsbml.SBMLDocument(sbml_ns) # noqa: E501 type: libsbml.SBMLDocument
doc.setPackageRequired("fbc", False)
doc.setSBOTerm(SBO_FBA_FRAMEWORK)
model = doc.createModel() # type: libsbml.Model
model_fbc = model.getPlugin("fbc") # type: libsbml.FbcModelPlugin
model_fbc.setStrict(True)
if cobra_model.id is not None:
model.setId(cobra_model.id)
model.setMetaId("meta_" + cobra_model.id)
else:
model.setMetaId("meta_model")
if cobra_model.name is not None:
model.setName(cobra_model.name)
_sbase_annotations(model, cobra_model.annotation)
# Meta information (ModelHistory)
if hasattr(cobra_model, "_sbml"):
meta = cobra_model._sbml
if "annotation" in meta:
_sbase_annotations(doc, meta["annotation"])
if "notes" in meta:
_sbase_notes_dict(doc, meta["notes"])
history = libsbml.ModelHistory() # type: libsbml.ModelHistory
if "created" in meta and meta["created"]:
history.setCreatedDate(meta["created"])
else:
time = datetime.datetime.now()
timestr = time.strftime('%Y-%m-%dT%H:%M:%S')
date = libsbml.Date(timestr)
_check(history.setCreatedDate(date), 'set creation date')
_check(history.setModifiedDate(date), 'set modified date')
if "creators" in meta:
for cobra_creator in meta["creators"]:
creator = libsbml.ModelCreator() # noqa: E501 type: libsbml.ModelCreator
if cobra_creator.get("familyName", None):
creator.setFamilyName(cobra_creator["familyName"])
if cobra_creator.get("givenName", None):
creator.setGivenName(cobra_creator["givenName"])
if cobra_creator.get("organisation", None):
creator.setOrganisation(cobra_creator["organisation"])
if cobra_creator.get("email", None):
creator.setEmail(cobra_creator["email"])
_check(history.addCreator(creator),
"adding creator to ModelHistory.")
_check(model.setModelHistory(history), 'set model history')
# Units
if units:
flux_udef = model.createUnitDefinition() # noqa: E501 type: libsbml.UnitDefinition
flux_udef.setId(UNITS_FLUX[0])
for u in UNITS_FLUX[1]:
unit = flux_udef.createUnit() # type: libsbml.Unit
unit.setKind(u.kind)
unit.setExponent(u.exponent)
unit.setScale(u.scale)
unit.setMultiplier(u.multiplier)
# minimum and maximum value from model
if len(cobra_model.reactions) > 0:
min_value = min(cobra_model.reactions.list_attr("lower_bound"))
max_value = max(cobra_model.reactions.list_attr("upper_bound"))
else:
min_value = config.lower_bound
max_value = config.upper_bound
_create_parameter(model, pid=LOWER_BOUND_ID,
value=min_value, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=UPPER_BOUND_ID,
value=max_value, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=ZERO_BOUND_ID,
value=0, sbo=SBO_DEFAULT_FLUX_BOUND)
_create_parameter(model, pid=BOUND_MINUS_INF,
value=-float("Inf"), sbo=SBO_FLUX_BOUND)
_create_parameter(model, pid=BOUND_PLUS_INF,
value=float("Inf"), sbo=SBO_FLUX_BOUND)
# Compartments
# FIXME: use first class compartment model (and write notes & annotations)
# (https://github.com/opencobra/cobrapy/issues/811)
for cid, name in iteritems(cobra_model.compartments):
compartment = model.createCompartment() # type: libsbml.Compartment
compartment.setId(cid)
compartment.setName(name)
compartment.setConstant(True)
# FIXME: write annotations and notes
# _sbase_notes(c, com.notes)
# _sbase_annotations(c, com.annotation)
# Species
for metabolite in cobra_model.metabolites:
specie = model.createSpecies() # type: libsbml.Species
mid = metabolite.id
if f_replace and F_SPECIE_REV in f_replace:
mid = f_replace[F_SPECIE_REV](mid)
specie.setId(mid)
specie.setConstant(False)
specie.setBoundaryCondition(False)
specie.setHasOnlySubstanceUnits(False)
specie.setName(metabolite.name)
specie.setCompartment(metabolite.compartment)
s_fbc = specie.getPlugin("fbc") # type: libsbml.FbcSpeciesPlugin
if metabolite.charge is not None:
s_fbc.setCharge(metabolite.charge)
if metabolite.formula is not None:
s_fbc.setChemicalFormula(metabolite.formula)
_sbase_annotations(specie, metabolite.annotation)
_sbase_notes_dict(specie, metabolite.notes)
# Genes
for cobra_gene in cobra_model.genes:
gp = model_fbc.createGeneProduct() # type: libsbml.GeneProduct
gid = cobra_gene.id
if f_replace and F_GENE_REV in f_replace:
gid = f_replace[F_GENE_REV](gid)
gp.setId(gid)
gname = cobra_gene.name
if gname is None or len(gname) == 0:
gname = gid
gp.setName(gname)
gp.setLabel(gid)
_sbase_annotations(gp, cobra_gene.annotation)
_sbase_notes_dict(gp, cobra_gene.notes)
# Objective
objective = model_fbc.createObjective() # type: libsbml.Objective
objective.setId("obj")
objective.setType(SHORT_LONG_DIRECTION[cobra_model.objective.direction])
model_fbc.setActiveObjectiveId("obj")
# Reactions
reaction_coefficients = linear_reaction_coefficients(cobra_model)
for cobra_reaction in cobra_model.reactions:
rid = cobra_reaction.id
if f_replace and F_REACTION_REV in f_replace:
rid = f_replace[F_REACTION_REV](rid)
reaction = model.createReaction() # type: libsbml.Reaction
reaction.setId(rid)
reaction.setName(cobra_reaction.name)
reaction.setFast(False)
reaction.setReversible((cobra_reaction.lower_bound < 0))
_sbase_annotations(reaction, cobra_reaction.annotation)
_sbase_notes_dict(reaction, cobra_reaction.notes)
# stoichiometry
for metabolite, stoichiometry in iteritems(cobra_reaction._metabolites): # noqa: E501
sid = metabolite.id
if f_replace and F_SPECIE_REV in f_replace:
sid = f_replace[F_SPECIE_REV](sid)
if stoichiometry < 0:
sref = reaction.createReactant() # noqa: E501 type: libsbml.SpeciesReference
sref.setSpecies(sid)
sref.setStoichiometry(-stoichiometry)
sref.setConstant(True)
else:
sref = reaction.createProduct() # noqa: E501 type: libsbml.SpeciesReference
sref.setSpecies(sid)
sref.setStoichiometry(stoichiometry)
sref.setConstant(True)
# bounds
r_fbc = reaction.getPlugin("fbc") # type: libsbml.FbcReactionPlugin
r_fbc.setLowerFluxBound(_create_bound(model, cobra_reaction,
"lower_bound",
f_replace=f_replace, units=units,
flux_udef=flux_udef))
r_fbc.setUpperFluxBound(_create_bound(model, cobra_reaction,
"upper_bound",
f_replace=f_replace, units=units,
flux_udef=flux_udef))
# GPR
gpr = cobra_reaction.gene_reaction_rule
if gpr is not None and len(gpr) > 0:
# replace ids in string
if f_replace and F_GENE_REV in f_replace:
gpr = gpr.replace('(', '( ')
gpr = gpr.replace(')', ' )')
tokens = gpr.split(' ')
for k in range(len(tokens)):
if tokens[k] not in [' ', 'and', 'or', '(', ')']:
tokens[k] = f_replace[F_GENE_REV](tokens[k])
gpr_new = " ".join(tokens)
gpa = r_fbc.createGeneProductAssociation() # noqa: E501 type: libsbml.GeneProductAssociation
gpa.setAssociation(gpr_new)
# objective coefficients
if reaction_coefficients.get(cobra_reaction, 0) != 0:
flux_obj = objective.createFluxObjective() # noqa: E501 type: libsbml.FluxObjective
flux_obj.setReaction(rid)
flux_obj.setCoefficient(cobra_reaction.objective_coefficient)
# write groups
if len(cobra_model.groups) > 0:
doc.enablePackage(
"http://www.sbml.org/sbml/level3/version1/groups/version1",
"groups", True)
doc.setPackageRequired("groups", False)
model_group = model.getPlugin("groups") # noqa: E501 type: libsbml.GroupsModelPlugin
for cobra_group in cobra_model.groups:
group = model_group.createGroup() # type: libsbml.Group
group.setId(cobra_group.id)
group.setName(cobra_group.name)
group.setKind(cobra_group.kind)
_sbase_notes_dict(group, cobra_group.notes)
_sbase_annotations(group, cobra_group.annotation)
for cobra_member in cobra_group.members:
member = group.createMember() # type: libsbml.Member
mid = cobra_member.id
m_type = str(type(cobra_member))
# id replacements
if "Reaction" in m_type:
if f_replace and F_REACTION_REV in f_replace:
mid = f_replace[F_REACTION_REV](mid)
if "Metabolite" in m_type:
if f_replace and F_SPECIE_REV in f_replace:
mid = f_replace[F_SPECIE_REV](mid)
if "Gene" in m_type:
if f_replace and F_GENE_REV in f_replace:
mid = f_replace[F_GENE_REV](mid)
member.setIdRef(mid)
if cobra_member.name and len(cobra_member.name) > 0:
member.setName(cobra_member.name)
return doc |
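_model_to_sbml is an internal helper; in cobrapy it is normally reached through the public writer cobra.io.write_sbml_model. A hedged end-to-end sketch with a one-reaction toy model (identifiers and the output file name are placeholders):

from cobra import Model, Metabolite, Reaction
import cobra.io

model = Model('toy')
a = Metabolite('A_c', compartment='c')
b = Metabolite('B_c', compartment='c')
rxn = Reaction('R1', lower_bound=0, upper_bound=1000)
rxn.add_metabolites({a: -1.0, b: 1.0})
model.add_reactions([rxn])
cobra.io.write_sbml_model(model, 'toy_fbc2.xml')  # SBML L3V1 with fbc-v2, as built above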
def p_file_comments_on_lics(self, f_term, predicate):
"""Sets file license comment."""
try:
for _, _, comment in self.graph.triples((f_term, predicate, None)):
self.builder.set_file_license_comment(self.doc, six.text_type(comment))
except CardinalityError:
self.more_than_one_error('file comments on license') | Sets file license comment. | Below is the instruction that describes the task:
### Input:
Sets file license comment.
### Response:
def p_file_comments_on_lics(self, f_term, predicate):
"""Sets file license comment."""
try:
for _, _, comment in self.graph.triples((f_term, predicate, None)):
self.builder.set_file_license_comment(self.doc, six.text_type(comment))
except CardinalityError:
self.more_than_one_error('file comments on license') |
def pseudotime(starting_node, edges, fitted_vals):
"""
Args:
starting_node (int): index of the starting node
edges (list): list of tuples (node1, node2)
fitted_vals (array): output of lineage (2 x cells)
Returns:
A 1d array containing the pseudotime value of each cell.
"""
# TODO
# 1. calculate a distance matrix...
distances = np.array([[sum((x - y)**2) for x in fitted_vals.T] for y in fitted_vals.T])
# 2. start from the root node/cell, calculate distance along graph
distance_dict = graph_distances(starting_node, edges, distances)
output = []
for i in range(fitted_vals.shape[1]):
output.append(distance_dict[i])
return np.array(output) | Args:
starting_node (int): index of the starting node
edges (list): list of tuples (node1, node2)
fitted_vals (array): output of lineage (2 x cells)
Returns:
A 1d array containing the pseudotime value of each cell. | Below is the instruction that describes the task:
### Input:
Args:
starting_node (int): index of the starting node
edges (list): list of tuples (node1, node2)
fitted_vals (array): output of lineage (2 x cells)
Returns:
A 1d array containing the pseudotime value of each cell.
### Response:
def pseudotime(starting_node, edges, fitted_vals):
"""
Args:
starting_node (int): index of the starting node
edges (list): list of tuples (node1, node2)
fitted_vals (array): output of lineage (2 x cells)
Returns:
A 1d array containing the pseudotime value of each cell.
"""
# TODO
# 1. calculate a distance matrix...
distances = np.array([[sum((x - y)**2) for x in fitted_vals.T] for y in fitted_vals.T])
# 2. start from the root node/cell, calculate distance along graph
distance_dict = graph_distances(starting_node, edges, distances)
output = []
for i in range(fitted_vals.shape[1]):
output.append(distance_dict[i])
return np.array(output) |
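pseudotime leans on a graph_distances helper that is not shown here. A plausible stand-in (purely illustrative, not the original implementation) builds a weighted graph over the listed edges, with weights taken from the pairwise distance matrix, and runs single-source shortest paths:

import numpy as np
from scipy.sparse import lil_matrix
from scipy.sparse.csgraph import dijkstra

def graph_distances_sketch(start, edges, distances):
    # Hypothetical stand-in for graph_distances(): shortest path from
    # `start` to every node, walking only the given edges.
    n = distances.shape[0]
    graph = lil_matrix((n, n))
    for i, j in edges:
        graph[i, j] = distances[i, j]
        graph[j, i] = distances[i, j]
    dist = dijkstra(graph.tocsr(), directed=False, indices=start)
    return {i: dist[i] for i in range(n)}

fitted = np.array([[0.0, 1.0, 2.0, 3.0],    # toy 2 x 4 lineage output
                   [0.0, 0.0, 0.0, 0.0]])
dmat = np.array([[sum((x - y) ** 2) for x in fitted.T] for y in fitted.T])
print(graph_distances_sketch(0, [(0, 1), (1, 2), (2, 3)], dmat))  # {0: 0.0, 1: 1.0, 2: 2.0, 3: 3.0}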
def get_probs_for_labels(labels, prediction_results):
""" Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
"""
probs = []
if 'probability' in prediction_results:
# 'probability' exists so top-n is set to none zero, and results are like
# "predicted, predicted_2,...,probability,probability_2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
prob_name = 'probability'
else:
prob_name = 'probability' + k[9:]
probs_one[labels.index(v)] = r[prob_name]
probs.append(probs_one)
return probs
else:
# 'probability' does not exist, so top-n is set to zero. Results are like
# "predicted, class_name1, class_name2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if k in labels:
probs_one[labels.index(k)] = v
probs.append(probs_one)
return probs | Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class. | Below is the instruction that describes the task:
### Input:
Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
### Response:
def get_probs_for_labels(labels, prediction_results):
""" Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class.
"""
probs = []
if 'probability' in prediction_results:
# 'probability' exists so top-n is set to none zero, and results are like
# "predicted, predicted_2,...,probability,probability_2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
prob_name = 'probability'
else:
prob_name = 'probability' + k[9:]
probs_one[labels.index(v)] = r[prob_name]
probs.append(probs_one)
return probs
else:
# 'probability' does not exist, so top-n is set to zero. Results are like
# "predicted, class_name1, class_name2,...
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if k in labels:
probs_one[labels.index(k)] = v
probs.append(probs_one)
return probs |
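A hedged usage sketch with a hand-built DataFrame in the top-n style described in the docstring (top_n=2 over three labels); it assumes get_probs_for_labels from above is in scope.

import pandas as pd

labels = ['daisy', 'rose', 'sunflower']
prediction_results = pd.DataFrame([
    {'predicted': 'daisy', 'probability': 0.8,
     'predicted_2': 'rose', 'probability_2': 0.1},
    {'predicted': 'sunflower', 'probability': 0.9,
     'predicted_2': 'daisy', 'probability_2': 0.01},
])
print(get_probs_for_labels(labels, prediction_results))
# [[0.8, 0.1, 0.0], [0.01, 0.0, 0.9]]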
def last_string(self):
"""The last entry in code_builder, or ``None`` if none so far."""
cb = self.code_builder
len_cb = len(cb)
if len_cb > 0:
return cb[len_cb - 1]
else:
return None | The last entry in code_builder, or ``None`` if none so far. | Below is the instruction that describes the task:
### Input:
The last entry in code_builder, or ``None`` if none so far.
### Response:
def last_string(self):
"""The last entry in code_builder, or ``None`` if none so far."""
cb = self.code_builder
len_cb = len(cb)
if len_cb > 0:
return cb[len_cb - 1]
else:
return None |
def _todo_do_update(self, line):
"update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes}"
table, line = self.get_table_params(line)
hkey, line = line.split(" ", 1)
expected, attr = self.get_expected(line)
if attr[0] == '-':
op, attr = attr.split(" ", 1)
op = op[1]
else:
op = "u"
if attr[0] == '+':
ret, attr = attr.split(" ", 1)
ret = ret[1:]
else:
ret = "ALL_NEW"
if ',' in hkey:
hkey, rkey = hkey.split(",", 1)
else:
rkey = None
item = table.new_item(hash_key=self.get_typed_key_value(table, hkey), range_key=self.get_typed_key_value(table, rkey, False))
attr = json.loads(attr.strip())
for name in attr.keys():
value = attr[name]
if isinstance(value, list):
value = set(value)
if op == 'a':
item.add_attribute(name, value)
elif op == 'd':
item.delete_attribute(name, value)
else:
item.put_attribute(name, value)
self.pprint(item)
updated = item.save(expected_value=expected or None, return_values=ret)
self.pprint(updated)
if self.consumed:
print "consumed units:", item.consumed_units | update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes} | Below is the the instruction that describes the task:
### Input:
update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes}
### Response:
def _todo_do_update(self, line):
"update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes}"
table, line = self.get_table_params(line)
hkey, line = line.split(" ", 1)
expected, attr = self.get_expected(line)
if attr[0] == '-':
op, attr = attr.split(" ", 1)
op = op[1]
else:
op = "u"
if attr[0] == '+':
ret, attr = attr.split(" ", 1)
ret = ret[1:]
else:
ret = "ALL_NEW"
if ',' in hkey:
hkey, rkey = hkey.split(",", 1)
else:
rkey = None
item = table.new_item(hash_key=self.get_typed_key_value(table, hkey), range_key=self.get_typed_key_value(table, rkey, False))
attr = json.loads(attr.strip())
for name in attr.keys():
value = attr[name]
if isinstance(value, list):
value = set(value)
if op == 'a':
item.add_attribute(name, value)
elif op == 'd':
item.delete_attribute(name, value)
else:
item.put_attribute(name, value)
self.pprint(item)
updated = item.save(expected_value=expected or None, return_values=ret)
self.pprint(updated)
if self.consumed:
print "consumed units:", item.consumed_units |
def replace_namespaced_service_account(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_service_account # noqa: E501
replace the specified ServiceAccount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceAccount (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ServiceAccount body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ServiceAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | replace_namespaced_service_account # noqa: E501
replace the specified ServiceAccount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceAccount (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ServiceAccount body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ServiceAccount
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
replace_namespaced_service_account # noqa: E501
replace the specified ServiceAccount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceAccount (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ServiceAccount body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ServiceAccount
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_namespaced_service_account(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_service_account # noqa: E501
replace the specified ServiceAccount # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceAccount (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ServiceAccount body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ServiceAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data |
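A hedged usage sketch with the official kubernetes Python client; it assumes a cluster reachable via ~/.kube/config and an existing ServiceAccount named 'builder' in the 'default' namespace (both placeholders). Replace semantics are read-modify-write:

from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()

sa = v1.read_namespaced_service_account(name='builder', namespace='default')
sa.metadata.labels = {'team': 'ci'}                    # tweak the fetched object
v1.replace_namespaced_service_account(name='builder', namespace='default', body=sa)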
def list_sessions(logged_in_users_only=False):
'''
List information about the sessions.
.. versionadded:: 2016.11.0
:param logged_in_users_only: If True, only return sessions with users logged in.
:return: A list containing dictionaries of session information.
CLI Example:
.. code-block:: bash
salt '*' rdp.list_sessions
'''
ret = list()
server = win32ts.WTS_CURRENT_SERVER_HANDLE
protocols = {win32ts.WTS_PROTOCOL_TYPE_CONSOLE: 'console',
win32ts.WTS_PROTOCOL_TYPE_ICA: 'citrix',
win32ts.WTS_PROTOCOL_TYPE_RDP: 'rdp'}
statuses = {win32ts.WTSActive: 'active', win32ts.WTSConnected: 'connected',
win32ts.WTSConnectQuery: 'connect_query', win32ts.WTSShadow: 'shadow',
win32ts.WTSDisconnected: 'disconnected', win32ts.WTSIdle: 'idle',
win32ts.WTSListen: 'listen', win32ts.WTSReset: 'reset',
win32ts.WTSDown: 'down', win32ts.WTSInit: 'init'}
for session in win32ts.WTSEnumerateSessions(server):
user = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSUserName) or None
protocol_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSClientProtocolType)
status_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSConnectState)
protocol = protocols.get(protocol_id, 'unknown')
connection_status = statuses.get(status_id, 'unknown')
station = session['WinStationName'] or 'Disconnected'
connection_info = {'connection_status': connection_status, 'protocol': protocol,
'session_id': session['SessionId'], 'station': station,
'user': user}
if logged_in_users_only:
if user:
ret.append(connection_info)
else:
ret.append(connection_info)
if not ret:
_LOG.warning('No sessions found.')
return sorted(ret, key=lambda k: k['session_id']) | List information about the sessions.
.. versionadded:: 2016.11.0
:param logged_in_users_only: If True, only return sessions with users logged in.
:return: A list containing dictionaries of session information.
CLI Example:
.. code-block:: bash
salt '*' rdp.list_sessions | Below is the instruction that describes the task:
### Input:
List information about the sessions.
.. versionadded:: 2016.11.0
:param logged_in_users_only: If True, only return sessions with users logged in.
:return: A list containing dictionaries of session information.
CLI Example:
.. code-block:: bash
salt '*' rdp.list_sessions
### Response:
def list_sessions(logged_in_users_only=False):
'''
List information about the sessions.
.. versionadded:: 2016.11.0
:param logged_in_users_only: If True, only return sessions with users logged in.
:return: A list containing dictionaries of session information.
CLI Example:
.. code-block:: bash
salt '*' rdp.list_sessions
'''
ret = list()
server = win32ts.WTS_CURRENT_SERVER_HANDLE
protocols = {win32ts.WTS_PROTOCOL_TYPE_CONSOLE: 'console',
win32ts.WTS_PROTOCOL_TYPE_ICA: 'citrix',
win32ts.WTS_PROTOCOL_TYPE_RDP: 'rdp'}
statuses = {win32ts.WTSActive: 'active', win32ts.WTSConnected: 'connected',
win32ts.WTSConnectQuery: 'connect_query', win32ts.WTSShadow: 'shadow',
win32ts.WTSDisconnected: 'disconnected', win32ts.WTSIdle: 'idle',
win32ts.WTSListen: 'listen', win32ts.WTSReset: 'reset',
win32ts.WTSDown: 'down', win32ts.WTSInit: 'init'}
for session in win32ts.WTSEnumerateSessions(server):
user = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSUserName) or None
protocol_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSClientProtocolType)
status_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'],
win32ts.WTSConnectState)
protocol = protocols.get(protocol_id, 'unknown')
connection_status = statuses.get(status_id, 'unknown')
station = session['WinStationName'] or 'Disconnected'
connection_info = {'connection_status': connection_status, 'protocol': protocol,
'session_id': session['SessionId'], 'station': station,
'user': user}
if logged_in_users_only:
if user:
ret.append(connection_info)
else:
ret.append(connection_info)
if not ret:
_LOG.warning('No sessions found.')
return sorted(ret, key=lambda k: k['session_id']) |
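Beyond the CLI shown in the docstring, the same execution-module function can be reached from Python through salt's LocalClient; a hedged sketch that assumes a running master with targeted minions:

import salt.client

local = salt.client.LocalClient()
result = local.cmd('*', 'rdp.list_sessions', [True])   # True -> logged-in users only
for minion, sessions in result.items():
    print(minion, sessions)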
def bulkWrite(self, endpoint, buffer, timeout = 100):
r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
"""
return self.dev.write(endpoint, buffer, timeout) | r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written. | Below is the instruction that describes the task:
### Input:
r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
### Response:
def bulkWrite(self, endpoint, buffer, timeout = 100):
r"""Perform a bulk write request to the endpoint specified.
Arguments:
endpoint: endpoint number.
buffer: sequence data buffer to write.
This parameter can be any sequence type.
timeout: operation timeout in milliseconds. (default: 100)
Returns the number of bytes written.
"""
return self.dev.write(endpoint, buffer, timeout) |
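This wrapper exposes the legacy PyUSB 0.x handle interface on top of usb.core; a hedged sketch of the modern call it delegates to (vendor/product IDs and the endpoint address are placeholders):

import usb.core

dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)  # placeholder IDs
if dev is None:
    raise RuntimeError('device not found')
dev.set_configuration()
written = dev.write(0x01, b'hello', timeout=100)        # bulk OUT endpoint 0x01
print('wrote %d bytes' % written)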
def _deliver_message(self, msg):
"""
Deliver the message to the subscripted actions
"""
my_subscribed_actions = self.subscriptions.get(msg.sender, [])
for action in my_subscribed_actions:
if Global.CONFIG_MANAGER.tracing_mode:
Global.LOGGER.debug(f"delivering message to {action.name}")
action.on_input_received(msg) | Deliver the message to the subscripted actions | Below is the instruction that describes the task:
### Input:
Deliver the message to the subscripted actions
### Response:
def _deliver_message(self, msg):
"""
Deliver the message to the subscripted actions
"""
my_subscribed_actions = self.subscriptions.get(msg.sender, [])
for action in my_subscribed_actions:
if Global.CONFIG_MANAGER.tracing_mode:
Global.LOGGER.debug(f"delivering message to {action.name}")
action.on_input_received(msg) |
def save_model(self, request, obj, form, change):
"""Saves TreeItem model under certain Tree.
Handles item's parent assignment exception.
"""
if change:
# No, you're not allowed to make item parent of itself
if obj.parent is not None and obj.parent.id == obj.id:
obj.parent = self.previous_parent
messages.warning(
request, _("Item's parent left unchanged. Item couldn't be parent to itself."), '', True)
obj.tree = self.tree
obj.save() | Saves TreeItem model under certain Tree.
Handles item's parent assignment exception. | Below is the instruction that describes the task:
### Input:
Saves TreeItem model under certain Tree.
Handles item's parent assignment exception.
### Response:
def save_model(self, request, obj, form, change):
"""Saves TreeItem model under certain Tree.
Handles item's parent assignment exception.
"""
if change:
# No, you're not allowed to make item parent of itself
if obj.parent is not None and obj.parent.id == obj.id:
obj.parent = self.previous_parent
messages.warning(
request, _("Item's parent left unchanged. Item couldn't be parent to itself."), '', True)
obj.tree = self.tree
obj.save() |
def text_pieces(self, method, uplaces=2, use_exponent=True):
"""Return (main, dhigh, dlow, sharedexponent), all as strings. The
delta terms do not have sign indicators. Any item except the first
may be None.
`method` is passed to Uval.repvals() to compute representative
statistical limits.
"""
md, hi, lo = self.repvals(method)
if hi == lo:
return '%g' % lo, None, None, None
if not np.isfinite([lo, md, hi]).all():
raise ValueError('got nonfinite values when formatting Uval')
# Deltas. Round to limited # of places because we don't actually know
# the fourth moment of the thing we're trying to describe.
from numpy import abs, ceil, floor, log10
dh = hi - md
dl = md - lo
if dh <= 0:
raise ValueError('strange problem formatting Uval; '
'hi=%g md=%g dh=%g' % (hi, md, dh))
if dl <= 0:
raise ValueError('strange problem formatting Uval; '
'lo=%g md=%g dl=%g' % (lo, md, dl))
p = int(ceil(log10(dh)))
rdh = round(dh * 10**(-p), uplaces) * 10**p
p = int(ceil(log10(dl)))
rdl = round(dl * 10**(-p), uplaces) * 10**p
# The least significant place to worry about is the L.S.P. of one of
# the deltas, which we can find relative to its M.S.P. Any precision
# in the datum beyond this point is false.
lsp = int(ceil(log10(min(rdh, rdl)))) - uplaces
# We should round the datum since it might be something like
# 0.999+-0.1 and we're about to try to decide what its most
# significant place is. Might get -1 rather than 0.
rmd = round(md, -lsp)
if rmd == -0.: # 0 = -0, too, but no problem there.
rmd = 0.
# The most significant place to worry about is the M.S.P. of any of
# the datum or the deltas. rdl and rdl must be positive, but not
# necessarily rmd.
msp = int(floor(log10(max(abs(rmd), rdh, rdl))))
# If we're not very large or very small, or it's been explicitly
# disabled, don't use scientific notation.
if (msp > -3 and msp < 3) or not use_exponent:
srmd = '%.*f' % (-lsp, rmd)
srdh = '%.*f' % (-lsp, rdh)
srdl = '%.*f' % (-lsp, rdl)
return srmd, srdh, srdl, None
# Use scientific notation. Adjust values, then format.
armd = rmd * 10**-msp
ardh = rdh * 10**-msp
ardl = rdl * 10**-msp
prec = msp - lsp
sarmd = '%.*f' % (prec, armd)
sardh = '%.*f' % (prec, ardh)
sardl = '%.*f' % (prec, ardl)
return sarmd, sardh, sardl, str(msp) | Return (main, dhigh, dlow, sharedexponent), all as strings. The
delta terms do not have sign indicators. Any item except the first
may be None.
`method` is passed to Uval.repvals() to compute representative
statistical limits. | Below is the instruction that describes the task:
### Input:
Return (main, dhigh, dlow, sharedexponent), all as strings. The
delta terms do not have sign indicators. Any item except the first
may be None.
`method` is passed to Uval.repvals() to compute representative
statistical limits.
### Response:
def text_pieces(self, method, uplaces=2, use_exponent=True):
"""Return (main, dhigh, dlow, sharedexponent), all as strings. The
delta terms do not have sign indicators. Any item except the first
may be None.
`method` is passed to Uval.repvals() to compute representative
statistical limits.
"""
md, hi, lo = self.repvals(method)
if hi == lo:
return '%g' % lo, None, None, None
if not np.isfinite([lo, md, hi]).all():
raise ValueError('got nonfinite values when formatting Uval')
# Deltas. Round to limited # of places because we don't actually know
# the fourth moment of the thing we're trying to describe.
from numpy import abs, ceil, floor, log10
dh = hi - md
dl = md - lo
if dh <= 0:
raise ValueError('strange problem formatting Uval; '
'hi=%g md=%g dh=%g' % (hi, md, dh))
if dl <= 0:
raise ValueError('strange problem formatting Uval; '
'lo=%g md=%g dl=%g' % (lo, md, dl))
p = int(ceil(log10(dh)))
rdh = round(dh * 10**(-p), uplaces) * 10**p
p = int(ceil(log10(dl)))
rdl = round(dl * 10**(-p), uplaces) * 10**p
# The least significant place to worry about is the L.S.P. of one of
# the deltas, which we can find relative to its M.S.P. Any precision
# in the datum beyond this point is false.
lsp = int(ceil(log10(min(rdh, rdl)))) - uplaces
# We should round the datum since it might be something like
# 0.999+-0.1 and we're about to try to decide what its most
# significant place is. Might get -1 rather than 0.
rmd = round(md, -lsp)
if rmd == -0.: # 0 = -0, too, but no problem there.
rmd = 0.
# The most significant place to worry about is the M.S.P. of any of
# the datum or the deltas. rdl and rdl must be positive, but not
# necessarily rmd.
msp = int(floor(log10(max(abs(rmd), rdh, rdl))))
# If we're not very large or very small, or it's been explicitly
# disabled, don't use scientific notation.
if (msp > -3 and msp < 3) or not use_exponent:
srmd = '%.*f' % (-lsp, rmd)
srdh = '%.*f' % (-lsp, rdh)
srdl = '%.*f' % (-lsp, rdl)
return srmd, srdh, srdl, None
# Use scientific notation. Adjust values, then format.
armd = rmd * 10**-msp
ardh = rdh * 10**-msp
ardl = rdl * 10**-msp
prec = msp - lsp
sarmd = '%.*f' % (prec, armd)
sardh = '%.*f' % (prec, ardh)
sardl = '%.*f' % (prec, ardl)
return sarmd, sardh, sardl, str(msp) |
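A standalone walk-through of the rounding bookkeeping above, without the Uval class itself, for md=12.34, hi=12.90, lo=11.95 and uplaces=2 (made-up numbers):

from math import ceil, floor, log10

md, hi, lo, uplaces = 12.34, 12.90, 11.95, 2
dh, dl = hi - md, md - lo                               # ~0.56 and ~0.39
p = int(ceil(log10(dh)))
rdh = round(dh * 10 ** (-p), uplaces) * 10 ** p
p = int(ceil(log10(dl)))
rdl = round(dl * 10 ** (-p), uplaces) * 10 ** p
lsp = int(ceil(log10(min(rdh, rdl)))) - uplaces         # least significant place
rmd = round(md, -lsp)
msp = int(floor(log10(max(abs(rmd), rdh, rdl))))        # 1 here, so no exponent is needed
print('%.*f' % (-lsp, rmd), '%.*f' % (-lsp, rdh), '%.*f' % (-lsp, rdl))  # 12.34 0.56 0.39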
def get_precision(self):
"""
Get the current precision from the sensor.
:returns: sensor resolution from 9-12 bits
:rtype: int
"""
config_str = self.raw_sensor_strings[1].split()[4] # Byte 5 is the config register
bit_base = int(config_str, 16) >> 5 # Bit 5-6 contains the resolution, cut off the rest
return bit_base + 9 | Get the current precision from the sensor.
:returns: sensor resolution from 9-12 bits
:rtype: int | Below is the instruction that describes the task:
### Input:
Get the current precision from the sensor.
:returns: sensor resolution from 9-12 bits
:rtype: int
### Response:
def get_precision(self):
"""
Get the current precision from the sensor.
:returns: sensor resolution from 9-12 bits
:rtype: int
"""
config_str = self.raw_sensor_strings[1].split()[4] # Byte 5 is the config register
bit_base = int(config_str, 16) >> 5 # Bit 5-6 contains the resolution, cut off the rest
return bit_base + 9 |
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
An initaliser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where
``dim[0]`` is equal to the input dimension of the parameter and the ``scale``
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
Parameters
----------
tensor : ``torch.Tensor``, required.
The tensor to initialise.
nonlinearity : ``str``, optional (default = "linear")
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the ``torch.nn.functional`` package.
Returns
-------
The initialised tensor.
"""
size = 1.
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value) | An initaliser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where
``dim[0]`` is equal to the input dimension of the parameter and the ``scale``
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
Parameters
----------
tensor : ``torch.Tensor``, required.
The tensor to initialise.
nonlinearity : ``str``, optional (default = "linear")
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the ``torch.nn.functional`` package.
Returns
-------
The initialised tensor. | Below is the instruction that describes the task:
### Input:
An initaliser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where
``dim[0]`` is equal to the input dimension of the parameter and the ``scale``
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
Parameters
----------
tensor : ``torch.Tensor``, required.
The tensor to initialise.
nonlinearity : ``str``, optional (default = "linear")
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the ``torch.nn.functional`` package.
Returns
-------
The initialised tensor.
### Response:
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
An initaliser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where
``dim[0]`` is equal to the input dimension of the parameter and the ``scale``
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
Parameters
----------
tensor : ``torch.Tensor``, required.
The tensor to initialise.
nonlinearity : ``str``, optional (default = "linear")
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the ``torch.nn.functional`` package.
Returns
-------
The initialised tensor.
"""
size = 1.
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value) |
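A brief usage sketch for the record above, added for illustration: it assumes uniform_unit_scaling is in scope as defined (with torch and math imported); the tensor shape and the "relu" nonlinearity are arbitrary example choices.
import math
import torch
# Illustrative projection weight with input dimension 256 and output dimension 128.
weight = torch.empty(256, 128)
uniform_unit_scaling(weight, nonlinearity="relu")  # fills the tensor in place
# For this shape the bound is sqrt(3 / 256) * gain("relu"), roughly 0.15.
bound = math.sqrt(3 / 256) * torch.nn.init.calculate_gain("relu")
assert float(weight.abs().max()) <= bound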
def task_view_generator(job_descriptor):
"""Generator that yields a task-specific view of the job.
This generator exists to make it easy for callers to iterate over the tasks
in a JobDescriptor. Each pass yields a new JobDescriptor with a single task.
Args:
job_descriptor: A JobDescriptor with 1 or more tasks.
Yields:
A JobDescriptor with a single task.
"""
for task_descriptor in job_descriptor.task_descriptors:
jd = JobDescriptor(job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, [task_descriptor])
yield jd | Generator that yields a task-specific view of the job.
This generator exists to make it easy for callers to iterate over the tasks
in a JobDescriptor. Each pass yields a new JobDescriptor with a single task.
Args:
job_descriptor: A JobDescriptor with 1 or more tasks.
Yields:
A JobDescriptor with a single task. | Below is the the instruction that describes the task:
### Input:
Generator that yields a task-specific view of the job.
This generator exists to make it easy for callers to iterate over the tasks
in a JobDescriptor. Each pass yields a new JobDescriptor with a single task.
Args:
job_descriptor: A JobDescriptor with 1 or more tasks.
Yields:
A JobDescriptor with a single task.
### Response:
def task_view_generator(job_descriptor):
"""Generator that yields a task-specific view of the job.
This generator exists to make it easy for callers to iterate over the tasks
in a JobDescriptor. Each pass yields a new JobDescriptor with a single task.
Args:
job_descriptor: A JobDescriptor with 1 or more tasks.
Yields:
A JobDescriptor with a single task.
"""
for task_descriptor in job_descriptor.task_descriptors:
jd = JobDescriptor(job_descriptor.job_metadata, job_descriptor.job_params,
job_descriptor.job_resources, [task_descriptor])
yield jd |
def add(self, si):
'''puts `si` into the currently open chunk, which it creates if
necessary. If this item causes the chunk to cross chunk_max,
then the chunk is closed after adding.
'''
if self.o_chunk is None:
if os.path.exists(self.t_path):
os.remove(self.t_path)
self.o_chunk = streamcorpus.Chunk(self.t_path, mode='wb')
self.o_chunk.add(si)
logger.debug('added %d-th item to chunk', len(self.o_chunk))
if len(self.o_chunk) == self.chunk_max:
self.close() | puts `si` into the currently open chunk, which it creates if
necessary. If this item causes the chunk to cross chunk_max,
then the chunk is closed after adding. | Below is the the instruction that describes the task:
### Input:
puts `si` into the currently open chunk, which it creates if
necessary. If this item causes the chunk to cross chunk_max,
then the chunk is closed after adding.
### Response:
def add(self, si):
'''puts `si` into the currently open chunk, which it creates if
necessary. If this item causes the chunk to cross chunk_max,
then the chunk is closed after adding.
'''
if self.o_chunk is None:
if os.path.exists(self.t_path):
os.remove(self.t_path)
self.o_chunk = streamcorpus.Chunk(self.t_path, mode='wb')
self.o_chunk.add(si)
logger.debug('added %d-th item to chunk', len(self.o_chunk))
if len(self.o_chunk) == self.chunk_max:
self.close() |
def QA_SU_save_future_min(client=DATABASE, ui_log=None, ui_progress=None):
"""save future_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
future_list = [
item for item in QA_fetch_get_future_list().code.unique().tolist()
if str(item)[-2:] in ['L8',
'L9']
]
coll = client.future_min
coll.create_index(
[
('code',
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING)
]
)
err = []
def __saving_work(code, coll):
QA_util_log_info(
'##JOB13 Now Saving Future_MIN ==== {}'.format(str(code)),
ui_log=ui_log
)
try:
for type in ['1min', '5min', '15min', '30min', '60min']:
ref_ = coll.find({'code': str(code)[0:6], 'type': type})
end_time = str(now_time())[0:19]
if ref_.count() > 0:
start_time = ref_[ref_.count() - 1]['datetime']
QA_util_log_info(
'##JOB13.{} Now Saving Future {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data[1::])
)
else:
start_time = '2015-01-01'
QA_util_log_info(
'##JOB13.{} Now Saving Future {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)
)
except:
err.append(code)
executor = ThreadPoolExecutor(max_workers=4)
res = {
executor.submit(__saving_work,
future_list[i_],
coll)
for i_ in range(len(future_list))
} # multi index ./.
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(future_list)),
ui_log=ui_log
)
strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
str(float(count / len(future_list) * 100))[0:4] + '%'
)
intLogProgress = int(float(count / len(future_list) * 10000.0))
QA_util_log_info(
strLogProgress,
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intLogProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info('SUCCESS', ui_log=ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log) | save future_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE}) | Below is the the instruction that describes the task:
### Input:
save future_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
### Response:
def QA_SU_save_future_min(client=DATABASE, ui_log=None, ui_progress=None):
"""save future_min
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
future_list = [
item for item in QA_fetch_get_future_list().code.unique().tolist()
if str(item)[-2:] in ['L8',
'L9']
]
coll = client.future_min
coll.create_index(
[
('code',
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING)
]
)
err = []
def __saving_work(code, coll):
QA_util_log_info(
'##JOB13 Now Saving Future_MIN ==== {}'.format(str(code)),
ui_log=ui_log
)
try:
for type in ['1min', '5min', '15min', '30min', '60min']:
ref_ = coll.find({'code': str(code)[0:6], 'type': type})
end_time = str(now_time())[0:19]
if ref_.count() > 0:
start_time = ref_[ref_.count() - 1]['datetime']
QA_util_log_info(
'##JOB13.{} Now Saving Future {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data[1::])
)
else:
start_time = '2015-01-01'
QA_util_log_info(
'##JOB13.{} Now Saving Future {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)
)
except:
err.append(code)
executor = ThreadPoolExecutor(max_workers=4)
res = {
executor.submit(__saving_work,
future_list[i_],
coll)
for i_ in range(len(future_list))
} # multi index ./.
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(future_list)),
ui_log=ui_log
)
strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
str(float(count / len(future_list) * 100))[0:4] + '%'
)
intLogProgress = int(float(count / len(future_list) * 10000.0))
QA_util_log_info(
strLogProgress,
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intLogProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info('SUCCESS', ui_log=ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log) |
def _read_mode_tsopt(self, size, kind):
"""Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply
"""
temp = struct.unpack('>II', self._read_fileng(size))
data = dict(
kind=kind,
length=size,
val=temp[0],
ecr=temp[1],
)
return data | Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply | Below is the the instruction that describes the task:
### Input:
Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply
### Response:
def _read_mode_tsopt(self, size, kind):
"""Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply
"""
temp = struct.unpack('>II', self._read_fileng(size))
data = dict(
kind=kind,
length=size,
val=temp[0],
ecr=temp[1],
)
return data |
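A standalone sketch of the eight-byte TSval/TSecr layout that the parser above unpacks; the struct format mirrors the record, while the pack step and the sample numbers are illustrative additions.
import struct
# Body of a TCP Timestamps option: TSval then TSecr, both big-endian 32-bit unsigned integers.
body = struct.pack('>II', 123456789, 987654321)
# The parser reads the same layout back; the kind/length bookkeeping is omitted in this sketch.
tsval, tsecr = struct.unpack('>II', body)
assert (tsval, tsecr) == (123456789, 987654321)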
def send(self, api_key=None, secret=None, list_data=None, auth=False, **kwargs):
"""Sends the given Payload to the API via the websocket connection.
:param kwargs: payload parameters as key=value pairs
:return:
"""
if auth:
nonce = str(int(time.time() * 10000000))
auth_string = 'AUTH' + nonce
auth_sig = hmac.new(secret.encode(), auth_string.encode(),
hashlib.sha384).hexdigest()
payload = {'event': 'auth', 'apiKey': api_key, 'authSig': auth_sig,
'authPayload': auth_string, 'authNonce': nonce}
payload = json.dumps(payload)
elif list_data:
payload = json.dumps(list_data)
else:
payload = json.dumps(kwargs)
self.log.debug("send(): Sending payload to API: %s", payload)
try:
self.socket.send(payload)
except websocket.WebSocketConnectionClosedException:
self.log.error("send(): Did not send out payload %s - client not connected. ", kwargs) | Sends the given Payload to the API via the websocket connection.
:param kwargs: payload parameters as key=value pairs
:return: | Below is the the instruction that describes the task:
### Input:
Sends the given Payload to the API via the websocket connection.
:param kwargs: payload parameters as key=value pairs
:return:
### Response:
def send(self, api_key=None, secret=None, list_data=None, auth=False, **kwargs):
"""Sends the given Payload to the API via the websocket connection.
:param kwargs: payload parameters as key=value pairs
:return:
"""
if auth:
nonce = str(int(time.time() * 10000000))
auth_string = 'AUTH' + nonce
auth_sig = hmac.new(secret.encode(), auth_string.encode(),
hashlib.sha384).hexdigest()
payload = {'event': 'auth', 'apiKey': api_key, 'authSig': auth_sig,
'authPayload': auth_string, 'authNonce': nonce}
payload = json.dumps(payload)
elif list_data:
payload = json.dumps(list_data)
else:
payload = json.dumps(kwargs)
self.log.debug("send(): Sending payload to API: %s", payload)
try:
self.socket.send(payload)
except websocket.WebSocketConnectionClosedException:
self.log.error("send(): Did not send out payload %s - client not connected. ", kwargs) |
def start(self):
"""
Starts the download.
:raises SbgError: If download is not in PREPARING state.
"""
if self._status == TransferState.PREPARING:
self._running.set()
super(Download, self).start()
self._status = TransferState.RUNNING
self._time_started = time.time()
else:
raise SbgError(
'Unable to start. Download not in PREPARING state.'
) | Starts the download.
:raises SbgError: If download is not in PREPARING state. | Below is the the instruction that describes the task:
### Input:
Starts the download.
:raises SbgError: If download is not in PREPARING state.
### Response:
def start(self):
"""
Starts the download.
:raises SbgError: If download is not in PREPARING state.
"""
if self._status == TransferState.PREPARING:
self._running.set()
super(Download, self).start()
self._status = TransferState.RUNNING
self._time_started = time.time()
else:
raise SbgError(
'Unable to start. Download not in PREPARING state.'
) |
def _generate_name(self, space, service_name, plan_name):
"""
Can generate a name based on the space, service name and plan.
"""
return str.join('-', [space, service_name, plan_name]).lower() | Can generate a name based on the space, service name and plan. | Below is the the instruction that describes the task:
### Input:
Can generate a name based on the space, service name and plan.
### Response:
def _generate_name(self, space, service_name, plan_name):
"""
Can generate a name based on the space, service name and plan.
"""
return str.join('-', [space, service_name, plan_name]).lower() |
def decode_solution(self, encoded_solution):
"""Return solution from an encoded representation."""
return self._decode_function(encoded_solution, *self._decode_args,
**self._decode_kwargs) | Return solution from an encoded representation. | Below is the the instruction that describes the task:
### Input:
Return solution from an encoded representation.
### Response:
def decode_solution(self, encoded_solution):
"""Return solution from an encoded representation."""
return self._decode_function(encoded_solution, *self._decode_args,
**self._decode_kwargs) |
def solve_with_sdpa(sdp, solverparameters=None):
"""Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
"""
solverexecutable = detect_sdpa(solverparameters)
if solverexecutable is None:
raise OSError("SDPA is not in the path or the executable provided is" +
" not correct")
primal, dual = 0, 0
tempfile_ = tempfile.NamedTemporaryFile()
tmp_filename = tempfile_.name
tempfile_.close()
tmp_dats_filename = tmp_filename + ".dat-s"
tmp_out_filename = tmp_filename + ".out"
write_to_sdpa(sdp, tmp_dats_filename)
command_line = [solverexecutable, "-ds", tmp_dats_filename,
"-o", tmp_out_filename]
if solverparameters is not None:
for key, value in list(solverparameters.items()):
if key == "executable":
continue
elif key == "paramsfile":
command_line.extend(["-p", value])
else:
raise ValueError("Unknown parameter for SDPA: " + key)
if sdp.verbose < 1:
with open(os.devnull, "w") as fnull:
call(command_line, stdout=fnull, stderr=fnull)
else:
call(command_line)
primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True,
True)
if sdp.verbose < 2:
os.remove(tmp_dats_filename)
os.remove(tmp_out_filename)
return primal+sdp.constant_term, \
dual+sdp.constant_term, x_mat, y_mat, status | Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string. | Below is the the instruction that describes the task:
### Input:
Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
### Response:
def solve_with_sdpa(sdp, solverparameters=None):
"""Helper function to write out the SDP problem to a temporary
file, call the solver, and parse the output.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param solverparameters: Optional parameters to SDPA.
:type solverparameters: dict of str.
:returns: tuple of float and list -- the primal and dual solution of the
SDP, respectively, and a status string.
"""
solverexecutable = detect_sdpa(solverparameters)
if solverexecutable is None:
raise OSError("SDPA is not in the path or the executable provided is" +
" not correct")
primal, dual = 0, 0
tempfile_ = tempfile.NamedTemporaryFile()
tmp_filename = tempfile_.name
tempfile_.close()
tmp_dats_filename = tmp_filename + ".dat-s"
tmp_out_filename = tmp_filename + ".out"
write_to_sdpa(sdp, tmp_dats_filename)
command_line = [solverexecutable, "-ds", tmp_dats_filename,
"-o", tmp_out_filename]
if solverparameters is not None:
for key, value in list(solverparameters.items()):
if key == "executable":
continue
elif key == "paramsfile":
command_line.extend(["-p", value])
else:
raise ValueError("Unknown parameter for SDPA: " + key)
if sdp.verbose < 1:
with open(os.devnull, "w") as fnull:
call(command_line, stdout=fnull, stderr=fnull)
else:
call(command_line)
primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True,
True)
if sdp.verbose < 2:
os.remove(tmp_dats_filename)
os.remove(tmp_out_filename)
return primal+sdp.constant_term, \
dual+sdp.constant_term, x_mat, y_mat, status |
def decode(self, bytes, raw=False):
"""decode(bytearray, raw=False) -> value
Decodes the given bytearray according to this PrimitiveType
definition.
NOTE: The parameter ``raw`` is present to adhere to the
``decode()`` interface, but has no effect for PrimitiveType
definitions.
"""
return struct.unpack(self.format, buffer(bytes))[0] | decode(bytearray, raw=False) -> value
Decodes the given bytearray according to this PrimitiveType
definition.
NOTE: The parameter ``raw`` is present to adhere to the
``decode()`` interface, but has no effect for PrimitiveType
definitions. | Below is the the instruction that describes the task:
### Input:
decode(bytearray, raw=False) -> value
Decodes the given bytearray according to this PrimitiveType
definition.
NOTE: The parameter ``raw`` is present to adhere to the
``decode()`` interface, but has no effect for PrimitiveType
definitions.
### Response:
def decode(self, bytes, raw=False):
"""decode(bytearray, raw=False) -> value
Decodes the given bytearray according to this PrimitiveType
definition.
NOTE: The parameter ``raw`` is present to adhere to the
``decode()`` interface, but has no effect for PrimitiveType
definitions.
"""
return struct.unpack(self.format, buffer(bytes))[0] |
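An illustrative stand-in for the decode above: the '>H' format string (big-endian unsigned 16-bit) is an assumed example rather than a value from the record, and bytes() replaces the Python-2-only buffer() so the sketch runs on Python 3.
import struct
raw = bytearray(b'\x01\x02')
value = struct.unpack('>H', bytes(raw))[0]  # 0x0102 == 258
assert value == 258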
def open_conn(host, db, user, password, retries=0, sleep=0.5):
'''
Return an open mysql db connection using the given credentials. Use
`retries` and `sleep` to be robust to the occasional transient connection
failure.
retries: if an exception occurs when getting the connection, try again at most this many times.
sleep: pause between retries for this many seconds. a float >= 0.
'''
assert retries >= 0
try:
return MySQLdb.connect(host=host, user=user, passwd=password, db=db)
except Exception:
if retries > 0:
time.sleep(sleep)
return open_conn(host, db, user, password, retries - 1, sleep)
else:
raise | Return an open mysql db connection using the given credentials. Use
`retries` and `sleep` to be robust to the occasional transient connection
failure.
retries: if an exception occurs when getting the connection, try again at most this many times.
sleep: pause between retries for this many seconds. a float >= 0. | Below is the the instruction that describes the task:
### Input:
Return an open mysql db connection using the given credentials. Use
`retries` and `sleep` to be robust to the occasional transient connection
failure.
retries: if an exception occurs when getting the connection, try again at most this many times.
sleep: pause between retries for this many seconds. a float >= 0.
### Response:
def open_conn(host, db, user, password, retries=0, sleep=0.5):
'''
Return an open mysql db connection using the given credentials. Use
`retries` and `sleep` to be robust to the occassional transient connection
failure.
retries: if an exception when getting the connection, try again at most this many times.
sleep: pause between retries for this many seconds. a float >= 0.
'''
assert retries >= 0
try:
return MySQLdb.connect(host=host, user=user, passwd=password, db=db)
except Exception:
if retries > 0:
time.sleep(sleep)
return open_conn(host, db, user, password, retries - 1, sleep)
else:
raise |
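A hypothetical call sketch for open_conn; the hostname, database name and credentials are placeholders, and a reachable MySQL server plus the MySQLdb package are required for the connection to actually succeed.
# Placeholder credentials; retry up to 3 times with a one-second pause between attempts.
conn = open_conn(host='db.example.internal', db='appdb', user='app',
                 password='not-a-real-password', retries=3, sleep=1.0)
try:
    cursor = conn.cursor()
    cursor.execute('SELECT 1')
    print(cursor.fetchone())
finally:
    conn.close()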
def hide_routemap_holder_route_map_content_match_ip_route_source_prefix_list_rmrs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
ip = ET.SubElement(match, "ip")
route_source = ET.SubElement(ip, "route-source")
prefix_list_rmrs = ET.SubElement(route_source, "prefix-list-rmrs")
prefix_list_rmrs.text = kwargs.pop('prefix_list_rmrs')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_routemap_holder_route_map_content_match_ip_route_source_prefix_list_rmrs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
ip = ET.SubElement(match, "ip")
route_source = ET.SubElement(ip, "route-source")
prefix_list_rmrs = ET.SubElement(route_source, "prefix-list-rmrs")
prefix_list_rmrs.text = kwargs.pop('prefix_list_rmrs')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def send(self, command, tab_key, params=None):
'''
Send command `command` with optional parameters `params` to the
remote chrome instance.
The command `id` is automatically added to the outgoing message.
return value is the command id, which can be used to match a command
to its associated response.
'''
self.__check_open_socket(tab_key)
sent_id = self.msg_id
command = {
"id": self.msg_id,
"method": command,
}
if params:
command["params"] = params
navcom = json.dumps(command)
# self.log.debug(" Sending: '%s'", navcom)
try:
self.soclist[tab_key].send(navcom)
except (socket.timeout, websocket.WebSocketTimeoutException):
raise cr_exceptions.ChromeCommunicationsError("Failure sending command to chromium.")
except websocket.WebSocketConnectionClosedException:
raise cr_exceptions.ChromeCommunicationsError("Websocket appears to have been closed. Is the"
" remote chromium instance dead?")
self.msg_id += 1
return sent_id | Send command `command` with optional parameters `params` to the
remote chrome instance.
The command `id` is automatically added to the outgoing message.
return value is the command id, which can be used to match a command
to its associated response. | Below is the the instruction that describes the task:
### Input:
Send command `command` with optional parameters `params` to the
remote chrome instance.
The command `id` is automatically added to the outgoing message.
return value is the command id, which can be used to match a command
to its associated response.
### Response:
def send(self, command, tab_key, params=None):
'''
Send command `command` with optional parameters `params` to the
remote chrome instance.
The command `id` is automatically added to the outgoing message.
return value is the command id, which can be used to match a command
to its associated response.
'''
self.__check_open_socket(tab_key)
sent_id = self.msg_id
command = {
"id": self.msg_id,
"method": command,
}
if params:
command["params"] = params
navcom = json.dumps(command)
# self.log.debug(" Sending: '%s'", navcom)
try:
self.soclist[tab_key].send(navcom)
except (socket.timeout, websocket.WebSocketTimeoutException):
raise cr_exceptions.ChromeCommunicationsError("Failure sending command to chromium.")
except websocket.WebSocketConnectionClosedException:
raise cr_exceptions.ChromeCommunicationsError("Websocket appears to have been closed. Is the"
" remote chromium instance dead?")
self.msg_id += 1
return sent_id |
def get_or_create_node(self, graph: BELGraph, node: BaseEntity) -> Optional[Node]:
"""Create an entry and object for given node if it does not exist."""
sha512 = node.as_sha512()
if sha512 in self.object_cache_node:
return self.object_cache_node[sha512]
node_model = self.get_node_by_hash(sha512)
if node_model is not None:
self.object_cache_node[sha512] = node_model
return node_model
node_model = Node._start_from_base_entity(node)
namespace = node.get(NAMESPACE)
if namespace is None:
pass
elif namespace in graph.namespace_url:
url = graph.namespace_url[namespace]
name = node[NAME]
entry = self.get_namespace_entry(url, name)
if entry is None:
log.debug('skipping node with identifier %s: %s', url, name)
return
self.session.add(entry)
node_model.namespace_entry = entry
elif namespace in graph.namespace_pattern:
name = node[NAME]
pattern = graph.namespace_pattern[namespace]
entry = self.get_or_create_regex_namespace_entry(namespace, pattern, name)
self.session.add(entry)
node_model.namespace_entry = entry
else:
log.warning("No reference in BELGraph for namespace: {}".format(node[NAMESPACE]))
return
if VARIANTS in node or FUSION in node:
node_model.is_variant = True
node_model.has_fusion = FUSION in node
modifications = self.get_or_create_modification(graph, node)
if modifications is None:
log.warning('could not create %s because had an uncachable modification', node.as_bel())
return
node_model.modifications = modifications
self.session.add(node_model)
self.object_cache_node[sha512] = node_model
return node_model | Create an entry and object for given node if it does not exist. | Below is the the instruction that describes the task:
### Input:
Create an entry and object for given node if it does not exist.
### Response:
def get_or_create_node(self, graph: BELGraph, node: BaseEntity) -> Optional[Node]:
"""Create an entry and object for given node if it does not exist."""
sha512 = node.as_sha512()
if sha512 in self.object_cache_node:
return self.object_cache_node[sha512]
node_model = self.get_node_by_hash(sha512)
if node_model is not None:
self.object_cache_node[sha512] = node_model
return node_model
node_model = Node._start_from_base_entity(node)
namespace = node.get(NAMESPACE)
if namespace is None:
pass
elif namespace in graph.namespace_url:
url = graph.namespace_url[namespace]
name = node[NAME]
entry = self.get_namespace_entry(url, name)
if entry is None:
log.debug('skipping node with identifier %s: %s', url, name)
return
self.session.add(entry)
node_model.namespace_entry = entry
elif namespace in graph.namespace_pattern:
name = node[NAME]
pattern = graph.namespace_pattern[namespace]
entry = self.get_or_create_regex_namespace_entry(namespace, pattern, name)
self.session.add(entry)
node_model.namespace_entry = entry
else:
log.warning("No reference in BELGraph for namespace: {}".format(node[NAMESPACE]))
return
if VARIANTS in node or FUSION in node:
node_model.is_variant = True
node_model.has_fusion = FUSION in node
modifications = self.get_or_create_modification(graph, node)
if modifications is None:
log.warning('could not create %s because had an uncachable modification', node.as_bel())
return
node_model.modifications = modifications
self.session.add(node_model)
self.object_cache_node[sha512] = node_model
return node_model |
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj) | Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object. | Below is the the instruction that describes the task:
### Input:
Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
### Response:
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj) |
def lock_multi(self, keys, ttl=0):
"""Lock multiple keys. Multi variant of :meth:`lock`
:param keys: the keys to lock
:type keys: :ref:`iterable<argtypes>`
:param int ttl: The lock timeout for all keys
:return: a :class:`~.MultiResult` object
.. seealso:: :meth:`lock`
"""
return _Base.lock_multi(self, keys, ttl=ttl) | Lock multiple keys. Multi variant of :meth:`lock`
:param keys: the keys to lock
:type keys: :ref:`iterable<argtypes>`
:param int ttl: The lock timeout for all keys
:return: a :class:`~.MultiResult` object
.. seealso:: :meth:`lock` | Below is the the instruction that describes the task:
### Input:
Lock multiple keys. Multi variant of :meth:`lock`
:param keys: the keys to lock
:type keys: :ref:`iterable<argtypes>`
:param int ttl: The lock timeout for all keys
:return: a :class:`~.MultiResult` object
.. seealso:: :meth:`lock`
### Response:
def lock_multi(self, keys, ttl=0):
"""Lock multiple keys. Multi variant of :meth:`lock`
:param keys: the keys to lock
:type keys: :ref:`iterable<argtypes>`
:param int ttl: The lock timeout for all keys
:return: a :class:`~.MultiResult` object
.. seealso:: :meth:`lock`
"""
return _Base.lock_multi(self, keys, ttl=ttl) |
def _print(self, char):
"""
Print a character at the current cursor position and advance the
cursor.
"""
# Don't make bugs where we try to print a screen.
assert len(char) == 1
try:
try:
# Python 3
char = self.decoder(bytes(char, self.encoding))[0]
except TypeError:
# Python 2.x
char = self.decoder(char)[0]
except UnicodeDecodeError:
char = "?"
if self.current_charset == "g0" and self.g0 is not None:
char = char.translate(self.g0)
elif self.current_charset == "g1" and self.g1 is not None:
char = char.translate(self.g1)
row = self.display[self.y]
self.display[self.y] = row[:self.x] + char + row[self.x+1:]
attrs = self.attributes[self.y]
self.attributes[self.y] = attrs[:self.x] + [self.cursor_attributes] + \
attrs[self.x+1:]
self.x += 1
if self.x >= self.size[1]:
# If this was the last column in a row, move the cursor to the
# next row.
self._linefeed() | Print a character at the current cursor position and advance the
cursor. | Below is the the instruction that describes the task:
### Input:
Print a character at the current cursor position and advance the
cursor.
### Response:
def _print(self, char):
"""
Print a character at the current cursor position and advance the
cursor.
"""
# Don't make bugs where we try to print a screen.
assert len(char) == 1
try:
try:
# Python 3
char = self.decoder(bytes(char, self.encoding))[0]
except TypeError:
# Python 2.x
char = self.decoder(char)[0]
except UnicodeDecodeError:
char = "?"
if self.current_charset == "g0" and self.g0 is not None:
char = char.translate(self.g0)
elif self.current_charset == "g1" and self.g1 is not None:
char = char.translate(self.g1)
row = self.display[self.y]
self.display[self.y] = row[:self.x] + char + row[self.x+1:]
attrs = self.attributes[self.y]
self.attributes[self.y] = attrs[:self.x] + [self.cursor_attributes] + \
attrs[self.x+1:]
self.x += 1
if self.x >= self.size[1]:
# If this was the last column in a row, move the cursor to the
# next row.
self._linefeed() |
def list_all_braintree_gateways(cls, **kwargs):
"""List BraintreeGateways
Return a list of BraintreeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_braintree_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[BraintreeGateway]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_braintree_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_braintree_gateways_with_http_info(**kwargs)
return data | List BraintreeGateways
Return a list of BraintreeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_braintree_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[BraintreeGateway]
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
List BraintreeGateways
Return a list of BraintreeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_braintree_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[BraintreeGateway]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_braintree_gateways(cls, **kwargs):
"""List BraintreeGateways
Return a list of BraintreeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_braintree_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[BraintreeGateway]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_braintree_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_braintree_gateways_with_http_info(**kwargs)
return data |
def model_fn(features, labels, mode, params):
"""
Create the model for estimator api
Args:
features: tensor with shape
[BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, go.N * go.N + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: A dictionary (Typically derived from the FLAGS object.)
Returns: tf.estimator.EstimatorSpec with props
mode: same as mode arg
predictions: dict of tensors
'policy': [BATCH_SIZE, go.N * go.N + 1]
'value': [BATCH_SIZE]
loss: a single value tensor
train_op: train op
eval_metric_ops
return dict of tensors
logits: [BATCH_SIZE, go.N * go.N + 1]
"""
policy_output, value_output, logits = model_inference_fn(
features, mode == tf.estimator.ModeKeys.TRAIN, params)
# train ops
policy_cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=tf.stop_gradient(labels['pi_tensor'])))
value_cost = params['value_cost_weight'] * tf.reduce_mean(
tf.square(value_output - labels['value_tensor']))
reg_vars = [v for v in tf.trainable_variables()
if 'bias' not in v.name and 'beta' not in v.name]
l2_cost = params['l2_strength'] * \
tf.add_n([tf.nn.l2_loss(v) for v in reg_vars])
combined_cost = policy_cost + value_cost + l2_cost
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(
global_step, params['lr_boundaries'], params['lr_rates'])
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Insert quantization ops if requested
if params['quantize']:
if mode == tf.estimator.ModeKeys.TRAIN:
tf.contrib.quantize.create_training_graph(
quant_delay=params['quant_delay'])
else:
tf.contrib.quantize.create_eval_graph()
optimizer = tf.train.MomentumOptimizer(
learning_rate, params['sgd_momentum'])
if params['use_tpu']:
optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(combined_cost, global_step=global_step)
# Computations to be executed on CPU, outside of the main TPU queues.
def eval_metrics_host_call_fn(policy_output, value_output, pi_tensor, policy_cost,
value_cost, l2_cost, combined_cost, step,
est_mode=tf.estimator.ModeKeys.TRAIN):
policy_entropy = -tf.reduce_mean(tf.reduce_sum(
policy_output * tf.log(policy_output), axis=1))
# pi_tensor is one_hot when generated from sgfs (for supervised learning)
# and soft-max when using self-play records. argmax normalizes the two.
policy_target_top_1 = tf.argmax(pi_tensor, axis=1)
policy_output_in_top1 = tf.to_float(
tf.nn.in_top_k(policy_output, policy_target_top_1, k=1))
policy_output_in_top3 = tf.to_float(
tf.nn.in_top_k(policy_output, policy_target_top_1, k=3))
policy_top_1_confidence = tf.reduce_max(policy_output, axis=1)
policy_target_top_1_confidence = tf.boolean_mask(
policy_output,
tf.one_hot(policy_target_top_1, tf.shape(policy_output)[1]))
value_cost_normalized = value_cost / params['value_cost_weight']
with tf.variable_scope("metrics"):
metric_ops = {
'policy_cost': tf.metrics.mean(policy_cost),
'value_cost': tf.metrics.mean(value_cost),
'value_cost_normalized': tf.metrics.mean(value_cost_normalized),
'l2_cost': tf.metrics.mean(l2_cost),
'policy_entropy': tf.metrics.mean(policy_entropy),
'combined_cost': tf.metrics.mean(combined_cost),
'policy_accuracy_top_1': tf.metrics.mean(policy_output_in_top1),
'policy_accuracy_top_3': tf.metrics.mean(policy_output_in_top3),
'policy_top_1_confidence': tf.metrics.mean(policy_top_1_confidence),
'policy_target_top_1_confidence': tf.metrics.mean(
policy_target_top_1_confidence),
'value_confidence': tf.metrics.mean(tf.abs(value_output)),
}
if est_mode == tf.estimator.ModeKeys.EVAL:
return metric_ops
# NOTE: global_step is rounded to a multiple of FLAGS.summary_steps.
eval_step = tf.reduce_min(step)
# Create summary ops so that they show up in SUMMARIES collection
# That way, they get logged automatically during training
summary_writer = summary.create_file_writer(FLAGS.work_dir)
with summary_writer.as_default(), \
summary.record_summaries_every_n_global_steps(
params['summary_steps'], eval_step):
for metric_name, metric_op in metric_ops.items():
summary.scalar(metric_name, metric_op[1], step=eval_step)
# Reset metrics occasionally so that they are mean of recent batches.
reset_op = tf.variables_initializer(tf.local_variables("metrics"))
cond_reset_op = tf.cond(
tf.equal(eval_step % params['summary_steps'], tf.to_int64(1)),
lambda: reset_op,
lambda: tf.no_op())
return summary.all_summary_ops() + [cond_reset_op]
metric_args = [
policy_output,
value_output,
labels['pi_tensor'],
tf.reshape(policy_cost, [1]),
tf.reshape(value_cost, [1]),
tf.reshape(l2_cost, [1]),
tf.reshape(combined_cost, [1]),
tf.reshape(global_step, [1]),
]
predictions = {
'policy_output': policy_output,
'value_output': value_output,
}
eval_metrics_only_fn = functools.partial(
eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.EVAL)
host_call_fn = functools.partial(
eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.TRAIN)
tpu_estimator_spec = tpu_estimator.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=combined_cost,
train_op=train_op,
eval_metrics=(eval_metrics_only_fn, metric_args),
host_call=(host_call_fn, metric_args)
)
if params['use_tpu']:
return tpu_estimator_spec
else:
return tpu_estimator_spec.as_estimator_spec() | Create the model for estimator api
Args:
features: tensor with shape
[BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, go.N * go.N + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: A dictionary (Typically derived from the FLAGS object.)
Returns: tf.estimator.EstimatorSpec with props
mode: same as mode arg
predictions: dict of tensors
'policy': [BATCH_SIZE, go.N * go.N + 1]
'value': [BATCH_SIZE]
loss: a single value tensor
train_op: train op
eval_metric_ops
return dict of tensors
logits: [BATCH_SIZE, go.N * go.N + 1] | Below is the the instruction that describes the task:
### Input:
Create the model for estimator api
Args:
features: tensor with shape
[BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, go.N * go.N + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: A dictionary (Typically derived from the FLAGS object.)
Returns: tf.estimator.EstimatorSpec with props
mode: same as mode arg
predictions: dict of tensors
'policy': [BATCH_SIZE, go.N * go.N + 1]
'value': [BATCH_SIZE]
loss: a single value tensor
train_op: train op
eval_metric_ops
return dict of tensors
logits: [BATCH_SIZE, go.N * go.N + 1]
### Response:
def model_fn(features, labels, mode, params):
"""
Create the model for estimator api
Args:
features: tensor with shape
[BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES]
labels: dict from string to tensor with shape
'pi_tensor': [BATCH_SIZE, go.N * go.N + 1]
'value_tensor': [BATCH_SIZE]
mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only)
params: A dictionary (Typically derived from the FLAGS object.)
Returns: tf.estimator.EstimatorSpec with props
mode: same as mode arg
predictions: dict of tensors
'policy': [BATCH_SIZE, go.N * go.N + 1]
'value': [BATCH_SIZE]
loss: a single value tensor
train_op: train op
eval_metric_ops
return dict of tensors
logits: [BATCH_SIZE, go.N * go.N + 1]
"""
policy_output, value_output, logits = model_inference_fn(
features, mode == tf.estimator.ModeKeys.TRAIN, params)
# train ops
policy_cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=tf.stop_gradient(labels['pi_tensor'])))
value_cost = params['value_cost_weight'] * tf.reduce_mean(
tf.square(value_output - labels['value_tensor']))
reg_vars = [v for v in tf.trainable_variables()
if 'bias' not in v.name and 'beta' not in v.name]
l2_cost = params['l2_strength'] * \
tf.add_n([tf.nn.l2_loss(v) for v in reg_vars])
combined_cost = policy_cost + value_cost + l2_cost
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(
global_step, params['lr_boundaries'], params['lr_rates'])
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Insert quantization ops if requested
if params['quantize']:
if mode == tf.estimator.ModeKeys.TRAIN:
tf.contrib.quantize.create_training_graph(
quant_delay=params['quant_delay'])
else:
tf.contrib.quantize.create_eval_graph()
optimizer = tf.train.MomentumOptimizer(
learning_rate, params['sgd_momentum'])
if params['use_tpu']:
optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(combined_cost, global_step=global_step)
# Computations to be executed on CPU, outside of the main TPU queues.
def eval_metrics_host_call_fn(policy_output, value_output, pi_tensor, policy_cost,
value_cost, l2_cost, combined_cost, step,
est_mode=tf.estimator.ModeKeys.TRAIN):
policy_entropy = -tf.reduce_mean(tf.reduce_sum(
policy_output * tf.log(policy_output), axis=1))
# pi_tensor is one_hot when generated from sgfs (for supervised learning)
# and soft-max when using self-play records. argmax normalizes the two.
policy_target_top_1 = tf.argmax(pi_tensor, axis=1)
policy_output_in_top1 = tf.to_float(
tf.nn.in_top_k(policy_output, policy_target_top_1, k=1))
policy_output_in_top3 = tf.to_float(
tf.nn.in_top_k(policy_output, policy_target_top_1, k=3))
policy_top_1_confidence = tf.reduce_max(policy_output, axis=1)
policy_target_top_1_confidence = tf.boolean_mask(
policy_output,
tf.one_hot(policy_target_top_1, tf.shape(policy_output)[1]))
value_cost_normalized = value_cost / params['value_cost_weight']
with tf.variable_scope("metrics"):
metric_ops = {
'policy_cost': tf.metrics.mean(policy_cost),
'value_cost': tf.metrics.mean(value_cost),
'value_cost_normalized': tf.metrics.mean(value_cost_normalized),
'l2_cost': tf.metrics.mean(l2_cost),
'policy_entropy': tf.metrics.mean(policy_entropy),
'combined_cost': tf.metrics.mean(combined_cost),
'policy_accuracy_top_1': tf.metrics.mean(policy_output_in_top1),
'policy_accuracy_top_3': tf.metrics.mean(policy_output_in_top3),
'policy_top_1_confidence': tf.metrics.mean(policy_top_1_confidence),
'policy_target_top_1_confidence': tf.metrics.mean(
policy_target_top_1_confidence),
'value_confidence': tf.metrics.mean(tf.abs(value_output)),
}
if est_mode == tf.estimator.ModeKeys.EVAL:
return metric_ops
# NOTE: global_step is rounded to a multiple of FLAGS.summary_steps.
eval_step = tf.reduce_min(step)
# Create summary ops so that they show up in SUMMARIES collection
# That way, they get logged automatically during training
summary_writer = summary.create_file_writer(FLAGS.work_dir)
with summary_writer.as_default(), \
summary.record_summaries_every_n_global_steps(
params['summary_steps'], eval_step):
for metric_name, metric_op in metric_ops.items():
summary.scalar(metric_name, metric_op[1], step=eval_step)
# Reset metrics occasionally so that they are mean of recent batches.
reset_op = tf.variables_initializer(tf.local_variables("metrics"))
cond_reset_op = tf.cond(
tf.equal(eval_step % params['summary_steps'], tf.to_int64(1)),
lambda: reset_op,
lambda: tf.no_op())
return summary.all_summary_ops() + [cond_reset_op]
metric_args = [
policy_output,
value_output,
labels['pi_tensor'],
tf.reshape(policy_cost, [1]),
tf.reshape(value_cost, [1]),
tf.reshape(l2_cost, [1]),
tf.reshape(combined_cost, [1]),
tf.reshape(global_step, [1]),
]
predictions = {
'policy_output': policy_output,
'value_output': value_output,
}
eval_metrics_only_fn = functools.partial(
eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.EVAL)
host_call_fn = functools.partial(
eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.TRAIN)
tpu_estimator_spec = tpu_estimator.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=combined_cost,
train_op=train_op,
eval_metrics=(eval_metrics_only_fn, metric_args),
host_call=(host_call_fn, metric_args)
)
if params['use_tpu']:
return tpu_estimator_spec
else:
return tpu_estimator_spec.as_estimator_spec() |
def lowpass(data,filterSize=None):
"""
minimal complexity low-pass filtering.
Filter size is how "wide" the filter will be.
Sigma will be 1/10 of this filter width.
If filter size isn't given, it will be 1/10 of the data size.
"""
if filterSize is None:
filterSize=len(data)/10
kernel=kernel_gaussian(size=filterSize)
data=convolve(data,kernel) # do the convolution with padded edges
return data | minimal complexity low-pass filtering.
Filter size is how "wide" the filter will be.
Sigma will be 1/10 of this filter width.
If filter size isn't given, it will be 1/10 of the data size. | Below is the the instruction that describes the task:
### Input:
minimal complexity low-pass filtering.
Filter size is how "wide" the filter will be.
Sigma will be 1/10 of this filter width.
If filter size isn't given, it will be 1/10 of the data size.
### Response:
def lowpass(data,filterSize=None):
"""
minimal complexity low-pass filtering.
Filter size is how "wide" the filter will be.
Sigma will be 1/10 of this filter width.
If filter size isn't given, it will be 1/10 of the data size.
"""
if filterSize is None:
filterSize=len(data)/10
kernel=kernel_gaussian(size=filterSize)
data=convolve(data,kernel) # do the convolution with padded edges
return data |
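A small smoothing sketch, assuming lowpass and its module-level helpers (kernel_gaussian and convolve) are importable alongside it; the synthetic signal and the explicit integer filterSize are illustrative choices.
import numpy as np
# Noisy sine wave to smooth; the seed is only for repeatability of the sketch.
rng = np.random.default_rng(0)
noisy = np.sin(np.linspace(0, 4 * np.pi, 500)) + rng.normal(0, 0.3, 500)
smooth = lowpass(noisy, filterSize=50)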
async def send_message():
"""Example of sending a message."""
jar = aiohttp.CookieJar(unsafe=True)
websession = aiohttp.ClientSession(cookie_jar=jar)
modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
await modem.login(password=sys.argv[2])
await modem.sms(phone=sys.argv[3], message=sys.argv[4])
await modem.logout()
await websession.close() | Example of sending a message. | Below is the the instruction that describes the task:
### Input:
Example of sending a message.
### Response:
async def send_message():
"""Example of sending a message."""
jar = aiohttp.CookieJar(unsafe=True)
websession = aiohttp.ClientSession(cookie_jar=jar)
modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
await modem.login(password=sys.argv[2])
await modem.sms(phone=sys.argv[3], message=sys.argv[4])
await modem.logout()
await websession.close() |
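A hypothetical invocation sketch: the coroutine above reads sys.argv, so the modem address, password, phone number and message text below are placeholders, and a real Netgear LTE modem is needed for the call to succeed.
import asyncio
import sys
sys.argv = ['send_sms.py', '192.168.5.1', 'modem-password', '+15555550123', 'hello from eternalegypt']
asyncio.run(send_message())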
def list_hosted_zones(self, page_chunks=100):
"""
List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
:rtype: generator
:returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances.
"""
return self._do_autopaginating_api_call(
path='hostedzone',
params={'maxitems': page_chunks},
method='GET',
parser_func=xml_parsers.list_hosted_zones_parser,
next_marker_xpath="./{*}NextMarker",
next_marker_param_name="marker",
) | List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
:rtype: generator
:returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances. | Below is the the instruction that describes the task:
### Input:
List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
:rtype: generator
:returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances.
### Response:
def list_hosted_zones(self, page_chunks=100):
"""
List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
:rtype: generator
:returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances.
"""
return self._do_autopaginating_api_call(
path='hostedzone',
params={'maxitems': page_chunks},
method='GET',
parser_func=xml_parsers.list_hosted_zones_parser,
next_marker_xpath="./{*}NextMarker",
next_marker_param_name="marker",
) |
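A consumption sketch for the generator above; conn is assumed to be an already-authenticated connection object exposing this method, which is not shown in the record.
from itertools import islice
# Only the first ten zones are fetched, since the generator paginates lazily behind the scenes.
for zone in islice(conn.list_hosted_zones(page_chunks=50), 10):
    print(zone)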
def get_matched_token(self, match):
"""Find which token has been matched by compound regex"""
match_groupdict = match.groupdict()
for group in self.groups:
if match_groupdict[group] is not None:
token, match_type = self.groups[group]
return (token, match_type, group) | Find which token has been matched by compound regex | Below is the the instruction that describes the task:
### Input:
Find which token has been matched by compound regex
### Response:
def get_matched_token(self, match):
"""Find which token has been matched by compound regex"""
match_groupdict = match.groupdict()
for group in self.groups:
if match_groupdict[group] is not None:
token, match_type = self.groups[group]
return (token, match_type, group) |
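A self-contained illustration of the named-group lookup idea used above; the compound pattern and its group names are invented for this sketch and do not come from the record's self.groups table.
import re
compound = re.compile(r'(?P<number>\d+)|(?P<word>[A-Za-z]+)')
match = compound.match('42abc')
# Same trick as get_matched_token: the group whose groupdict entry is not None is the one that fired.
matched_group = next(g for g, v in match.groupdict().items() if v is not None)
assert matched_group == 'number'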
def send_packet(self, pk, expected_reply=(), resend=False, timeout=0.2):
"""
Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false
"""
self._send_lock.acquire()
if self.link is not None:
if len(expected_reply) > 0 and not resend and \
self.link.needs_resending:
pattern = (pk.header,) + expected_reply
logger.debug(
'Sending packet and expecting the %s pattern back',
pattern)
new_timer = Timer(timeout,
lambda: self._no_answer_do_retry(pk,
pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
elif resend:
# Check if we have gotten an answer, if not try again
pattern = expected_reply
if pattern in self._answer_patterns:
logger.debug('We want to resend and the pattern is there')
if self._answer_patterns[pattern]:
new_timer = Timer(timeout,
lambda:
self._no_answer_do_retry(
pk, pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
else:
logger.debug('Resend requested, but no pattern found: %s',
self._answer_patterns)
self.link.send_packet(pk)
self.packet_sent.call(pk)
self._send_lock.release() | Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false | Below is the the instruction that describes the task:
### Input:
Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false
### Response:
def send_packet(self, pk, expected_reply=(), resend=False, timeout=0.2):
"""
Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false
"""
self._send_lock.acquire()
if self.link is not None:
if len(expected_reply) > 0 and not resend and \
self.link.needs_resending:
pattern = (pk.header,) + expected_reply
logger.debug(
'Sending packet and expecting the %s pattern back',
pattern)
new_timer = Timer(timeout,
lambda: self._no_answer_do_retry(pk,
pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
elif resend:
# Check if we have gotten an answer, if not try again
pattern = expected_reply
if pattern in self._answer_patterns:
logger.debug('We want to resend and the pattern is there')
if self._answer_patterns[pattern]:
new_timer = Timer(timeout,
lambda:
self._no_answer_do_retry(
pk, pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
else:
logger.debug('Resend requested, but no pattern found: %s',
self._answer_patterns)
self.link.send_packet(pk)
self.packet_sent.call(pk)
self._send_lock.release() |
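A hedged usage sketch for the packet API; the import paths, radio URI, and the legacy commander packing below are assumptions about a typical cflib setup and can differ between library versions:

import struct

import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crtp.crtpstack import CRTPPacket, CRTPPort

cflib.crtp.init_drivers()
cf = Crazyflie()
cf.open_link('radio://0/80/2M')   # placeholder URI

pk = CRTPPacket()
pk.set_header(CRTPPort.COMMANDER, 0)
# Legacy commander setpoint: roll, pitch, yaw as floats, thrust as uint16.
pk.data = struct.pack('<fffH', 0.0, 0.0, 0.0, 0)
cf.send_packet(pk)                # no expected_reply, so no retry timer is armed

cf.close_link()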
def setIsolateHidden(self, state):
"""
Sets whether or not this item is hidden due to isolation.
:param state | <bool>
"""
self._isolatedHidden = state
super(XNode, self).setVisible(self.isVisible()) | Sets whether or not this item is hidden due to isolation.
:param state | <bool> | Below is the instruction that describes the task:
### Input:
Sets whether or not this item is hidden due to isolation.
:param state | <bool>
### Response:
def setIsolateHidden(self, state):
"""
Sets whether or not this item is hidden due to isolation.
:param state | <bool>
"""
self._isolatedHidden = state
super(XNode, self).setVisible(self.isVisible()) |
def select_address_family(host, port):
"""Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if host.startswith("unix://"):
return socket.AF_UNIX
elif ":" in host and hasattr(socket, "AF_INET6"):
return socket.AF_INET6
return socket.AF_INET | Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port. | Below is the instruction that describes the task:
### Input:
Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port.
### Response:
def select_address_family(host, port):
"""Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if host.startswith("unix://"):
return socket.AF_UNIX
elif ":" in host and hasattr(socket, "AF_INET6"):
return socket.AF_INET6
return socket.AF_INET |
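A quick sketch of the selection behaviour; it assumes the function is importable from werkzeug.serving (where it lives in recent Werkzeug releases) and that the interpreter was built with IPv6 and, for the last line, AF_UNIX support:

import socket

from werkzeug.serving import select_address_family

assert select_address_family('127.0.0.1', 8000) == socket.AF_INET
assert select_address_family('::1', 8000) == socket.AF_INET6                # needs AF_INET6
assert select_address_family('unix:///tmp/app.sock', 0) == socket.AF_UNIX   # POSIX only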
def tag(self, tag: str, overwrite: bool = False) -> None:
"""
Tags the current commit
:param tag: tag
:type tag: str
:param overwrite: overwrite existing tag
:type overwrite: bool
"""
LOGGER.info('tagging repo: %s', tag)
try:
self.repo.create_tag(tag)
except GitCommandError as exc:
if 'already exists' in exc.stderr and overwrite:
LOGGER.info('overwriting existing tag')
self.remove_tag(tag)
self.repo.create_tag(tag)
else:
LOGGER.exception('error while tagging repo')
raise | Tags the current commit
:param tag: tag
:type tag: str
:param overwrite: overwrite existing tag
:type overwrite: bool | Below is the instruction that describes the task:
### Input:
Tags the current commit
:param tag: tag
:type tag: str
:param overwrite: overwrite existing tag
:type overwrite: bool
### Response:
def tag(self, tag: str, overwrite: bool = False) -> None:
"""
Tags the current commit
:param tag: tag
:type tag: str
:param overwrite: overwrite existing tag
:type overwrite: bool
"""
LOGGER.info('tagging repo: %s', tag)
try:
self.repo.create_tag(tag)
except GitCommandError as exc:
if 'already exists' in exc.stderr and overwrite:
LOGGER.info('overwriting existing tag')
self.remove_tag(tag)
self.repo.create_tag(tag)
else:
LOGGER.exception('error while tagging repo')
raise |
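The overwrite branch reduces to a delete-then-recreate on the underlying GitPython repository; a minimal sketch, assuming a local repository path (the wrapper class around self.repo is not shown in the record):

from git import GitCommandError, Repo

repo = Repo('path/to/repo')        # placeholder path
tag_name = 'v1.2.3'
try:
    repo.create_tag(tag_name)
except GitCommandError as exc:
    if 'already exists' in exc.stderr:
        repo.delete_tag(tag_name)  # overwrite: drop the stale tag first
        repo.create_tag(tag_name)
    else:
        raise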
def save(self, *args, **kwargs):
"""
Custom save method does the following things:
* converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point)
* intercepts changes to status and fires node_status_changed signal
* set default status
"""
# geometry collection check
if isinstance(self.geometry, GeometryCollection) and 0 < len(self.geometry) < 2:
self.geometry = self.geometry[0]
# if no status specified
if not self.status and not self.status_id:
try:
self.status = Status.objects.filter(is_default=True)[0]
except IndexError:
pass
super(Node, self).save(*args, **kwargs)
# if status of a node changes
if (self.status and self._current_status and self.status.id != self._current_status) or\
(self.status_id and self._current_status and self.status_id != self._current_status):
# send django signal
node_status_changed.send(
sender=self.__class__,
instance=self,
old_status=Status.objects.get(pk=self._current_status),
new_status=self.status
)
# update _current_status
self._current_status = self.status_id | Custom save method does the following things:
* converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point)
* intercepts changes to status and fires node_status_changed signal
* set default status | Below is the instruction that describes the task:
### Input:
Custom save method does the following things:
* converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point)
* intercepts changes to status and fires node_status_changed signal
* set default status
### Response:
def save(self, *args, **kwargs):
"""
Custom save method does the following things:
* converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point)
* intercepts changes to status and fires node_status_changed signal
* set default status
"""
# geometry collection check
if isinstance(self.geometry, GeometryCollection) and 0 < len(self.geometry) < 2:
self.geometry = self.geometry[0]
# if no status specified
if not self.status and not self.status_id:
try:
self.status = Status.objects.filter(is_default=True)[0]
except IndexError:
pass
super(Node, self).save(*args, **kwargs)
# if status of a node changes
if (self.status and self._current_status and self.status.id != self._current_status) or\
(self.status_id and self._current_status and self.status_id != self._current_status):
# send django signal
node_status_changed.send(
sender=self.__class__,
instance=self,
old_status=Status.objects.get(pk=self._current_status),
new_status=self.status
)
# update _current_status
self._current_status = self.status_id |
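One way to consume the node_status_changed signal fired by save(); the import path below is a guess at the app layout, which the record does not show, and should point at the module that actually defines the signal:

from django.dispatch import receiver

# Hypothetical import path for the signal defined alongside the Node model.
from myapp.signals import node_status_changed

@receiver(node_status_changed)
def log_status_change(sender, instance, old_status, new_status, **kwargs):
    # Runs whenever save() detects a status transition on a node.
    print('node %s: %s -> %s' % (instance.pk, old_status, new_status))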
def is_already_running(self):
"""Return True if lock exists and has not timed out."""
redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)
return self.celery_self.backend.client.exists(redis_key) | Return True if lock exists and has not timed out. | Below is the instruction that describes the task:
### Input:
Return True if lock exists and has not timed out.
### Response:
def is_already_running(self):
"""Return True if lock exists and has not timed out."""
redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)
return self.celery_self.backend.client.exists(redis_key) |
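The check above is just a Redis EXISTS on a per-task key; a generic sketch of the surrounding lock pattern with redis-py follows (the key name and timeout are illustrative, not taken from the original task class):

import redis

client = redis.Redis()
lock_key = 'celery-single-instance-generate-report'   # illustrative key

def is_already_running():
    # Non-zero means the lock key still exists, i.e. it has not expired yet.
    return bool(client.exists(lock_key))

def acquire_lock(timeout_seconds=300):
    # SET with nx/ex creates the key only if absent and lets it expire on its
    # own, so a crashed worker cannot hold the lock forever.
    return bool(client.set(lock_key, 1, nx=True, ex=timeout_seconds))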