code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def resolve_and_call(self, func, extra_env=None):
""" Resolve function arguments and call them, possibily filling from the environment """
kwargs = self.resolve_parameters(func, extra_env=extra_env)
return func(**kwargs) | Resolve function arguments and call them, possibly filling from the environment | Below is the instruction that describes the task:
### Input:
Resolve function arguments and call them, possibly filling from the environment
### Response:
def resolve_and_call(self, func, extra_env=None):
""" Resolve function arguments and call them, possibily filling from the environment """
kwargs = self.resolve_parameters(func, extra_env=extra_env)
return func(**kwargs) |
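For illustration, here is a minimal, self-contained sketch of the idea: inspect a function's signature and fill its keyword arguments from an environment dict. The `greet` function and the environment values are hypothetical, and `inspect.signature` merely stands in for whatever `resolve_parameters` does internally.

```python
import inspect

def resolve_and_call(func, env, extra_env=None):
    # Merge the base environment with any extra values supplied by the caller.
    env = {**env, **(extra_env or {})}
    # Keep only the entries whose keys match the function's parameter names.
    kwargs = {name: env[name]
              for name in inspect.signature(func).parameters
              if name in env}
    return func(**kwargs)

def greet(user, greeting="hello"):
    return f"{greeting}, {user}"

print(resolve_and_call(greet, {"user": "ada"}, extra_env={"greeting": "hi"}))
# -> hi, ada
```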
def gen_FS_DF(df_output):
"""generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object.
"""
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs | generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object. | Below is the instruction that describes the task:
### Input:
generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object.
### Response:
def gen_FS_DF(df_output):
"""generate DataFrame of scores.
Parameters
----------
df_output : type
Description of parameter `df_output`.
Returns
-------
type
Description of returned object.
"""
df_day = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Year', 'Month', 'Day'],
aggfunc=[min, max, np.mean, ])
df_day_all_year = pd.pivot_table(
df_output,
values=['T2', 'U10', 'Kdown', 'RH2'],
index=['Month', 'Day'],
aggfunc=[min, max, np.mean, ])
array_yr_mon = df_day.index.droplevel(
'Day').to_frame().drop_duplicates().values
df_fs = pd.DataFrame(
{(yr, mon):
(df_day.loc[(yr, mon)].apply(gen_score_ser) -
df_day_all_year.loc[mon].apply(gen_score_ser)).abs().mean()
for yr, mon in array_yr_mon})
return df_fs |
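As a rough sketch of the aggregation pattern above, the snippet below pivots a tiny, made-up forcing table into daily min/max/mean statistics per (Year, Month, Day) and into a multi-year (Month, Day) baseline. The column names and values are purely illustrative.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "Year":  [2011, 2011, 2012, 2012],
    "Month": [1, 1, 1, 1],
    "Day":   [1, 2, 1, 2],
    "T2":    [3.0, 4.5, 2.0, 5.0],
})

# Daily statistics for each individual year.
df_day = pd.pivot_table(df, values=["T2"], index=["Year", "Month", "Day"],
                        aggfunc=[min, max, np.mean])
# The same statistics pooled across all years (the baseline).
df_day_all_year = pd.pivot_table(df, values=["T2"], index=["Month", "Day"],
                                 aggfunc=[min, max, np.mean])
print(df_day)
print(df_day_all_year)
```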
def vcf2cytosure(institute_id, case_name, individual_id):
"""Download vcf2cytosure file for individual."""
(display_name, vcf2cytosure) = controllers.vcf2cytosure(store,
institute_id, case_name, individual_id)
outdir = os.path.abspath(os.path.dirname(vcf2cytosure))
filename = os.path.basename(vcf2cytosure)
log.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
attachment_filename = display_name + ".vcf2cytosure.cgh"
return send_from_directory(outdir, filename,
attachment_filename=attachment_filename,
as_attachment=True) | Download vcf2cytosure file for individual. | Below is the instruction that describes the task:
### Input:
Download vcf2cytosure file for individual.
### Response:
def vcf2cytosure(institute_id, case_name, individual_id):
"""Download vcf2cytosure file for individual."""
(display_name, vcf2cytosure) = controllers.vcf2cytosure(store,
institute_id, case_name, individual_id)
outdir = os.path.abspath(os.path.dirname(vcf2cytosure))
filename = os.path.basename(vcf2cytosure)
log.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
attachment_filename = display_name + ".vcf2cytosure.cgh"
return send_from_directory(outdir, filename,
attachment_filename=attachment_filename,
as_attachment=True) |
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
"""Return Kubernetes versions available for upgrading an existing cluster."""
resource_group = getattr(namespace, 'resource_group_name', None)
name = getattr(namespace, 'name', None)
return get_k8s_upgrades(cmd.cli_ctx, resource_group, name) if resource_group and name else None | Return Kubernetes versions available for upgrading an existing cluster. | Below is the instruction that describes the task:
### Input:
Return Kubernetes versions available for upgrading an existing cluster.
### Response:
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
"""Return Kubernetes versions available for upgrading an existing cluster."""
resource_group = getattr(namespace, 'resource_group_name', None)
name = getattr(namespace, 'name', None)
return get_k8s_upgrades(cmd.cli_ctx, resource_group, name) if resource_group and name else None |
def process_request(self, request):
"""
Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``.
"""
super(SubdomainURLRoutingMiddleware, self).process_request(request)
subdomain = getattr(request, 'subdomain', UNSET)
if subdomain is not UNSET:
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
if urlconf is not None:
logger.debug("Using urlconf %s for subdomain: %s",
repr(urlconf), repr(subdomain))
request.urlconf = urlconf | Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``. | Below is the instruction that describes the task:
### Input:
Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``.
### Response:
def process_request(self, request):
"""
Sets the current request's ``urlconf`` attribute to the urlconf
associated with the subdomain, if it is listed in
``settings.SUBDOMAIN_URLCONFS``.
"""
super(SubdomainURLRoutingMiddleware, self).process_request(request)
subdomain = getattr(request, 'subdomain', UNSET)
if subdomain is not UNSET:
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
if urlconf is not None:
logger.debug("Using urlconf %s for subdomain: %s",
repr(urlconf), repr(subdomain))
request.urlconf = urlconf |
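The lookup performed by the middleware can be shown in isolation. The mapping below and the `UNSET` sentinel are assumptions for illustration; in the real middleware the mapping comes from `settings.SUBDOMAIN_URLCONFS` and the subdomain is attached to the request by a parent class.

```python
UNSET = object()

SUBDOMAIN_URLCONFS = {
    None: "myproject.urls",       # bare domain
    "api": "myproject.api_urls",
}

def pick_urlconf(subdomain=UNSET, default="myproject.urls"):
    # No subdomain was detected at all: keep the default URLconf.
    if subdomain is UNSET:
        return default
    # Unknown subdomains also fall back to the default.
    return SUBDOMAIN_URLCONFS.get(subdomain) or default

print(pick_urlconf("api"))   # myproject.api_urls
print(pick_urlconf("blog"))  # myproject.urls
```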
def open(cls, grammar_filename, rel_to=None, **options):
"""Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...)
"""
if rel_to:
basepath = os.path.dirname(rel_to)
grammar_filename = os.path.join(basepath, grammar_filename)
with open(grammar_filename, encoding='utf8') as f:
return cls(f, **options) | Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...) | Below is the instruction that describes the task:
### Input:
Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...)
### Response:
def open(cls, grammar_filename, rel_to=None, **options):
"""Create an instance of Lark with the grammar given by its filename
If rel_to is provided, the function will find the grammar filename in relation to it.
Example:
>>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr")
Lark(...)
"""
if rel_to:
basepath = os.path.dirname(rel_to)
grammar_filename = os.path.join(basepath, grammar_filename)
with open(grammar_filename, encoding='utf8') as f:
return cls(f, **options) |
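The `rel_to` handling reduces to a small path computation, sketched here with a hypothetical caller file:

```python
import os

def resolve_relative(filename, rel_to=None):
    # Interpret the filename relative to the directory that contains rel_to.
    if rel_to:
        filename = os.path.join(os.path.dirname(rel_to), filename)
    return filename

print(resolve_relative("grammar_file.lark", rel_to="/home/user/project/parser.py"))
# -> /home/user/project/grammar_file.lark
```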
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as p
"""
return self.__do_menu("as_p", show_leaf, current_linkable, class_current) | It returns breadcrumb as p | Below is the the instruction that describes the task:
### Input:
It returns breadcrumb as p
### Response:
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as p
"""
return self.__do_menu("as_p", show_leaf, current_linkable, class_current) |
def bind(self, fn: Callable[[Any], 'List']) -> 'List':
"""Flatten and map the List.
Haskell: xs >>= f = concat (map f xs)
"""
return List.concat(self.map(fn)) | Flatten and map the List.
Haskell: xs >>= f = concat (map f xs) | Below is the instruction that describes the task:
### Input:
Flatten and map the List.
Haskell: xs >>= f = concat (map f xs)
### Response:
def bind(self, fn: Callable[[Any], 'List']) -> 'List':
"""Flatten and map the List.
Haskell: xs >>= f = concat (map f xs)
"""
return List.concat(self.map(fn)) |
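Stripped of the library's `List` wrapper, the same flatten-and-map ("bind") operation looks like this on a plain Python list:

```python
from typing import Any, Callable, List

def bind(xs: List[Any], fn: Callable[[Any], List[Any]]) -> List[Any]:
    # Haskell: xs >>= f = concat (map f xs)
    return [y for x in xs for y in fn(x)]

print(bind([1, 2, 3], lambda x: [x, x * 10]))
# -> [1, 10, 2, 20, 3, 30]
```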
def get_vm_info(name):
'''
get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}.
'''
try:
vm_ = __utils__['sdb.sdb_get'](_build_sdb_uri(name), __opts__)
except KeyError:
raise SaltInvocationError(
'Probable sdb driver not found. Check your configuration.')
if vm_ is None or 'machine' not in vm_:
raise SaltInvocationError(
'No Vagrant machine defined for Salt_id {}'.format(name))
return vm_ | get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}. | Below is the instruction that describes the task:
### Input:
get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}.
### Response:
def get_vm_info(name):
'''
get the information for a VM.
:param name: salt_id name
:return: dictionary of {'machine': x, 'cwd': y, ...}.
'''
try:
vm_ = __utils__['sdb.sdb_get'](_build_sdb_uri(name), __opts__)
except KeyError:
raise SaltInvocationError(
'Probable sdb driver not found. Check your configuration.')
if vm_ is None or 'machine' not in vm_:
raise SaltInvocationError(
'No Vagrant machine defined for Salt_id {}'.format(name))
return vm_ |
def rtree_filter(self):
"""
:returns: an RtreeFilter
"""
return RtreeFilter(self.src_filter.sitecol,
self.oqparam.maximum_distance,
self.src_filter.filename) | :returns: an RtreeFilter | Below is the the instruction that describes the task:
### Input:
:returns: an RtreeFilter
### Response:
def rtree_filter(self):
"""
:returns: an RtreeFilter
"""
return RtreeFilter(self.src_filter.sitecol,
self.oqparam.maximum_distance,
self.src_filter.filename) |
def get_repr(expr, multiline=False):
"""
Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
"""
signature = _get_object_signature(expr)
if signature is None:
return "{}()".format(type(expr).__name__)
defaults = {}
for name, parameter in signature.parameters.items():
if parameter.default is not inspect._empty:
defaults[name] = parameter.default
args, var_args, kwargs = get_vars(expr)
args_parts = collections.OrderedDict()
var_args_parts = []
kwargs_parts = {}
has_lines = multiline
parts = []
# Format keyword-optional arguments.
# print(type(expr), args)
for i, (key, value) in enumerate(args.items()):
arg_repr = _dispatch_formatting(value)
if "\n" in arg_repr:
has_lines = True
args_parts[key] = arg_repr
# Format *args
for arg in var_args:
arg_repr = _dispatch_formatting(arg)
if "\n" in arg_repr:
has_lines = True
var_args_parts.append(arg_repr)
# Format **kwargs
for key, value in sorted(kwargs.items()):
if key in defaults and value == defaults[key]:
continue
value = _dispatch_formatting(value)
arg_repr = "{}={}".format(key, value)
has_lines = True
kwargs_parts[key] = arg_repr
for _, part in args_parts.items():
parts.append(part)
parts.extend(var_args_parts)
for _, part in sorted(kwargs_parts.items()):
parts.append(part)
# If we should format on multiple lines, add the appropriate formatting.
if has_lines and parts:
for i, part in enumerate(parts):
parts[i] = "\n".join(" " + line for line in part.split("\n"))
parts.append(" )")
parts = ",\n".join(parts)
return "{}(\n{}".format(type(expr).__name__, parts)
parts = ", ".join(parts)
return "{}({})".format(type(expr).__name__, parts) | Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
) | Below is the instruction that describes the task:
### Input:
Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
### Response:
def get_repr(expr, multiline=False):
"""
Build a repr string for ``expr`` from its vars and signature.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
)
"""
signature = _get_object_signature(expr)
if signature is None:
return "{}()".format(type(expr).__name__)
defaults = {}
for name, parameter in signature.parameters.items():
if parameter.default is not inspect._empty:
defaults[name] = parameter.default
args, var_args, kwargs = get_vars(expr)
args_parts = collections.OrderedDict()
var_args_parts = []
kwargs_parts = {}
has_lines = multiline
parts = []
# Format keyword-optional arguments.
# print(type(expr), args)
for i, (key, value) in enumerate(args.items()):
arg_repr = _dispatch_formatting(value)
if "\n" in arg_repr:
has_lines = True
args_parts[key] = arg_repr
# Format *args
for arg in var_args:
arg_repr = _dispatch_formatting(arg)
if "\n" in arg_repr:
has_lines = True
var_args_parts.append(arg_repr)
# Format **kwargs
for key, value in sorted(kwargs.items()):
if key in defaults and value == defaults[key]:
continue
value = _dispatch_formatting(value)
arg_repr = "{}={}".format(key, value)
has_lines = True
kwargs_parts[key] = arg_repr
for _, part in args_parts.items():
parts.append(part)
parts.extend(var_args_parts)
for _, part in sorted(kwargs_parts.items()):
parts.append(part)
# If we should format on multiple lines, add the appropriate formatting.
if has_lines and parts:
for i, part in enumerate(parts):
parts[i] = "\n".join(" " + line for line in part.split("\n"))
parts.append(" )")
parts = ",\n".join(parts)
return "{}(\n{}".format(type(expr).__name__, parts)
parts = ", ".join(parts)
return "{}({})".format(type(expr).__name__, parts) |
def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name))
copy_files([fpath], output_dir) | Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file | Below is the instruction that describes the task:
### Input:
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
### Response:
def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including extension)
:param str file_id: FileStoreID of file
:param str output_dir: Location to place output file
"""
work_dir = job.fileStore.getLocalTempDir()
fpath = job.fileStore.readGlobalFile(file_id, os.path.join(work_dir, name))
copy_files([fpath], output_dir) |
def _parse_qemu_img_info(info):
'''
Parse qemu-img info JSON output into disk infos dictionary
'''
raw_infos = salt.utils.json.loads(info)
disks = []
for disk_infos in raw_infos:
disk = {
'file': disk_infos['filename'],
'file format': disk_infos['format'],
'disk size': disk_infos['actual-size'],
'virtual size': disk_infos['virtual-size'],
'cluster size': disk_infos['cluster-size'] if 'cluster-size' in disk_infos else None,
}
if 'full-backing-filename' in disk_infos.keys():
disk['backing file'] = format(disk_infos['full-backing-filename'])
if 'snapshots' in disk_infos.keys():
disk['snapshots'] = [
{
'id': snapshot['id'],
'tag': snapshot['name'],
'vmsize': snapshot['vm-state-size'],
'date': datetime.datetime.fromtimestamp(
float('{}.{}'.format(snapshot['date-sec'], snapshot['date-nsec']))).isoformat(),
'vmclock': datetime.datetime.utcfromtimestamp(
float('{}.{}'.format(snapshot['vm-clock-sec'],
snapshot['vm-clock-nsec']))).time().isoformat()
} for snapshot in disk_infos['snapshots']]
disks.append(disk)
for disk in disks:
if 'backing file' in disk.keys():
candidates = [info for info in disks if 'file' in info.keys() and info['file'] == disk['backing file']]
if candidates:
disk['backing file'] = candidates[0]
return disks[0] | Parse qemu-img info JSON output into disk infos dictionary | Below is the instruction that describes the task:
### Input:
Parse qemu-img info JSON output into disk infos dictionary
### Response:
def _parse_qemu_img_info(info):
'''
Parse qemu-img info JSON output into disk infos dictionary
'''
raw_infos = salt.utils.json.loads(info)
disks = []
for disk_infos in raw_infos:
disk = {
'file': disk_infos['filename'],
'file format': disk_infos['format'],
'disk size': disk_infos['actual-size'],
'virtual size': disk_infos['virtual-size'],
'cluster size': disk_infos['cluster-size'] if 'cluster-size' in disk_infos else None,
}
if 'full-backing-filename' in disk_infos.keys():
disk['backing file'] = format(disk_infos['full-backing-filename'])
if 'snapshots' in disk_infos.keys():
disk['snapshots'] = [
{
'id': snapshot['id'],
'tag': snapshot['name'],
'vmsize': snapshot['vm-state-size'],
'date': datetime.datetime.fromtimestamp(
float('{}.{}'.format(snapshot['date-sec'], snapshot['date-nsec']))).isoformat(),
'vmclock': datetime.datetime.utcfromtimestamp(
float('{}.{}'.format(snapshot['vm-clock-sec'],
snapshot['vm-clock-nsec']))).time().isoformat()
} for snapshot in disk_infos['snapshots']]
disks.append(disk)
for disk in disks:
if 'backing file' in disk.keys():
candidates = [info for info in disks if 'file' in info.keys() and info['file'] == disk['backing file']]
if candidates:
disk['backing file'] = candidates[0]
return disks[0] |
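A trimmed-down version of the parsing step, using the standard `json` module and a hypothetical `qemu-img info --output=json` blob with only a handful of fields, might look like this:

```python
import json

sample = json.dumps([{
    "filename": "disk0.qcow2",
    "format": "qcow2",
    "actual-size": 1048576,
    "virtual-size": 10737418240,
    "cluster-size": 65536,
}])

def parse_disks(info_json):
    disks = []
    for raw in json.loads(info_json):
        disks.append({
            "file": raw["filename"],
            "file format": raw["format"],
            "disk size": raw["actual-size"],
            "virtual size": raw["virtual-size"],
            # cluster-size is optional in qemu-img output.
            "cluster size": raw.get("cluster-size"),
        })
    return disks

print(parse_disks(sample))
```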
def state_delta(self, selector='all',
power=None, duration=1.0, infrared=None, hue=None,
saturation=None, brightness=None, kelvin=None):
"""Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000].
"""
argument_tuples = [
("power", power),
("duration", duration),
("infrared", infrared),
("hue", hue),
("saturation", saturation),
("brightness", brightness),
("kelvin", kelvin)
]
return self.client.perform_request(
method='post', endpoint='lights/{}/state/delta',
endpoint_args=[selector], argument_tuples=argument_tuples) | Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000]. | Below is the instruction that describes the task:
### Input:
Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000].
### Response:
def state_delta(self, selector='all',
power=None, duration=1.0, infrared=None, hue=None,
saturation=None, brightness=None, kelvin=None):
"""Given a state delta, apply the modifications to lights' state
over a given period of time.
selector: required String
The selector to limit which lights are controlled.
power: String
The power state you want to set on the selector. on or off
duration: Double
How long in seconds you want the power action to take.
Range: 0.0 – 3155760000.0 (100 years)
infrared: Double
The maximum brightness of the infrared channel.
hue: Double
Rotate the hue by this angle in degrees.
saturation: Double
Change the saturation by this additive amount; the resulting
saturation is clipped to [0, 1].
brightness: Double
Change the brightness by this additive amount; the resulting
brightness is clipped to [0, 1].
kelvin: Double
Change the kelvin by this additive amount; the resulting kelvin is
clipped to [2500, 9000].
"""
argument_tuples = [
("power", power),
("duration", duration),
("infrared", infrared),
("hue", hue),
("saturation", saturation),
("brightness", brightness),
("kelvin", kelvin)
]
return self.client.perform_request(
method='post', endpoint='lights/{}/state/delta',
endpoint_args=[selector], argument_tuples=argument_tuples) |
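The argument-tuple pattern is easy to demonstrate on its own. The sketch below assumes (the snippet itself does not confirm this) that parameters left at `None` are dropped before the request is sent:

```python
def build_payload(power=None, duration=1.0, hue=None, brightness=None):
    # Collect every optional parameter as a (name, value) pair.
    argument_tuples = [
        ("power", power),
        ("duration", duration),
        ("hue", hue),
        ("brightness", brightness),
    ]
    # Drop the ones the caller did not set.
    return {name: value for name, value in argument_tuples if value is not None}

print(build_payload(power="on", brightness=0.5))
# -> {'power': 'on', 'duration': 1.0, 'brightness': 0.5}
```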
def main(output_directory: str, data: str) -> None:
"""
Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, except that they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
"""
json_files = glob.glob(os.path.join(data, "*.json"))
for dataset in json_files:
dataset_name = os.path.basename(dataset)[:-5]
print(f"Processing dataset: {dataset} into query and question "
f"splits at output path: {output_directory + '/' + dataset_name}")
full_dataset = json.load(open(dataset))
if not isinstance(full_dataset, list):
full_dataset = [full_dataset]
for split_type in ["query_split", "question_split"]:
dataset_out = os.path.join(output_directory, dataset_name, split_type)
for split, split_dataset in process_dataset(full_dataset, split_type):
dataset_out = os.path.join(output_directory, dataset_name, split_type)
os.makedirs(dataset_out, exist_ok=True)
json.dump(split_dataset, open(os.path.join(dataset_out, split), "w"), indent=4) | Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, except that they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data. | Below is the instruction that describes the task:
### Input:
Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, except that they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
### Response:
def main(output_directory: str, data: str) -> None:
"""
Processes the text2sql data into the following directory structure:
``dataset/{query_split, question_split}/{train,dev,test}.json``
for datasets which have train, dev and test splits, or:
``dataset/{query_split, question_split}/{split_{split_id}}.json``
for datasets which use cross validation.
The JSON format is identical to the original datasets, except that they
are split into separate files with respect to the split_type. This means that
for the question split, all of the sql data is duplicated for each sentence
which is bucketed together as having the same semantics.
As an example, the following blob would be put "as-is" into the query split
dataset, and split into two datasets with identical blobs for the question split,
differing only in the "sentence" key, where blob1 would end up in the train split
and blob2 would be in the dev split, with the rest of the json duplicated in each.
{
"comments": [],
"old-name": "",
"query-split": "train",
"sentences": [{blob1, "question-split": "train"}, {blob2, "question-split": "dev"}],
"sql": [],
"variables": []
},
Parameters
----------
output_directory : str, required.
The output directory.
data: str, default = None
The path to the data directory of https://github.com/jkkummerfeld/text2sql-data.
"""
json_files = glob.glob(os.path.join(data, "*.json"))
for dataset in json_files:
dataset_name = os.path.basename(dataset)[:-5]
print(f"Processing dataset: {dataset} into query and question "
f"splits at output path: {output_directory + '/' + dataset_name}")
full_dataset = json.load(open(dataset))
if not isinstance(full_dataset, list):
full_dataset = [full_dataset]
for split_type in ["query_split", "question_split"]:
dataset_out = os.path.join(output_directory, dataset_name, split_type)
for split, split_dataset in process_dataset(full_dataset, split_type):
dataset_out = os.path.join(output_directory, dataset_name, split_type)
os.makedirs(dataset_out, exist_ok=True)
json.dump(split_dataset, open(os.path.join(dataset_out, split), "w"), indent=4) |
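A simplified, hypothetical sketch of the "question split" behaviour described in the docstring: each sentence is routed to its own split, with the rest of the blob duplicated alongside it.

```python
from collections import defaultdict

def question_split(dataset):
    splits = defaultdict(list)
    for blob in dataset:
        for sentence in blob["sentences"]:
            # Duplicate the blob, keeping only this one sentence.
            new_blob = {**blob, "sentences": [sentence]}
            splits[sentence["question-split"]].append(new_blob)
    return splits

data = [{
    "query-split": "train",
    "sentences": [{"text": "q1", "question-split": "train"},
                  {"text": "q2", "question-split": "dev"}],
    "sql": ["SELECT ..."],
}]
for split, blobs in question_split(data).items():
    print(split, len(blobs))
# train 1
# dev 1
```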
def result(self, line=''):
"""Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923
"""
args = magic_arguments.parse_argstring(self.result, line)
if self.last_result is None:
raise UsageError(NO_LAST_RESULT)
self.last_result.get()
self.last_result.display_outputs(groupby=args.groupby) | Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923 | Below is the instruction that describes the task:
### Input:
Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923
### Response:
def result(self, line=''):
"""Print the result of the last asynchronous %px command.
This lets you recall the results of %px computations after
asynchronous submission (block=False).
Examples
--------
::
In [23]: %px os.getpid()
Async parallel execution on engine(s): all
In [24]: %pxresult
Out[8:10]: 60920
Out[9:10]: 60921
Out[10:10]: 60922
Out[11:10]: 60923
"""
args = magic_arguments.parse_argstring(self.result, line)
if self.last_result is None:
raise UsageError(NO_LAST_RESULT)
self.last_result.get()
self.last_result.display_outputs(groupby=args.groupby) |
def prep_jid(nocache=False, passed_jid=None):
'''
Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless it's passed a jid). So do what you have to do to make sure that
stays the case
'''
conn = _get_conn()
if conn is None:
return None
cur = conn.cursor()
if passed_jid is None:
jid = _gen_jid(cur)
else:
jid = passed_jid
while not jid:
log.info("jid clash, generating a new one")
jid = _gen_jid(cur)
cur.close()
conn.close()
return jid | Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless it's passed a jid). So do what you have to do to make sure that
stays the case | Below is the instruction that describes the task:
### Input:
Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless it's passed a jid). So do what you have to do to make sure that
stays the case
### Response:
def prep_jid(nocache=False, passed_jid=None):
'''
Return a job id and prepare the job id directory
This is the function responsible for making sure jids don't collide
(unless it's passed a jid). So do what you have to do to make sure that
stays the case
'''
conn = _get_conn()
if conn is None:
return None
cur = conn.cursor()
if passed_jid is None:
jid = _gen_jid(cur)
else:
jid = passed_jid
while not jid:
log.info("jid clash, generating a new one")
jid = _gen_jid(cur)
cur.close()
conn.close()
return jid |
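The collision-avoidance loop can be reproduced without the database connection. In the sketch below a plain set stands in for the job store, and `gen_jid` returns `None` on a clash, mirroring what `_gen_jid` appears to do:

```python
import uuid

def gen_jid(existing):
    jid = uuid.uuid4().hex
    # Signal a clash with None so the caller retries.
    return None if jid in existing else jid

def prep_jid(existing, passed_jid=None):
    jid = passed_jid if passed_jid is not None else gen_jid(existing)
    while not jid:
        jid = gen_jid(existing)
    existing.add(jid)
    return jid

existing_jids = set()
print(prep_jid(existing_jids))
```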
def logged_in(self):
"""
This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter and will result in a 505 error. If we tried to login with a
token, but the token was incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login
"""
try:
self._proxy.User.get({'ids': []})
return True
except Fault as e:
if e.faultCode == 505 or e.faultCode == 32000:
return False
raise e | This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter and will result in a 505 error. If we tried to login with a
token, but the token was incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login | Below is the instruction that describes the task:
### Input:
This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter and will result in a 505 error. If we tried to login with a
token, but the token was incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login
### Response:
def logged_in(self):
"""
This is True if this instance is logged in else False.
We test if this session is authenticated by calling the User.get()
XMLRPC method with ids set. Logged-out users cannot pass the 'ids'
parameter and will result in a 505 error. If we tried to login with a
token, but the token was incorrect or expired, the server returns a
32000 error.
For Bugzilla 5 and later, a new method, User.valid_login is available
to test the validity of the token. However, this will require that the
username be cached along with the token in order to work effectively in
all scenarios and is not currently used. For more information, refer to
the following url.
http://bugzilla.readthedocs.org/en/latest/api/core/v1/user.html#valid-login
"""
try:
self._proxy.User.get({'ids': []})
return True
except Fault as e:
if e.faultCode == 505 or e.faultCode == 32000:
return False
raise e |
def p_catch(self, p):
"""catch : CATCH LPAREN identifier RPAREN block"""
p[0] = ast.Catch(identifier=p[3], elements=p[5]) | catch : CATCH LPAREN identifier RPAREN block | Below is the instruction that describes the task:
### Input:
catch : CATCH LPAREN identifier RPAREN block
### Response:
def p_catch(self, p):
"""catch : CATCH LPAREN identifier RPAREN block"""
p[0] = ast.Catch(identifier=p[3], elements=p[5]) |
def yield_event(self, act):
"""
Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor.
"""
if act in self.tokens:
coro = act.coro
op = self.try_run_act(act, self.tokens[act])
if op:
del self.tokens[act]
return op, coro | Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor. | Below is the instruction that describes the task:
### Input:
Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor.
### Response:
def yield_event(self, act):
"""
Handle completion for a request and return an (op, coro) to be
passed to the scheduler on the last completion loop of a proactor.
"""
if act in self.tokens:
coro = act.coro
op = self.try_run_act(act, self.tokens[act])
if op:
del self.tokens[act]
return op, coro |
def get_attribute_data(doc):
"""Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included
"""
attributes = dict()
for attribute_node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
# label node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
label = " ".join(node.itertext()).strip()
# value node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
# get value, first link, and the link text
value = " ".join(node.itertext()).strip()
link_node = node.find('a')
if link_node is not None:
link = link_node.get('href')
link_text = link_node.text
else:
link = None
link_text = None
# store result
attributes[label] = dict(value=value,
link=link,
link_text=link_text)
return attributes | Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included | Below is the the instruction that describes the task:
### Input:
Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included
### Response:
def get_attribute_data(doc):
"""Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute value is included
"""
attributes = dict()
for attribute_node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
# label node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
label = " ".join(node.itertext()).strip()
# value node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
# get value, first link, and the link text
value = " ".join(node.itertext()).strip()
link_node = node.find('a')
if link_node is not None:
link = link_node.get('href')
link_text = link_node.text
else:
link = None
link_text = None
# store result
attributes[label] = dict(value=value,
link=link,
link_text=link_text)
return attributes |
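The XPath extraction can be exercised on a tiny, made-up snippet of infobox HTML (this requires `lxml` to be installed):

```python
import lxml.html

html = """
<div class="pi-data ">
  <h3 class="pi-data-label">Species</h3>
  <div class="pi-data-value"><a href="/wiki/Human">Human</a></div>
</div>
"""
doc = lxml.html.fromstring(html)
for node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
    label = " ".join(node.xpath(".//*[contains(@class, 'pi-data-label')]")[0].itertext()).strip()
    value_node = node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
    value = " ".join(value_node.itertext()).strip()
    link = value_node.find("a")
    print(label, value, link.get("href") if link is not None else None)
# -> Species Human /wiki/Human
```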
def get_bank_name(clabe: str) -> str:
"""
Return the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
code = clabe[:3]
try:
bank_name = BANK_NAMES[BANKS[code]]
except KeyError:
raise ValueError(f"Ningún banco tiene código '{code}'")
else:
return bank_name | Return the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control | Below is the instruction that describes the task:
### Input:
Return the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
### Response:
def get_bank_name(clabe: str) -> str:
"""
Return the bank name based on the first 3 digits
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
code = clabe[:3]
try:
bank_name = BANK_NAMES[BANKS[code]]
except KeyError:
raise ValueError(f"Ningún banco tiene código '{code}'")
else:
return bank_name |
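With a tiny, made-up subset of the code tables, the lookup behaves like this (the real `BANKS`/`BANK_NAMES` mappings cover every participating bank):

```python
BANKS = {"002": "BANAMEX", "012": "BBVA_BANCOMER"}
BANK_NAMES = {"BANAMEX": "Banamex", "BBVA_BANCOMER": "BBVA Bancomer"}

def get_bank_name(clabe: str) -> str:
    code = clabe[:3]
    try:
        return BANK_NAMES[BANKS[code]]
    except KeyError:
        raise ValueError(f"No bank has code '{code}'")

print(get_bank_name("012180001234567897"))  # BBVA Bancomer
```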
def _set_offset_cpu(self, v, load=False):
"""
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """offset_cpu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
})
self.__offset_cpu = t
if hasattr(self, '_set'):
self._set() | Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly. | Below is the instruction that describes the task:
### Input:
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
### Response:
def _set_offset_cpu(self, v, load=False):
"""
Setter method for offset_cpu, mapped from YANG variable /resource_monitor/cpu/offset_cpu (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_offset_cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_offset_cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """offset_cpu must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0 .. 70']}), is_leaf=True, yang_name="offset-cpu", rest_name="thresh-offset", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Offset to CPU threshold for testing', u'hidden': u'debug', u'alt-name': u'thresh-offset', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
})
self.__offset_cpu = t
if hasattr(self, '_set'):
self._set() |
def _get_exception_log_path():
"""Return the normalized path for the connection log, raising an
exception if it cannot be written to.
:return: str
"""
app = sys.argv[0].split('/')[-1]
for exception_log in ['/var/log/%s.errors' % app,
'/var/tmp/%s.errors' % app,
'/tmp/%s.errors' % app]:
if os.access(path.dirname(exception_log), os.W_OK):
return exception_log
return None | Return the normalized path for the connection log, raising an
exception if it cannot be written to.
:return: str | Below is the instruction that describes the task:
### Input:
Return the normalized path for the connection log, raising an
exception if it cannot be written to.
:return: str
### Response:
def _get_exception_log_path():
"""Return the normalized path for the connection log, raising an
exception if it cannot be written to.
:return: str
"""
app = sys.argv[0].split('/')[-1]
for exception_log in ['/var/log/%s.errors' % app,
'/var/tmp/%s.errors' % app,
'/tmp/%s.errors' % app]:
if os.access(path.dirname(exception_log), os.W_OK):
return exception_log
return None |
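The "first writable directory wins" pattern is easy to run standalone:

```python
import os
import sys

def pick_log_path(app_name=None):
    app = app_name or os.path.basename(sys.argv[0] or "app")
    for candidate in ('/var/log/%s.errors' % app,
                      '/var/tmp/%s.errors' % app,
                      '/tmp/%s.errors' % app):
        # Use the first location whose directory we can write to.
        if os.access(os.path.dirname(candidate), os.W_OK):
            return candidate
    return None

print(pick_log_path("demo"))  # e.g. /var/tmp/demo.errors or /tmp/demo.errors
```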
def drug_names_match_criteria(drug_names: List[str],
names_are_generic: bool = False,
include_categories: bool = False,
**criteria: Dict[str, bool]) -> List[bool]:
"""
Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`.
"""
return [
drug_name_matches_criteria(
dn,
name_is_generic=names_are_generic,
include_categories=include_categories,
**criteria)
for dn in drug_names
] | Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`. | Below is the instruction that describes the task:
### Input:
Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`.
### Response:
def drug_names_match_criteria(drug_names: List[str],
names_are_generic: bool = False,
include_categories: bool = False,
**criteria: Dict[str, bool]) -> List[bool]:
"""
Establish whether multiple drugs, passed as a list of drug names, each
matches the specified criteria. See :func:`drug_matches_criteria`.
"""
return [
drug_name_matches_criteria(
dn,
name_is_generic=names_are_generic,
include_categories=include_categories,
**criteria)
for dn in drug_names
] |
def dumpJSON(self):
"""
Return dictionary of data for FITS headers.
"""
g = get_root(self).globals
return dict(
RA=self.ra['text'],
DEC=self.dec['text'],
tel=g.cpars['telins_name'],
alt=self._getVal(self.alt),
az=self._getVal(self.az),
secz=self._getVal(self.airmass),
pa=self._getVal(self.pa),
foc=self._getVal(self.focus),
mdist=self._getVal(self.mdist)
) | Return dictionary of data for FITS headers. | Below is the instruction that describes the task:
### Input:
Return dictionary of data for FITS headers.
### Response:
def dumpJSON(self):
"""
Return dictionary of data for FITS headers.
"""
g = get_root(self).globals
return dict(
RA=self.ra['text'],
DEC=self.dec['text'],
tel=g.cpars['telins_name'],
alt=self._getVal(self.alt),
az=self._getVal(self.az),
secz=self._getVal(self.airmass),
pa=self._getVal(self.pa),
foc=self._getVal(self.focus),
mdist=self._getVal(self.mdist)
) |
def workflow(
graph: BELGraph,
node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
minimum_nodes: int = 1,
) -> List['Runner']:
"""Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners
"""
subgraph = generate_mechanism(graph, node, key=key)
if subgraph.number_of_nodes() <= minimum_nodes:
return []
runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)
return list(runners) | Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners | Below is the the instruction that describes the task:
### Input:
Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners
### Response:
def workflow(
graph: BELGraph,
node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
minimum_nodes: int = 1,
) -> List['Runner']:
"""Generate candidate mechanisms and run the heat diffusion workflow.
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param minimum_nodes: The minimum number of nodes a sub-graph needs to try running heat diffusion
:return: A list of runners
"""
subgraph = generate_mechanism(graph, node, key=key)
if subgraph.number_of_nodes() <= minimum_nodes:
return []
runners = multirun(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)
return list(runners) |
def AND(*args, **kwargs):
"""
ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error.
"""
for arg in args:
try:
arg()
except CertifierError as e:
exc = kwargs.get('exc', None)
if exc is not None:
raise exc(e)
raise | ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error. | Below is the instruction that describes the task:
### Input:
ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error.
### Response:
def AND(*args, **kwargs):
"""
ALL args must not raise an exception when called incrementally.
If an exception is specified, raise it, otherwise raise the callable's exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
:raises CertifierError:
The first certifier error if at least one raises a certifier error.
"""
for arg in args:
try:
arg()
except CertifierError as e:
exc = kwargs.get('exc', None)
if exc is not None:
raise exc(e)
raise |
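A self-contained sketch of the combinator: run every check callable and, on the first `CertifierError`, either re-raise it or wrap it in a caller-supplied exception type. The `CertifierError` class and the `must_be_positive` check are stand-ins, and `exc` is taken as a keyword argument rather than read from `**kwargs`.

```python
class CertifierError(Exception):
    pass

def AND(*checks, exc=None):
    for check in checks:
        try:
            check()
        except CertifierError as e:
            # Wrap the failure if the caller supplied an exception type.
            if exc is not None:
                raise exc(e)
            raise

def must_be_positive(x):
    if x <= 0:
        raise CertifierError(f"{x} is not positive")

AND(lambda: must_be_positive(3), lambda: must_be_positive(5))   # passes silently
try:
    AND(lambda: must_be_positive(-1), exc=ValueError)
except ValueError as e:
    print("wrapped:", e)
```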
def _assert_input_is_valid(input_value, # type: Any
validators, # type: List[InputValidator]
validated_func, # type: Callable
input_name # type: str
):
"""
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing
"""
for validator in validators:
validator.assert_valid(input_name, input_value) | Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing | Below is the the instruction that describes the task:
### Input:
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing
### Response:
def _assert_input_is_valid(input_value, # type: Any
validators, # type: List[InputValidator]
validated_func, # type: Callable
input_name # type: str
):
"""
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before
executing the function. It simply delegates to the validator. The signature of this function is hardcoded to
correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed.
:param input_value: the value to validate
:param validator: the Validator object that will be applied on input_value_to_validate
:param validated_func: the function for which this validation is performed. This is not used since the Validator
knows it already, but we should not change the signature here.
:param input_name: the name of the function input that is being validated
:return: Nothing
"""
for validator in validators:
validator.assert_valid(input_name, input_value) |
def expired(self):
"""Called when an expired session is atime"""
self._data["_killed"] = True
self.save()
raise SessionExpired(self._config.expired_message) | Called when an expired session is accessed | Below is the the instruction that describes the task:
### Input:
Called when an expired session is accessed
### Response:
def expired(self):
"""Called when an expired session is atime"""
self._data["_killed"] = True
self.save()
raise SessionExpired(self._config.expired_message) |
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
# type: (DictUpperBound, DictUpperBound, str) -> Dict
"""Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary
"""
d1keys = set(d1.keys())
d2keys = set(d2.keys())
both = d1keys & d2keys
diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]}
diff.update({k: (d1[k], no_key) for k in d1keys - both})
diff.update({k: (no_key, d2[k]) for k in d2keys - both})
return diff | Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary | Below is the the instruction that describes the task:
### Input:
Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary
### Response:
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
# type: (DictUpperBound, DictUpperBound, str) -> Dict
"""Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary
"""
d1keys = set(d1.keys())
d2keys = set(d2.keys())
both = d1keys & d2keys
diff = {k: (d1[k], d2[k]) for k in both if d1[k] != d2[k]}
diff.update({k: (d1[k], no_key) for k in d1keys - both})
diff.update({k: (no_key, d2[k]) for k in d2keys - both})
return diff |
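A quick worked example of dict_diff's output shape (key order in the result may vary):

d1 = {'a': 1, 'b': 2, 'c': 3}
d2 = {'a': 1, 'b': 5, 'd': 4}
print(dict_diff(d1, d2))
# {'b': (2, 5), 'c': (3, '<KEYNOTFOUND>'), 'd': ('<KEYNOTFOUND>', 4)}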
def register(self, event, fn):
"""
Registers the given function as a handler to be applied
in response to the given event.
"""
# TODO: Can we check the method signature?
self._handler_dict.setdefault(event, [])
if fn not in self._handler_dict[event]:
self._handler_dict[event].append(fn) | Registers the given function as a handler to be applied
in response to the given event. | Below is the the instruction that describes the task:
### Input:
Registers the given function as a handler to be applied
in response to the given event.
### Response:
def register(self, event, fn):
"""
Registers the given function as a handler to be applied
in response to the given event.
"""
# TODO: Can we check the method signature?
self._handler_dict.setdefault(event, [])
if fn not in self._handler_dict[event]:
self._handler_dict[event].append(fn) |
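Because register only touches self._handler_dict, a small stand-in class is enough to exercise it; this sketch assumes register is available as a plain function as shown above, and the names below are illustrative.

class _Dispatcher:
    def __init__(self):
        self._handler_dict = {}
    register = register            # reuse the function above as a method

d = _Dispatcher()
d.register("saved", print)
d.register("saved", print)          # the duplicate handler is not appended again
print(d._handler_dict)              # {'saved': [<built-in function print>]}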
def fit_effective_mass(distances, energies, parabolic=True):
"""Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`.
"""
if parabolic:
fit = np.polyfit(distances, energies, 2)
c = 2 * fit[0] # curvature therefore 2 * the exponent on the ^2 term
else:
# Use non parabolic description of the bands
def f(x, alpha, d):
top = np.sqrt(4 * alpha * d * x**2 + 1) - 1
bot = 2 * alpha
return top / bot
# set boundaries for curve fitting: alpha > 1e-8
# as alpha = 0 causes an error
bounds = ((1e-8, -np.inf), (np.inf, np.inf))
popt, _ = curve_fit(f, distances, energies, p0=[1., 1.],
bounds=bounds)
c = 2 * popt[1]
# coefficient is currently in eV/Angstrom^2/h_bar^2
# want it in atomic units so Hartree/bohr^2/h_bar^2
eff_mass = (angstrom_to_bohr**2 / eV_to_hartree) / c
return eff_mass | Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`. | Below is the the instruction that describes the task:
### Input:
Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`.
### Response:
def fit_effective_mass(distances, energies, parabolic=True):
"""Fit the effective masses using either a parabolic or nonparabolic fit.
Args:
distances (:obj:`numpy.ndarray`): The x-distances between k-points in
reciprocal Angstroms, normalised to the band extrema.
energies (:obj:`numpy.ndarray`): The band eigenvalues normalised to the
eigenvalue of the band extrema.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
float: The effective mass in units of electron rest mass, :math:`m_0`.
"""
if parabolic:
fit = np.polyfit(distances, energies, 2)
c = 2 * fit[0] # curvature therefore 2 * the exponent on the ^2 term
else:
# Use non parabolic description of the bands
def f(x, alpha, d):
top = np.sqrt(4 * alpha * d * x**2 + 1) - 1
bot = 2 * alpha
return top / bot
# set boundaries for curve fitting: alpha > 1e-8
# as alpha = 0 causes an error
bounds = ((1e-8, -np.inf), (np.inf, np.inf))
popt, _ = curve_fit(f, distances, energies, p0=[1., 1.],
bounds=bounds)
c = 2 * popt[1]
# coefficient is currently in eV/Angstrom^2/h_bar^2
# want it in atomic units so Hartree/bohr^2/h_bar^2
eff_mass = (angstrom_to_bohr**2 / eV_to_hartree) / c
return eff_mass |
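A rough self-check with a synthetic parabolic band, assuming numpy, scipy.optimize.curve_fit and the module-level constants angstrom_to_bohr and eV_to_hartree referenced above are all in scope:

import numpy as np

distances = np.linspace(0.0, 0.3, 20)    # reciprocal Angstrom, measured from the band edge
energies = 4.0 * distances ** 2          # ideal parabolic dispersion in eV
m_eff = fit_effective_mass(distances, energies, parabolic=True)
print(m_eff)                             # effective mass in units of m_0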
def availability(self, dcid, params=None):
''' /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
you will need to pass your api_key in in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available
'''
params = update_params(params, {'DCID': dcid})
return self.request('/v1/regions/availability', params, 'GET') | /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
you will need to pass your api_key in in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available | Below is the the instruction that describes the task:
### Input:
/v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
you will need to pass your api_key in in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available
### Response:
def availability(self, dcid, params=None):
''' /v1/regions/availability
GET - public
Retrieve a list of the VPSPLANIDs currently available
in this location. If your account has special plans available,
you will need to pass your api_key in in order to see them.
For all other accounts, the API key is not optional.
Link: https://www.vultr.com/api/#regions_region_available
'''
params = update_params(params, {'DCID': dcid})
return self.request('/v1/regions/availability', params, 'GET') |
def set_residual(self, pores=[], overwrite=False):
r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
Step 3. For those that are, merge into the inlet cluster. May be connected
to more than one - run should sort this out
Step 4. For those that are isolated, set the queue to not invading.
Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading
"""
Ps = self._parse_indices(pores)
if overwrite:
self['pore.residual'] = False
self['pore.residual'][Ps] = True
residual = self['pore.residual']
net = self.project.network
conns = net['throat.conns']
rclusters = site_percolation(conns, residual).sites
rcluster_ids = np.unique(rclusters[rclusters > -1])
initial_num = len(self.queue)-1
for rcluster_id in rcluster_ids:
rPs = rclusters == rcluster_id
existing = np.unique(self['pore.cluster'][rPs])
existing = existing[existing > -1]
if len(existing) > 0:
# There was at least one inlet cluster connected to this
# residual cluster, pick the first one.
cluster_num = existing[0]
else:
# Make a new cluster queue
cluster_num = len(self.queue)
self.queue.append([])
queue = self.queue[cluster_num]
# Set the residual pores and inner throats as part of cluster
self['pore.cluster'][rPs] = cluster_num
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='xnor')
self['throat.cluster'][Ts] = cluster_num
self['pore.invasion_sequence'][rPs] = 0
self['throat.invasion_sequence'][Ts] = 0
self['pore.invasion_pressure'][rPs] = -np.inf
self['throat.invasion_pressure'][Ts] = -np.inf
# Add all the outer throats to the queue
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='exclusive_or')
for T in Ts:
data = []
# Pc
data.append(self['throat.entry_pressure'][T])
# Element Index
data.append(T)
# Element Type (Pore of Throat)
data.append('throat')
hq.heappush(queue, data)
self.invasion_running = [True]*len(self.queue)
# we have added new clusters that are currently isolated and we
# need to stop them invading until they merge into an invading
# cluster
for c_num in range(len(self.queue)):
if c_num > initial_num:
self.invasion_running[c_num] = False | r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
Step 3. For those that are, merge into the inlet cluster. May be connected
to more than one - run should sort this out
Step 4. For those that are isolated, set the queue to not invading.
Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading
### Input:
r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
Step 3. For those that are, merge into the inlet cluster. May be connected
to more than one - run should sort this out
Step 4. For those that are isolated, set the queue to not invading.
Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading
### Response:
def set_residual(self, pores=[], overwrite=False):
r"""
Method to start invasion in a network w. residual saturation.
Called after inlets are set.
Parameters
----------
pores : array_like
The pores locations that are to be filled with invader at the
beginning of the simulation.
overwrite : boolean
If ``True`` then all existing inlet locations will be removed and
then the supplied locations will be added. If ``False``, then
supplied locations are added to any already existing locations.
Notes
-----
Currently works for pores only and treats inner throats, i.e.
those that connect two pores in the cluster as invaded and outer ones
as uninvaded. Uninvaded throats are added to a new residual cluster
queue but do not start invading independently if not connected to an
inlet.
Step 1. Identify clusters in the phase occupancy.
Step 2. Look for clusters that are connected or contain an inlet
Step 3. For those that are, merge into the inlet cluster. May be connected
to more than one - run should sort this out
Step 4. For those that are isolated, set the queue to not invading.
Step 5. (in run) When an isolated cluster is met by an invading cluster it
merges in and starts invading
"""
Ps = self._parse_indices(pores)
if overwrite:
self['pore.residual'] = False
self['pore.residual'][Ps] = True
residual = self['pore.residual']
net = self.project.network
conns = net['throat.conns']
rclusters = site_percolation(conns, residual).sites
rcluster_ids = np.unique(rclusters[rclusters > -1])
initial_num = len(self.queue)-1
for rcluster_id in rcluster_ids:
rPs = rclusters == rcluster_id
existing = np.unique(self['pore.cluster'][rPs])
existing = existing[existing > -1]
if len(existing) > 0:
# There was at least one inlet cluster connected to this
# residual cluster, pick the first one.
cluster_num = existing[0]
else:
# Make a new cluster queue
cluster_num = len(self.queue)
self.queue.append([])
queue = self.queue[cluster_num]
# Set the residual pores and inner throats as part of cluster
self['pore.cluster'][rPs] = cluster_num
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='xnor')
self['throat.cluster'][Ts] = cluster_num
self['pore.invasion_sequence'][rPs] = 0
self['throat.invasion_sequence'][Ts] = 0
self['pore.invasion_pressure'][rPs] = -np.inf
self['throat.invasion_pressure'][Ts] = -np.inf
# Add all the outer throats to the queue
Ts = net.find_neighbor_throats(pores=rPs,
flatten=True,
mode='exclusive_or')
for T in Ts:
data = []
# Pc
data.append(self['throat.entry_pressure'][T])
# Element Index
data.append(T)
# Element Type (Pore of Throat)
data.append('throat')
hq.heappush(queue, data)
self.invasion_running = [True]*len(self.queue)
# we have added new clusters that are currently isolated and we
# need to stop them invading until they merge into an invading
# cluster
for c_num in range(len(self.queue)):
if c_num > initial_num:
self.invasion_running[c_num] = False |
def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
"""Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
"""
batch_size = 100
counter = 0
collections = {}
docs = {}
if on_duplicate not in ["error", "update", "replace", "ignore"]:
log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
return
for (collection_name, doc) in doc_iterator:
if collection_name not in collections:
collections[collection_name] = db.collection(collection_name)
docs[collection_name] = []
counter += 1
docs[collection_name].append(doc)
if counter % batch_size == 0:
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = []
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = [] | Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk | Below is the the instruction that describes the task:
### Input:
Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
### Response:
def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
"""Batch load documents
Args:
db: ArangoDB client database handle
doc_iterator: function that yields (collection_name, doc_key, doc)
on_duplicate: defaults to replace, but can be error, update, replace or ignore
https://python-driver-for-arangodb.readthedocs.io/en/master/specs.html?highlight=import_bulk#arango.collection.StandardCollection.import_bulk
"""
batch_size = 100
counter = 0
collections = {}
docs = {}
if on_duplicate not in ["error", "update", "replace", "ignore"]:
log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
return
for (collection_name, doc) in doc_iterator:
if collection_name not in collections:
collections[collection_name] = db.collection(collection_name)
docs[collection_name] = []
counter += 1
docs[collection_name].append(doc)
if counter % batch_size == 0:
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = []
log.info(f"Bulk import arangodb: {counter}")
for cname in docs:
collections[cname].import_bulk(
docs[cname], on_duplicate=on_duplicate, halt_on_error=False
)
docs[cname] = [] |
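Note that the loop above unpacks two-element tuples, so the iterator only needs to yield (collection_name, doc) pairs even though the docstring also mentions a doc_key; the key can simply live inside the document as _key. A hypothetical iterator (names are illustrative, and db is assumed to be an already connected python-arango database whose nodes collection exists):

def node_docs():
    for i in range(250):
        yield ("nodes", {"_key": "node%d" % i, "value": i})

# batch_load_docs(db, node_docs(), on_duplicate="replace")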
def find_vc_pdir_vswhere(msvc_version):
"""
Find the MSVC product directory using vswhere.exe .
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir
"""
vswhere_path = os.path.join(
'C:\\',
'Program Files (x86)',
'Microsoft Visual Studio',
'Installer',
'vswhere.exe'
)
vswhere_cmd = [vswhere_path, '-version', msvc_version, '-property', 'installationPath']
if os.path.exists(vswhere_path):
sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
vsdir, err = sp.communicate()
vsdir = vsdir.decode("mbcs")
vsdir = vsdir.rstrip()
vc_pdir = os.path.join(vsdir, 'VC')
return vc_pdir
else:
# No vswhere on system, no install info available
return None | Find the MSVC product directory using vswhere.exe .
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir | Below is the the instruction that describes the task:
### Input:
Find the MSVC product directory using vswhere.exe .
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir
### Response:
def find_vc_pdir_vswhere(msvc_version):
"""
Find the MSVC product directory using vswhere.exe .
Run it asking for specified version and get MSVS install location
:param msvc_version:
:return: MSVC install dir
"""
vswhere_path = os.path.join(
'C:\\',
'Program Files (x86)',
'Microsoft Visual Studio',
'Installer',
'vswhere.exe'
)
vswhere_cmd = [vswhere_path, '-version', msvc_version, '-property', 'installationPath']
if os.path.exists(vswhere_path):
sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
vsdir, err = sp.communicate()
vsdir = vsdir.decode("mbcs")
vsdir = vsdir.rstrip()
vc_pdir = os.path.join(vsdir, 'VC')
return vc_pdir
else:
# No vswhere on system, no install info available
return None |
def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if 'surface_properties' in slab.site_properties.keys():
return slab
else:
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ['surface' if site in surf_sites
else 'subsurface' for site in slab.sites]
return slab.copy(
site_properties={'surface_properties': surf_props}) | Assigns site properties. | Below is the the instruction that describes the task:
### Input:
Assigns site properties.
### Response:
def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if 'surface_properties' in slab.site_properties.keys():
return slab
else:
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ['surface' if site in surf_sites
else 'subsurface' for site in slab.sites]
return slab.copy(
site_properties={'surface_properties': surf_props}) |
def df(self, src):
'''Perform ``df`` on a path'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-df', self._full_hdfs_path(src)], True) | Perform ``df`` on a path | Below is the the instruction that describes the task:
### Input:
Perform ``df`` on a path
### Response:
def df(self, src):
'''Perform ``df`` on a path'''
return self._getStdOutCmd([self._hadoop_cmd, 'fs', '-df', self._full_hdfs_path(src)], True) |
def get_identifier(self):
"""
For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type.
"""
ident = self.get_name()
if self.is_method:
args = ",".join(self.get_arg_type_descriptors())
if self.is_bridge():
ident = "%s(%s):%s" % (ident, args, self.get_descriptor())
else:
ident = "%s(%s)" % (ident, args)
return ident | For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type. | Below is the the instruction that describes the task:
### Input:
For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type.
### Response:
def get_identifier(self):
"""
For methods this is the return type, the name and the (non-pretty)
argument descriptor. For fields it is simply the name.
The return-type of methods is attached to the identifier when
it is a bridge method, which can technically allow two methods
with the same name and argument type list, but with different
return type.
"""
ident = self.get_name()
if self.is_method:
args = ",".join(self.get_arg_type_descriptors())
if self.is_bridge():
ident = "%s(%s):%s" % (ident, args, self.get_descriptor())
else:
ident = "%s(%s)" % (ident, args)
return ident |
def MultiWritePathHistory(self, client_path_histories):
"""Writes a collection of hash and stat entries observed for given paths."""
for client_path, client_path_history in iteritems(client_path_histories):
if client_path.client_id not in self.metadatas:
raise db.UnknownClientError(client_path.client_id)
path_info = rdf_objects.PathInfo(
path_type=client_path.path_type, components=client_path.components)
for timestamp, stat_entry in iteritems(client_path_history.stat_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddStatEntry(stat_entry, timestamp)
for timestamp, hash_entry in iteritems(client_path_history.hash_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddHashEntry(hash_entry, timestamp) | Writes a collection of hash and stat entries observed for given paths. | Below is the the instruction that describes the task:
### Input:
Writes a collection of hash and stat entries observed for given paths.
### Response:
def MultiWritePathHistory(self, client_path_histories):
"""Writes a collection of hash and stat entries observed for given paths."""
for client_path, client_path_history in iteritems(client_path_histories):
if client_path.client_id not in self.metadatas:
raise db.UnknownClientError(client_path.client_id)
path_info = rdf_objects.PathInfo(
path_type=client_path.path_type, components=client_path.components)
for timestamp, stat_entry in iteritems(client_path_history.stat_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddStatEntry(stat_entry, timestamp)
for timestamp, hash_entry in iteritems(client_path_history.hash_entries):
path_record = self._GetPathRecord(
client_path.client_id, path_info, set_default=False)
if path_record is None:
# TODO(hanuszczak): Provide more details about paths that caused that.
raise db.AtLeastOneUnknownPathError([])
path_record.AddHashEntry(hash_entry, timestamp) |
def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position() | Clear screen and go to 0,0 | Below is the the instruction that describes the task:
### Input:
Clear screen and go to 0,0
### Response:
def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position() |
def add_virtualip(self, lb, vip):
"""Adds the VirtualIP to the specified load balancer."""
resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id,
body=vip.to_dict())
return resp, body | Adds the VirtualIP to the specified load balancer. | Below is the the instruction that describes the task:
### Input:
Adds the VirtualIP to the specified load balancer.
### Response:
def add_virtualip(self, lb, vip):
"""Adds the VirtualIP to the specified load balancer."""
resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id,
body=vip.to_dict())
return resp, body |
def get_argument_starttime(self):
"""
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
"""
try:
starttime = self.get_argument(constants.PARAM_STARTTIME)
return starttime
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) | Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument. | Below is the the instruction that describes the task:
### Input:
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
### Response:
def get_argument_starttime(self):
"""
Helper function to get starttime argument.
Raises exception if argument is missing.
Returns the starttime argument.
"""
try:
starttime = self.get_argument(constants.PARAM_STARTTIME)
return starttime
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) |
def set_attributes_all(target, attributes, discard_others=True):
""" Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
values are ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array
"""
attrs = target.attrs
existing = dict(attrs.items())
# Generate special dtype for string arrays.
if sys.hexversion >= 0x03000000:
str_arr_dtype = h5py.special_dtype(vlen=str)
else:
str_arr_dtype = h5py.special_dtype(vlen=unicode)
# Go through each attribute. If it is already present, modify it if
# possible and create it otherwise (deletes old value.)
for k, (kind, value) in attributes.items():
if kind == 'string_array':
attrs.create(k, [convert_to_str(s) for s in value],
dtype=str_arr_dtype)
else:
if kind == 'string':
value = np.bytes_(value)
if k not in existing:
attrs.create(k, value)
else:
try:
if value.dtype == existing[k].dtype \
and value.shape == existing[k].shape:
attrs.modify(k, value)
except:
attrs.create(k, value)
# Discard all other attributes.
if discard_others:
for k in set(existing) - set(attributes):
del attrs[k] | Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
values are ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array | Below is the the instruction that describes the task:
### Input:
Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
values are ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array
### Response:
def set_attributes_all(target, attributes, discard_others=True):
""" Set Attributes in bulk and optionally discard others.
Sets each Attribute in turn (modifying it in place if possible if it
is already present) and optionally discarding all other Attributes
not explicitly set. This function yields much greater performance
than the required individual calls to ``set_attribute``,
``set_attribute_string``, ``set_attribute_string_array`` and
``del_attribute`` put together.
.. versionadded:: 0.2
Parameters
----------
target : Dataset or Group
Dataset or Group to set the Attributes of.
attributes : dict
The Attributes to set. The keys (``str``) are the names. The
values are ``tuple`` of the Attribute kind and the value to
set. Valid kinds are ``'string_array'``, ``'string'``, and
``'value'``. The values must correspond to what
``set_attribute_string_array``, ``set_attribute_string`` and
``set_attribute`` would take respectively.
discard_others : bool, optional
Whether to discard all other Attributes not explicitly set
(default) or not.
See Also
--------
set_attribute
set_attribute_string
set_attribute_string_array
"""
attrs = target.attrs
existing = dict(attrs.items())
# Generate special dtype for string arrays.
if sys.hexversion >= 0x03000000:
str_arr_dtype = h5py.special_dtype(vlen=str)
else:
str_arr_dtype = h5py.special_dtype(vlen=unicode)
# Go through each attribute. If it is already present, modify it if
# possible and create it otherwise (deletes old value.)
for k, (kind, value) in attributes.items():
if kind == 'string_array':
attrs.create(k, [convert_to_str(s) for s in value],
dtype=str_arr_dtype)
else:
if kind == 'string':
value = np.bytes_(value)
if k not in existing:
attrs.create(k, value)
else:
try:
if value.dtype == existing[k].dtype \
and value.shape == existing[k].shape:
attrs.modify(k, value)
except:
attrs.create(k, value)
# Discard all other attributes.
if discard_others:
for k in set(existing) - set(attributes):
del attrs[k] |
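A short usage sketch with h5py, assuming set_attributes_all and its helper convert_to_str are importable from the same module; the file name and attribute names are illustrative:

import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    grp = f.create_group("data")
    set_attributes_all(grp, {
        "units": ("string", "seconds"),              # stored as a bytes attribute
        "labels": ("string_array", ["a", "b", "c"]),
        "scale": ("value", np.float64(2.5)),
    }, discard_others=True)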
def on_linkType_changed(self, evt):
"""User changed link kind, so prepare available fields."""
if self.current_idx < 0:
evt.Skip()
return
n = self.linkType.GetSelection()
lt_str = self.linkType.GetString(n)
lt = self.link_code[lt_str]
self.prep_link_details(lt)
lnk = self.page_links[self.current_idx]
lnk["update"] = True
lnk["kind"] = lt
self.enable_update()
if lt == fitz.LINK_GOTO:
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
self.toPage.Enable()
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
self.toLeft.Enable()
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toHeight.Enable()
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_GOTOR:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toLeft.Enable()
self.toPage.Enable()
self.toFile.Enable()
self.toHeight.Enable()
lnk["file"] = self.toFile.Value
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_URI:
if not self.toURI.Value:
self.toURI.SetValue(self.text_in_rect())
self.toURI.MarkDirty()
lnk["uri"] = self.toURI.Value
self.toURI.Enable()
elif lt == fitz.LINK_LAUNCH:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
lnk["file"] = self.toFile.Value
self.toFile.Enable()
elif lt == fitz.LINK_NAMED:
self.toName.SetSelection(0)
self.toName.Enable()
self.page_links[self.current_idx] = lnk
evt.Skip()
return | User changed link kind, so prepare available fields. | Below is the the instruction that describes the task:
### Input:
User changed link kind, so prepare available fields.
### Response:
def on_linkType_changed(self, evt):
"""User changed link kind, so prepare available fields."""
if self.current_idx < 0:
evt.Skip()
return
n = self.linkType.GetSelection()
lt_str = self.linkType.GetString(n)
lt = self.link_code[lt_str]
self.prep_link_details(lt)
lnk = self.page_links[self.current_idx]
lnk["update"] = True
lnk["kind"] = lt
self.enable_update()
if lt == fitz.LINK_GOTO:
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
self.toPage.Enable()
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
self.toLeft.Enable()
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toHeight.Enable()
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_GOTOR:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
if not self.toPage.Value.isdecimal():
self.toPage.ChangeValue("1")
if not self.toLeft.Value.isdecimal():
self.toLeft.ChangeValue("0")
if not self.toHeight.Value.isdecimal():
self.toHeight.ChangeValue("0")
self.toLeft.Enable()
self.toPage.Enable()
self.toFile.Enable()
self.toHeight.Enable()
lnk["file"] = self.toFile.Value
lnk["page"] = int(self.toPage.Value) - 1
lnk["to"] = fitz.Point(int(self.toLeft.Value),
int(self.toHeight.Value))
elif lt == fitz.LINK_URI:
if not self.toURI.Value:
self.toURI.SetValue(self.text_in_rect())
self.toURI.MarkDirty()
lnk["uri"] = self.toURI.Value
self.toURI.Enable()
elif lt == fitz.LINK_LAUNCH:
if not self.toFile.Value:
self.toFile.SetValue(self.text_in_rect())
self.toFile.MarkDirty()
lnk["file"] = self.toFile.Value
self.toFile.Enable()
elif lt == fitz.LINK_NAMED:
self.toName.SetSelection(0)
self.toName.Enable()
self.page_links[self.current_idx] = lnk
evt.Skip()
return |
def _parsed_callback_wrapper(self, callback_parsed, callback_plain, foc, data):
"""Used to by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object."""
# used by PointDataObjectHandler as reference
if foc == R_FEED:
point_ref = data['pid']
else: # R_CONTROL
point_ref = Control(self, data[P_ENTITY_LID], data[P_LID], '0' * 32)
try:
data['parsed'] = self._get_point_data_handler_for(point_ref).get_template(data=data[P_DATA])
except RefreshException:
# No metadata available, do not produce warning
if callback_plain:
callback_plain(data)
except:
logger.warning('Failed to parse %s data for %s%s', foc_to_str(foc), point_ref,
'' if callback_plain else ', ignoring',
exc_info=DEBUG_ENABLED)
if callback_plain:
callback_plain(data)
else:
callback_parsed(data) | Used by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object. | Below is the the instruction that describes the task:
### Input:
Used by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object.
### Response:
def _parsed_callback_wrapper(self, callback_parsed, callback_plain, foc, data):
"""Used to by register_catchall_*data() and Thing class (follow, create_point) to present point data as an
object."""
# used by PointDataObjectHandler as reference
if foc == R_FEED:
point_ref = data['pid']
else: # R_CONTROL
point_ref = Control(self, data[P_ENTITY_LID], data[P_LID], '0' * 32)
try:
data['parsed'] = self._get_point_data_handler_for(point_ref).get_template(data=data[P_DATA])
except RefreshException:
# No metadata available, do not produce warning
if callback_plain:
callback_plain(data)
except:
logger.warning('Failed to parse %s data for %s%s', foc_to_str(foc), point_ref,
'' if callback_plain else ', ignoring',
exc_info=DEBUG_ENABLED)
if callback_plain:
callback_plain(data)
else:
callback_parsed(data) |
def run_scan_command(
self,
server_info: ServerConnectivityInfo,
scan_command: PluginScanCommand
) -> PluginScanResult:
"""Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass.
"""
plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command)
plugin = plugin_class()
return plugin.process_task(server_info, scan_command) | Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass. | Below is the the instruction that describes the task:
### Input:
Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass.
### Response:
def run_scan_command(
self,
server_info: ServerConnectivityInfo,
scan_command: PluginScanCommand
) -> PluginScanResult:
"""Run a single scan command against a server; will block until the scan command has been completed.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run against this server.
Returns:
The result of the scan command, which will be an instance of the scan command's
corresponding PluginScanResult subclass.
"""
plugin_class = self._plugins_repository.get_plugin_class_for_command(scan_command)
plugin = plugin_class()
return plugin.process_task(server_info, scan_command) |
def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y | Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x | Below is the the instruction that describes the task:
### Input:
Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
### Response:
def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y |
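Since get_gmm_pdf only reads a few attributes from self, a stand-in object is enough for a quick numerical check; the real class is not shown here, so the setup below is an assumption, and the defining module is taken to import math and numpy as the code above requires.

import numpy as np

class _GMM:
    get_gmm_pdf = get_gmm_pdf       # reuse the function above as a method

gmm = _GMM()
gmm.points = np.array([0.0, 1.0])
gmm.sigma = np.array([0.5, 0.5])
gmm.weights = np.array([0.4, 0.6])
gmm.W_sum = float(gmm.weights.sum())
gmm.min_limit, gmm.max_limit = -5.0, 5.0
print(gmm.get_gmm_pdf(0.5))         # mixture likelihood at x = 0.5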
def module_remove(name):
'''
Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
modules = __salt__['selinux.list_semod']()
if name not in modules:
ret['comment'] = 'Module {0} is not available'.format(name)
ret['result'] = False
return ret
if __salt__['selinux.remove_semod'](name):
ret['comment'] = 'Module {0} has been removed'.format(name)
return ret
ret['result'] = False
ret['comment'] = 'Failed to remove module {0}'.format(name)
return ret | Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6 | Below is the the instruction that describes the task:
### Input:
Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6
### Response:
def module_remove(name):
'''
Removes SELinux module
name
The name of the module to remove
.. versionadded:: 2016.11.6
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
modules = __salt__['selinux.list_semod']()
if name not in modules:
ret['comment'] = 'Module {0} is not available'.format(name)
ret['result'] = False
return ret
if __salt__['selinux.remove_semod'](name):
ret['comment'] = 'Module {0} has been removed'.format(name)
return ret
ret['result'] = False
ret['comment'] = 'Failed to remove module {0}'.format(name)
return ret |
def open(self, fp, mode='r'):
"""
Open the NMEAFile.
"""
self._file = open(fp, mode=mode)
return self._file | Open the NMEAFile. | Below is the the instruction that describes the task:
### Input:
Open the NMEAFile.
### Response:
def open(self, fp, mode='r'):
"""
Open the NMEAFile.
"""
self._file = open(fp, mode=mode)
return self._file |
def _load_data(
self,
resource,
detail_resource=None,
resource_id=None,
querystring=None,
traverse_pagination=False,
default=DEFAULT_VALUE_SAFEGUARD,
):
"""
Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API.
"""
default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
querystring = querystring if querystring else {}
cache_key = utils.get_cache_key(
resource=resource,
querystring=querystring,
traverse_pagination=traverse_pagination,
resource_id=resource_id
)
response = cache.get(cache_key)
if not response:
# Response is not cached, so make a call.
endpoint = getattr(self.client, resource)(resource_id)
endpoint = getattr(endpoint, detail_resource) if detail_resource else endpoint
response = endpoint.get(**querystring)
if traverse_pagination:
results = utils.traverse_pagination(response, endpoint)
response = {
'count': len(results),
'next': 'None',
'previous': 'None',
'results': results,
}
if response:
# Now that we've got a response, cache it.
cache.set(cache_key, response, settings.ENTERPRISE_API_CACHE_TIMEOUT)
return response or default_val | Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API. | Below is the the instruction that describes the task:
### Input:
Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API.
### Response:
def _load_data(
self,
resource,
detail_resource=None,
resource_id=None,
querystring=None,
traverse_pagination=False,
default=DEFAULT_VALUE_SAFEGUARD,
):
"""
Loads a response from a call to one of the Enterprise endpoints.
:param resource: The endpoint resource name.
:param detail_resource: The sub-resource to append to the path.
:param resource_id: The resource ID for the specific detail to get from the endpoint.
:param querystring: Optional query string parameters.
:param traverse_pagination: Whether to traverse pagination or return paginated response.
:param default: The default value to return in case of no response content.
:return: Data returned by the API.
"""
default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}
querystring = querystring if querystring else {}
cache_key = utils.get_cache_key(
resource=resource,
querystring=querystring,
traverse_pagination=traverse_pagination,
resource_id=resource_id
)
response = cache.get(cache_key)
if not response:
# Response is not cached, so make a call.
endpoint = getattr(self.client, resource)(resource_id)
endpoint = getattr(endpoint, detail_resource) if detail_resource else endpoint
response = endpoint.get(**querystring)
if traverse_pagination:
results = utils.traverse_pagination(response, endpoint)
response = {
'count': len(results),
'next': 'None',
'previous': 'None',
'results': results,
}
if response:
# Now that we've got a response, cache it.
cache.set(cache_key, response, settings.ENTERPRISE_API_CACHE_TIMEOUT)
return response or default_val |
def format_search(q, **kwargs):
'''Formats the results of a search'''
m = search(q, **kwargs)
count = m['count']
if not count:
raise DapiCommError('Could not find any DAP packages for your query.')
return
for mdap in m['results']:
mdap = mdap['content_object']
return _format_dap_with_description(mdap) | Formats the results of a search | Below is the the instruction that describes the task:
### Input:
Formats the results of a search
### Response:
def format_search(q, **kwargs):
'''Formats the results of a search'''
m = search(q, **kwargs)
count = m['count']
if not count:
raise DapiCommError('Could not find any DAP packages for your query.')
return
for mdap in m['results']:
mdap = mdap['content_object']
return _format_dap_with_description(mdap) |
def get_asset_lookup_session(self, proxy, *args, **kwargs):
"""Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true.
"""
if not self.supports_asset_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetLookupSession(proxy=proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise # OperationFailed()
return session | Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true. | Below is the the instruction that describes the task:
### Input:
Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true.
### Response:
def get_asset_lookup_session(self, proxy, *args, **kwargs):
"""Gets the OsidSession associated with the asset lookup service.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetLookupSession) - the new
AssetLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_lookup() is false
compliance: optional - This method must be implemented if
supports_asset_lookup() is true.
"""
if not self.supports_asset_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetLookupSession(proxy=proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise # OperationFailed()
return session |
def list_events_view(request):
''' A list view of upcoming events. '''
page_name = "Upcoming Events"
profile = UserProfile.objects.get(user=request.user)
event_form = EventForm(
request.POST if 'post_event' in request.POST else None,
profile=profile,
)
if event_form.is_valid():
event_form.save()
return HttpResponseRedirect(reverse('events:list'))
# a pseudo-dictionary, actually a list with items of form (event, ongoing,
# rsvpd, rsvp_form), where ongoing is a boolean of whether the event is
# currently ongoing, rsvpd is a boolean of whether the user has rsvp'd to
# the event
events_dict = list()
for event in Event.objects.filter(end_time__gte=now()):
rsvp_form = RsvpForm(
request.POST if "rsvp-{0}".format(event.pk) in request.POST else None,
instance=event,
profile=profile,
)
if rsvp_form.is_valid():
rsvpd = rsvp_form.save()
if rsvpd:
message = MESSAGES['RSVP_ADD'].format(event=event.title)
else:
message = MESSAGES['RSVP_REMOVE'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse('events:list'))
ongoing = ((event.start_time <= now()) and (event.end_time >= now()))
rsvpd = (profile in event.rsvps.all())
events_dict.append((event, ongoing, rsvpd, rsvp_form))
if request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES["EVENT_ERROR"])
return render_to_response('list_events.html', {
'page_name': page_name,
'events_dict': events_dict,
'now': now(),
'event_form': event_form,
}, context_instance=RequestContext(request)) | A list view of upcoming events. | Below is the instruction that describes the task:
### Input:
A list view of upcoming events.
### Response:
def list_events_view(request):
''' A list view of upcoming events. '''
page_name = "Upcoming Events"
profile = UserProfile.objects.get(user=request.user)
event_form = EventForm(
request.POST if 'post_event' in request.POST else None,
profile=profile,
)
if event_form.is_valid():
event_form.save()
return HttpResponseRedirect(reverse('events:list'))
# a pseudo-dictionary, actually a list with items of form (event, ongoing,
# rsvpd, rsvp_form), where ongoing is a boolean of whether the event is
# currently ongoing, rsvpd is a boolean of whether the user has rsvp'd to
# the event
events_dict = list()
for event in Event.objects.filter(end_time__gte=now()):
rsvp_form = RsvpForm(
request.POST if "rsvp-{0}".format(event.pk) in request.POST else None,
instance=event,
profile=profile,
)
if rsvp_form.is_valid():
rsvpd = rsvp_form.save()
if rsvpd:
message = MESSAGES['RSVP_ADD'].format(event=event.title)
else:
message = MESSAGES['RSVP_REMOVE'].format(event=event.title)
messages.add_message(request, messages.SUCCESS, message)
return HttpResponseRedirect(reverse('events:list'))
ongoing = ((event.start_time <= now()) and (event.end_time >= now()))
rsvpd = (profile in event.rsvps.all())
events_dict.append((event, ongoing, rsvpd, rsvp_form))
if request.method == "POST":
messages.add_message(request, messages.ERROR, MESSAGES["EVENT_ERROR"])
return render_to_response('list_events.html', {
'page_name': page_name,
'events_dict': events_dict,
'now': now(),
'event_form': event_form,
}, context_instance=RequestContext(request)) |
def get_cache_key(bucket, name, args, kwargs):
"""
Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key
"""
u = ''.join(map(str, (bucket, name, args, kwargs)))
return 'native_tags.%s' % sha_constructor(u).hexdigest() | Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key | Below is the instruction that describes the task:
### Input:
Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key
### Response:
def get_cache_key(bucket, name, args, kwargs):
"""
Gets a unique SHA1 cache key for any call to a native tag.
Use args and kwargs in hash so that the same arguments use the same key
"""
u = ''.join(map(str, (bucket, name, args, kwargs)))
return 'native_tags.%s' % sha_constructor(u).hexdigest() |
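The pair above hashes the full call signature, so identical tag invocations map to one cache entry. A minimal sketch of that keying idea, assuming `sha_constructor` is `hashlib.sha1` (the real helper is imported elsewhere in native_tags; the bucket and tag names below are made up):

import hashlib

def cache_key_sketch(bucket, name, args, kwargs):
    # Same concatenation as get_cache_key above: bucket, tag name, args, kwargs.
    u = ''.join(map(str, (bucket, name, args, kwargs)))
    return 'native_tags.%s' % hashlib.sha1(u.encode('utf-8')).hexdigest()

# Identical arguments always produce the identical cache key...
assert cache_key_sketch('function', 'mul', (2, 3), {}) == cache_key_sketch('function', 'mul', (2, 3), {})
# ...while different arguments produce a different key.
assert cache_key_sketch('function', 'mul', (2, 4), {}) != cache_key_sketch('function', 'mul', (2, 3), {})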
def mul(value, arg):
"""Multiply the arg with the value."""
try:
return valid_numeric(value) * valid_numeric(arg)
except (ValueError, TypeError):
try:
return value * arg
except Exception:
return '' | Multiply the arg with the value. | Below is the instruction that describes the task:
### Input:
Multiply the arg with the value.
### Response:
def mul(value, arg):
"""Multiply the arg with the value."""
try:
return valid_numeric(value) * valid_numeric(arg)
except (ValueError, TypeError):
try:
return value * arg
except Exception:
return '' |
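A runnable sketch of the fallback chain in `mul`, using a stand-in for `valid_numeric` (the real helper lives alongside the filter; this stand-in is only an assumption for illustration):

def valid_numeric(value):
    # Stand-in: accept numbers and numeric strings, raise ValueError otherwise.
    if isinstance(value, (int, float)):
        return value
    return float(value)

def mul_sketch(value, arg):
    try:
        return valid_numeric(value) * valid_numeric(arg)
    except (ValueError, TypeError):
        try:
            return value * arg
        except Exception:
            return ''

print(mul_sketch(3, 4))        # 12, via the numeric path
print(mul_sketch('ab', 3))     # 'ababab', via Python sequence repetition
print(mul_sketch('ab', 'cd'))  # '', neither multiplication works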
def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildConfigurations associated with the given Product.
"""
data = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | List all BuildConfigurations associated with the given Product. | Below is the instruction that describes the task:
### Input:
List all BuildConfigurations associated with the given Product.
### Response:
def list_build_configurations_for_product(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildConfigurations associated with the given Product.
"""
data = list_build_configurations_for_product_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) |
def smudge(newtype, target):
"""
Smudge magic bytes with a known type
"""
db = smudge_db.get()
magic_bytes = db[newtype]['magic']
magic_offset = db[newtype]['offset']
_backup_bytes(target, magic_offset, len(magic_bytes))
_smudge_bytes(target, magic_offset, magic_bytes) | Smudge magic bytes with a known type | Below is the instruction that describes the task:
### Input:
Smudge magic bytes with a known type
### Response:
def smudge(newtype, target):
"""
Smudge magic bytes with a known type
"""
db = smudge_db.get()
magic_bytes = db[newtype]['magic']
magic_offset = db[newtype]['offset']
_backup_bytes(target, magic_offset, len(magic_bytes))
_smudge_bytes(target, magic_offset, magic_bytes) |
def tipbod(ref, body, et):
"""
Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix()
libspice.tipbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) | Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats | Below is the instruction that describes the task:
### Input:
Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats
### Response:
def tipbod(ref, body, et):
"""
Return a 3x3 matrix that transforms positions in inertial
coordinates to positions in body-equator-and-prime-meridian
coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tipbod_c.html
:param ref: ID of inertial reference frame to transform from.
:type ref: str
:param body: ID code of body.
:type body: int
:param et: Epoch of transformation.
:type et: float
:return: Transformation (position), inertial to prime meridian.
:rtype: 3x3-Element Array of floats
"""
ref = stypes.stringToCharP(ref)
body = ctypes.c_int(body)
et = ctypes.c_double(et)
retmatrix = stypes.emptyDoubleMatrix()
libspice.tipbod_c(ref, body, et, retmatrix)
return stypes.cMatrixToNumpy(retmatrix) |
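A short usage sketch for `tipbod`, assuming a SPICE meta-kernel that loads a leapseconds kernel and a planetary constants kernel (the 'kernels.tm' path and the date are placeholders):

import numpy as np
import spiceypy as spice

spice.furnsh('kernels.tm')                 # hypothetical meta-kernel
et = spice.str2et('2020-01-01T00:00:00')   # epoch of the transformation
m = spice.tipbod('J2000', 399, et)         # 3x3 rotation, J2000 -> Earth body-fixed
pos_inertial = np.array([7000.0, 0.0, 0.0])
pos_bodyfixed = m.dot(pos_inertial)        # rotate an inertial position vector
spice.kclear()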
def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None,
property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
"""
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names) | Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned | Below is the instruction that describes the task:
### Input:
Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
### Response:
def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None,
property_names=None):
""" Performs a data extraction
Returns either a JSON object of events or a response
indicating an email will be sent to you with data.
:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events
happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe
and interval in seconds
:param filters: array of dict, contains the filters you'd like to apply to the data
example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional string containing an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
"""
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
filters=filters, latest=latest, email=email, property_names=property_names) |
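A usage sketch for the module-level `extraction` helper above. The collection name and filter are invented, and the environment variables named here are an assumption about how `_initialize_client_from_environment` finds its credentials:

import os

os.environ.setdefault('KEEN_PROJECT_ID', '<project-id>')
os.environ.setdefault('KEEN_READ_KEY', '<read-key>')

events = extraction(
    'purchases',                      # hypothetical collection
    timeframe='previous_7_days',
    filters=[{'property_name': 'device',
              'operator': 'eq',
              'property_value': 'iPhone'}],
    latest=100,
    property_names=['device', 'amount'],
)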
def invert(self, copy=False):
"""
Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not.
"""
if copy:
return Striplog([i.invert(copy=True) for i in self])
else:
for i in self:
i.invert()
self.__sort()
o = self.order
self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]
return | Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not. | Below is the instruction that describes the task:
### Input:
Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not.
### Response:
def invert(self, copy=False):
"""
Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not.
"""
if copy:
return Striplog([i.invert(copy=True) for i in self])
else:
for i in self:
i.invert()
self.__sort()
o = self.order
self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]
return |
def _check_point(self, lat, lng):
""" Checks if latitude and longitude correct """
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | Checks if latitude and longitude are correct | Below is the instruction that describes the task:
### Input:
Checks if latitude and longitude are correct
### Response:
def _check_point(self, lat, lng):
""" Checks if latitude and longitude correct """
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) |
def send(self, message):
""" Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account
"""
# Which provider to use?
provider_name = self._default_provider # default
if message.provider is not None:
assert message.provider in self._providers, \
'Unknown provider specified in OutgoingMessage.provider: {}'.format(provider_name)
provider = self.get_provider(message.provider)
else:
# Apply routing
if message.routing_values is not None: # Use the default provider when no routing values are given
# Routing values are present
provider_name = self.router(message, *message.routing_values) or self._default_provider
assert provider_name in self._providers, \
'Routing function returned an unknown provider name: {}'.format(provider_name)
provider = self.get_provider(provider_name)
# Set message provider name
message.provider = provider.name
# Send the message using the provider
message = provider.send(message)
# Emit the send event
self.onSend(message)
# Finish
return message | Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account | Below is the instruction that describes the task:
### Input:
Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account
### Response:
def send(self, message):
""" Send a message object
:type message: data.OutgoingMessage
:param message: The message to send
:rtype: data.OutgoingMessage
:returns: The sent message with populated fields
:raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage)
:raises MessageSendError: generic errors
:raises AuthError: provider authentication failed
:raises LimitsError: sending limits exceeded
:raises CreditError: not enough money on the account
"""
# Which provider to use?
provider_name = self._default_provider # default
if message.provider is not None:
assert message.provider in self._providers, \
'Unknown provider specified in OutgoingMessage.provider: {}'.format(provider_name)
provider = self.get_provider(message.provider)
else:
# Apply routing
if message.routing_values is not None: # Use the default provider when no routing values are given
# Routing values are present
provider_name = self.router(message, *message.routing_values) or self._default_provider
assert provider_name in self._providers, \
'Routing function returned an unknown provider name: {}'.format(provider_name)
provider = self.get_provider(provider_name)
# Set message provider name
message.provider = provider.name
# Send the message using the provider
message = provider.send(message)
# Emit the send event
self.onSend(message)
# Finish
return message |
def _update_mean_in_window(self):
"""
Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window) | Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal | Below is the instruction that describes the task:
### Input:
Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
### Response:
def _update_mean_in_window(self):
"""
Compute mean in window the slow way. useful for first step.
Considers all values in window
See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
"""
self._mean_x_in_window = numpy.mean(self._x_in_window)
self._mean_y_in_window = numpy.mean(self._y_in_window) |
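A small sketch of the trade-off the docstring describes: the slow full recompute above versus the O(1) incremental update used when one observation enters the window and one leaves (the window and values are made up):

import numpy as np

window = [1.0, 2.0, 3.0, 4.0]
mean = np.mean(window)                    # slow path: recompute from scratch

old, new = window[0], 5.0                 # slide the window by one observation
mean = mean + (new - old) / len(window)   # fast path: O(1) running-mean update

window = window[1:] + [new]
assert np.isclose(mean, np.mean(window))  # both paths agree (3.5)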
def add_listener(self, listener_type, callback):
""" add a listener to the widget
Args:
listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function"""
self.listener_type= listener_type
if listener_type == 'objectHovered':
self.listener_callback_source_hover= callback
elif listener_type == 'objectClicked':
self.listener_callback_source_click= callback
elif listener_type == 'click':
self.listener_callback_click= callback
elif listener_type == 'select':
self.listener_callback_select= callback
self.listener_flag= not self.listener_flag | add a listener to the widget
Args:
listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function | Below is the instruction that describes the task:
### Input:
add a listener to the widget
Args:
listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function
### Response:
def add_listener(self, listener_type, callback):
""" add a listener to the widget
Args:
listener_type: string that can either be 'objectHovered' or 'objectClicked'
callback: python function"""
self.listener_type= listener_type
if listener_type == 'objectHovered':
self.listener_callback_source_hover= callback
elif listener_type == 'objectClicked':
self.listener_callback_source_click= callback
elif listener_type == 'click':
self.listener_callback_click= callback
elif listener_type == 'select':
self.listener_callback_select= callback
self.listener_flag= not self.listener_flag |
def get_mopheader(expnum, ccd, version='p', prefix=None):
"""
Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header
"""
prefix = prefix is None and "" or prefix
mopheader_uri = dbimages_uri(expnum=expnum,
ccd=ccd,
version=version,
prefix=prefix,
ext='.mopheader')
if mopheader_uri in mopheaders:
return mopheaders[mopheader_uri]
filename = os.path.basename(mopheader_uri)
if os.access(filename, os.F_OK):
logger.debug("File already on disk: {}".format(filename))
mopheader_fpt = StringIO(open(filename, 'r').read())
else:
mopheader_fpt = StringIO(open_vos_or_local(mopheader_uri).read())
with warnings.catch_warnings():
warnings.simplefilter('ignore', AstropyUserWarning)
mopheader = fits.open(mopheader_fpt)
# add some values to the mopheader so it can be an astrom header too.
header = mopheader[0].header
try:
header['FWHM'] = get_fwhm(expnum, ccd)
except IOError:
header['FWHM'] = 10
header['SCALE'] = mopheader[0].header['PIXSCALE']
header['NAX1'] = header['NAXIS1']
header['NAX2'] = header['NAXIS2']
header['MOPversion'] = header['MOP_VER']
header['MJD_OBS_CENTER'] = str(Time(header['MJD-OBSC'],
format='mjd',
scale='utc', precision=5).replicate(format='mpc'))
header['MAXCOUNT'] = MAXCOUNT
mopheaders[mopheader_uri] = header
mopheader.close()
return mopheaders[mopheader_uri] | Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header | Below is the instruction that describes the task:
### Input:
Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header
### Response:
def get_mopheader(expnum, ccd, version='p', prefix=None):
"""
Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header
"""
prefix = prefix is None and "" or prefix
mopheader_uri = dbimages_uri(expnum=expnum,
ccd=ccd,
version=version,
prefix=prefix,
ext='.mopheader')
if mopheader_uri in mopheaders:
return mopheaders[mopheader_uri]
filename = os.path.basename(mopheader_uri)
if os.access(filename, os.F_OK):
logger.debug("File already on disk: {}".format(filename))
mopheader_fpt = StringIO(open(filename, 'r').read())
else:
mopheader_fpt = StringIO(open_vos_or_local(mopheader_uri).read())
with warnings.catch_warnings():
warnings.simplefilter('ignore', AstropyUserWarning)
mopheader = fits.open(mopheader_fpt)
# add some values to the mopheader so it can be an astrom header too.
header = mopheader[0].header
try:
header['FWHM'] = get_fwhm(expnum, ccd)
except IOError:
header['FWHM'] = 10
header['SCALE'] = mopheader[0].header['PIXSCALE']
header['NAX1'] = header['NAXIS1']
header['NAX2'] = header['NAXIS2']
header['MOPversion'] = header['MOP_VER']
header['MJD_OBS_CENTER'] = str(Time(header['MJD-OBSC'],
format='mjd',
scale='utc', precision=5).replicate(format='mpc'))
header['MAXCOUNT'] = MAXCOUNT
mopheaders[mopheader_uri] = header
mopheader.close()
return mopheaders[mopheader_uri] |
def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper | Register a class method lazily as a provider. | Below is the instruction that describes the task:
### Input:
Register a class method lazily as a provider.
### Response:
def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper |
def roll_alpha_beta(returns, factor_returns, window=10, **kwargs):
"""
Computes alpha and beta over a rolling window.
Parameters
----------
lhs : array-like
The first array to pass to the rolling alpha-beta.
rhs : array-like
The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`.
"""
returns, factor_returns = _aligned_series(returns, factor_returns)
return roll_alpha_beta_aligned(
returns,
factor_returns,
window=window,
**kwargs
) | Computes alpha and beta over a rolling window.
Parameters
----------
lhs : array-like
The first array to pass to the rolling alpha-beta.
rhs : array-like
The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`. | Below is the instruction that describes the task:
### Input:
Computes alpha and beta over a rolling window.
Parameters
----------
lhs : array-like
The first array to pass to the rolling alpha-beta.
rhs : array-like
The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`.
### Response:
def roll_alpha_beta(returns, factor_returns, window=10, **kwargs):
"""
Computes alpha and beta over a rolling window.
Parameters
----------
lhs : array-like
The first array to pass to the rolling alpha-beta.
rhs : array-like
The second array to pass to the rolling alpha-beta.
window : int
Size of the rolling window in terms of the periodicity of the data.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
**kwargs
Forwarded to :func:`~empyrical.alpha_beta`.
"""
returns, factor_returns = _aligned_series(returns, factor_returns)
return roll_alpha_beta_aligned(
returns,
factor_returns,
window=window,
**kwargs
) |
def getApplicationKeyByProcessId(self, unProcessId, pchAppKeyBuffer, unAppKeyBufferLen):
"""
Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key.
"""
fn = self.function_table.getApplicationKeyByProcessId
result = fn(unProcessId, pchAppKeyBuffer, unAppKeyBufferLen)
return result | Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key. | Below is the instruction that describes the task:
### Input:
Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key.
### Response:
def getApplicationKeyByProcessId(self, unProcessId, pchAppKeyBuffer, unAppKeyBufferLen):
"""
Returns the key of the application for the specified Process Id. The buffer should be at least
k_unMaxApplicationKeyLength in order to fit the key.
"""
fn = self.function_table.getApplicationKeyByProcessId
result = fn(unProcessId, pchAppKeyBuffer, unAppKeyBufferLen)
return result |
def _Descriptor_from_json(self, obj):
"""Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor
"""
descs = getattr(self, "_all_descriptors", None)
if descs is None:
from mordred import descriptors
descs = {
cls.__name__: cls
for cls in get_descriptors_in_module(descriptors)
}
descs[ConstDescriptor.__name__] = ConstDescriptor
self._all_descriptors = descs
return _from_json(obj, descs) | Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor | Below is the instruction that describes the task:
### Input:
Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor
### Response:
def _Descriptor_from_json(self, obj):
"""Create Descriptor instance from json dict.
Parameters:
obj(dict): descriptor dict
Returns:
Descriptor: descriptor
"""
descs = getattr(self, "_all_descriptors", None)
if descs is None:
from mordred import descriptors
descs = {
cls.__name__: cls
for cls in get_descriptors_in_module(descriptors)
}
descs[ConstDescriptor.__name__] = ConstDescriptor
self._all_descriptors = descs
return _from_json(obj, descs) |
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha * (n[0] - b))
n[1] -= (alpha * (n[1] - g))
n[2] -= (alpha * (n[2] - r)) | Move neuron i towards biased (b,g,r) by factor alpha | Below is the instruction that describes the task:
### Input:
Move neuron i towards biased (b,g,r) by factor alpha
### Response:
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha * (n[0] - b))
n[1] -= (alpha * (n[1] - g))
n[2] -= (alpha * (n[2] - r)) |
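A tiny numeric illustration of the update rule in `altersingle`: each channel of the hit neuron moves a fraction `alpha` of the way toward the target colour (values are arbitrary):

alpha = 0.5
neuron = [10.0, 20.0, 30.0]    # current (b, g, r) weights
target = (0.0, 0.0, 0.0)       # biased colour being learned

for i in range(3):
    neuron[i] -= alpha * (neuron[i] - target[i])

print(neuron)  # [5.0, 10.0, 15.0] -- halfway to the target for alpha = 0.5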
def _launch_editor(starting_text=''):
"Launch editor, let user write text, then return that text."
# TODO: What is a reasonable default for windows? Does this approach even
# make sense on windows?
editor = os.environ.get('EDITOR', 'vim')
with tempfile.TemporaryDirectory() as dirname:
filename = pathlib.Path(dirname) / 'metadata.yml'
with filename.open(mode='wt') as handle:
handle.write(starting_text)
subprocess.call([editor, filename])
with filename.open(mode='rt') as handle:
text = handle.read()
return text | Launch editor, let user write text, then return that text. | Below is the instruction that describes the task:
### Input:
Launch editor, let user write text, then return that text.
### Response:
def _launch_editor(starting_text=''):
"Launch editor, let user write text, then return that text."
# TODO: What is a reasonable default for windows? Does this approach even
# make sense on windows?
editor = os.environ.get('EDITOR', 'vim')
with tempfile.TemporaryDirectory() as dirname:
filename = pathlib.Path(dirname) / 'metadata.yml'
with filename.open(mode='wt') as handle:
handle.write(starting_text)
subprocess.call([editor, filename])
with filename.open(mode='rt') as handle:
text = handle.read()
return text |
def get_provider(self, provider_name='default'):
"""Fetch provider with the name specified in Configuration file"""
try:
if self._providers is None:
self._providers = self._initialize_providers()
return self._providers[provider_name]
except KeyError:
raise AssertionError(f'No Provider registered with name {provider_name}') | Fetch provider with the name specified in Configuration file | Below is the instruction that describes the task:
### Input:
Fetch provider with the name specified in Configuration file
### Response:
def get_provider(self, provider_name='default'):
"""Fetch provider with the name specified in Configuration file"""
try:
if self._providers is None:
self._providers = self._initialize_providers()
return self._providers[provider_name]
except KeyError:
raise AssertionError(f'No Provider registered with name {provider_name}') |
def _GetWinevtRcDatabaseReader(self):
"""Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None.
"""
if not self._winevt_database_reader and self._data_location:
database_path = os.path.join(
self._data_location, self._WINEVT_RC_DATABASE)
if not os.path.isfile(database_path):
return None
self._winevt_database_reader = (
winevt_rc.WinevtResourcesSqlite3DatabaseReader())
if not self._winevt_database_reader.Open(database_path):
self._winevt_database_reader = None
return self._winevt_database_reader | Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None. | Below is the instruction that describes the task:
### Input:
Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None.
### Response:
def _GetWinevtRcDatabaseReader(self):
"""Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None.
"""
if not self._winevt_database_reader and self._data_location:
database_path = os.path.join(
self._data_location, self._WINEVT_RC_DATABASE)
if not os.path.isfile(database_path):
return None
self._winevt_database_reader = (
winevt_rc.WinevtResourcesSqlite3DatabaseReader())
if not self._winevt_database_reader.Open(database_path):
self._winevt_database_reader = None
return self._winevt_database_reader |
def MDL(N, rho, k):
r"""Minimum Description Length
.. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results
"""
from numpy import log
#p = arange(1, len(rho)+1)
mdl = N* log(rho) + k * log(N)
return mdl | r"""Minimum Description Length
.. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results | Below is the instruction that describes the task:
### Input:
r"""Minimum Description Length
.. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results
### Response:
def MDL(N, rho, k):
r"""Minimum Description Length
.. math:: MDL(k) = N \log \rho_k + k \log N
:validation: results
"""
from numpy import log
#p = arange(1, len(rho)+1)
mdl = N* log(rho) + k * log(N)
return mdl |
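A worked example of the criterion above: for N samples and candidate orders k, the selected order minimises MDL(k), balancing the fit term N log rho_k against the k log N penalty (the residual variances rho below are invented):

import numpy as np

N = 1000
rho = np.array([2.0, 1.0, 0.95, 0.949])   # residual variance for orders 1..4
k = np.arange(1, len(rho) + 1)

mdl = N * np.log(rho) + k * np.log(N)     # same formula as MDL() above
print(mdl)                                # order 3 has the smallest value
print(k[np.argmin(mdl)])                  # -> 3: the drop to 0.949 no longer pays for an extra term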
def data_format_value(self):
"""
:return: The data type of the data component as integer value.
"""
try:
if self._part:
value = self._part.data_format
else:
value = self._buffer.pixel_format
except InvalidParameterException:
value = self._node_map.PixelFormat.value
return value | :return: The data type of the data component as integer value. | Below is the instruction that describes the task:
### Input:
:return: The data type of the data component as integer value.
### Response:
def data_format_value(self):
"""
:return: The data type of the data component as integer value.
"""
try:
if self._part:
value = self._part.data_format
else:
value = self._buffer.pixel_format
except InvalidParameterException:
value = self._node_map.PixelFormat.value
return value |
def summary(raster, geometry=None, all_touched=False, mean_only=False,
bounds=None, exclude_nodata_value=True):
"""Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels.
"""
def no_result(mean_only):
if mean_only:
return None
else:
return Summary(None, None, None, None, None, None)
try:
if geometry:
# If it's a string, assume WKT
if isinstance(geometry, six.string_types):
geometry = wkt.loads(geometry)
# If not already GeoJSON, assume it's a Shapely shape
if not isinstance(geometry, dict):
geojson = mapping(geometry)
else:
geojson = geometry
geometry = shape(geometry)
result, _ = mask(
raster, [geojson], crop=True, all_touched=all_touched,
)
pixels = result.data.flatten()
else:
pixels = raster.read(1).flatten()
except ValueError:
return no_result(mean_only)
raster_shape = raster_to_shape(raster)
if not raster_shape.contains(geometry):
log.warning(
'Geometry {} is not fully contained by the source raster'.format(
geometry,
)
)
if bounds:
score_mask = numpy.logical_and(
numpy.greater_equal(pixels, bounds[0]),
numpy.less_equal(pixels, bounds[1]),
)
else:
score_mask = numpy.not_equal(pixels, raster.nodata)
scored_pixels = numpy.extract(score_mask, pixels)
if len(scored_pixels):
if mean_only:
return scored_pixels.mean()
else:
if exclude_nodata_value:
count = len(scored_pixels)
else:
count = len(pixels)
return Summary(
count,
scored_pixels.sum(),
scored_pixels.mean(),
scored_pixels.min(),
scored_pixels.max(),
scored_pixels.std(),
)
else:
return no_result(mean_only) | Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels. | Below is the instruction that describes the task:
### Input:
Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels.
### Response:
def summary(raster, geometry=None, all_touched=False, mean_only=False,
bounds=None, exclude_nodata_value=True):
"""Return ``ST_SummaryStats`` style stats for the given raster.
If ``geometry`` is provided, we mask the raster with the given geometry and
return the stats for the intersection. The parameter can be a GeoJSON-like
object, a WKT string, or a Shapely geometry.
If ``all_touched`` is set, we include every pixel that is touched by the
given geometry. If set to ``False``, we only include pixels that are
"mostly" inside the given geometry (the calculation is done by Rasterio).
If ``mean_only`` is ``True`` we only return the mean value of the pixels,
not the full set of stats.
If ``bounds`` is passed, it should be a two-tuple of (min, max) to use for
filtering raster pixels. If not provided, we exclude anything equal to the
raster no data value.
If ``mean_only`` is ``False``, we return a ``namedtuple`` representing the
stats. All other attributes should be obvious and are consistent with
PostGIS (``min``, ``max``, ``std``, etc).
If ``mean_only`` is ``True``, we simply return a ``float`` or ``None``
representing the mean value of the matching pixels.
The ``exclude_nodata_value`` is consistent with ``ST_SummaryStats`` in that
if it's ``True`` (default) we only count non-nodata pixels (or those pixels
within ``bounds`` if defined). If it's ``False`` we return the count of all
pixels.
"""
def no_result(mean_only):
if mean_only:
return None
else:
return Summary(None, None, None, None, None, None)
try:
if geometry:
# If it's a string, assume WKT
if isinstance(geometry, six.string_types):
geometry = wkt.loads(geometry)
# If not already GeoJSON, assume it's a Shapely shape
if not isinstance(geometry, dict):
geojson = mapping(geometry)
else:
geojson = geometry
geometry = shape(geometry)
result, _ = mask(
raster, [geojson], crop=True, all_touched=all_touched,
)
pixels = result.data.flatten()
else:
pixels = raster.read(1).flatten()
except ValueError:
return no_result(mean_only)
raster_shape = raster_to_shape(raster)
if not raster_shape.contains(geometry):
log.warning(
'Geometry {} is not fully contained by the source raster'.format(
geometry,
)
)
if bounds:
score_mask = numpy.logical_and(
numpy.greater_equal(pixels, bounds[0]),
numpy.less_equal(pixels, bounds[1]),
)
else:
score_mask = numpy.not_equal(pixels, raster.nodata)
scored_pixels = numpy.extract(score_mask, pixels)
if len(scored_pixels):
if mean_only:
return scored_pixels.mean()
else:
if exclude_nodata_value:
count = len(scored_pixels)
else:
count = len(pixels)
return Summary(
count,
scored_pixels.sum(),
scored_pixels.mean(),
scored_pixels.min(),
scored_pixels.max(),
scored_pixels.std(),
)
else:
return no_result(mean_only) |
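A usage sketch for `summary` (the GeoTIFF path, the polygon, and the elevation bounds are hypothetical; rasterio is assumed to be importable, as it already must be for the `mask` call above):

import rasterio

with rasterio.open('elevation.tif') as src:
    poly = 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'            # WKT geometries are accepted
    print(summary(src, geometry=poly, all_touched=True))    # full stats namedtuple
    print(summary(src, geometry=poly, bounds=(0, 5000), mean_only=True))  # just the mean, or None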
def on_equalarea_specimen_select(self, event):
"""
Get mouse position on double click, find the nearest interpretation
to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
return
pos = event.GetPosition()
width, height = self.canvas2.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = self.specimen_EA_xdata
ydata_org = self.specimen_EA_ydata
data_corrected = self.specimen_eqarea.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index != None:
self.fit_box.SetSelection(index)
self.draw_figure(self.s, True)
self.on_select_fit(event) | Get mouse position on double click, find the nearest interpretation
to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit | Below is the instruction that describes the task:
### Input:
Get mouse position on double click, find the nearest interpretation
to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
### Response:
def on_equalarea_specimen_select(self, event):
"""
Get mouse position on double click, find the nearest interpretation
to the mouse position, then select that interpretation
Parameters
----------
event : the wx Mouseevent for that click
Alters
------
current_fit
"""
if not self.specimen_EA_xdata or not self.specimen_EA_ydata:
return
pos = event.GetPosition()
width, height = self.canvas2.get_width_height()
pos[1] = height - pos[1]
xpick_data, ypick_data = pos
xdata_org = self.specimen_EA_xdata
ydata_org = self.specimen_EA_ydata
data_corrected = self.specimen_eqarea.transData.transform(
vstack([xdata_org, ydata_org]).T)
xdata, ydata = data_corrected.T
xdata = list(map(float, xdata))
ydata = list(map(float, ydata))
e = 4e0
index = None
for i, (x, y) in enumerate(zip(xdata, ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index != None:
self.fit_box.SetSelection(index)
self.draw_figure(self.s, True)
self.on_select_fit(event) |
def ordc(item, inset):
"""
The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int
"""
assert isinstance(inset, stypes.SpiceCell)
assert inset.is_char()
assert isinstance(item, str)
item = stypes.stringToCharP(item)
return libspice.ordc_c(item, ctypes.byref(inset)) | The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int | Below is the instruction that describes the task:
### Input:
The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int
### Response:
def ordc(item, inset):
"""
The function returns the ordinal position of any given item in a
character set. If the item does not appear in the set, the function
returns -1.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ordc_c.html
:param item: An item to locate within a set.
:type item: str
:param inset: A set to search for a given item.
:type inset: SpiceCharCell
:return: the ordinal position of item within the set
:rtype: int
"""
assert isinstance(inset, stypes.SpiceCell)
assert inset.is_char()
assert isinstance(item, str)
item = stypes.stringToCharP(item)
return libspice.ordc_c(item, ctypes.byref(inset)) |
def separate_comma_imports(partitions):
"""Turns `import a, b` into `import a` and `import b`"""
def _inner():
for partition in partitions:
if partition.code_type is CodeType.IMPORT:
import_obj = import_obj_from_str(partition.src)
if import_obj.has_multiple_imports:
for new_import_obj in import_obj.split_imports():
yield CodePartition(
CodeType.IMPORT, new_import_obj.to_text(),
)
else:
yield partition
else:
yield partition
return list(_inner()) | Turns `import a, b` into `import a` and `import b` | Below is the instruction that describes the task:
### Input:
Turns `import a, b` into `import a` and `import b`
### Response:
def separate_comma_imports(partitions):
"""Turns `import a, b` into `import a` and `import b`"""
def _inner():
for partition in partitions:
if partition.code_type is CodeType.IMPORT:
import_obj = import_obj_from_str(partition.src)
if import_obj.has_multiple_imports:
for new_import_obj in import_obj.split_imports():
yield CodePartition(
CodeType.IMPORT, new_import_obj.to_text(),
)
else:
yield partition
else:
yield partition
return list(_inner()) |
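Conceptually, the pass above turns each comma-separated import into standalone statements. A pure-string sketch of the same transformation, independent of the CodePartition/import_obj helpers used above:

def split_comma_import(line):
    # 'import a, b' -> ['import a\n', 'import b\n']; anything else passes through.
    stripped = line.strip()
    if stripped.startswith('import ') and ',' in stripped:
        modules = [m.strip() for m in stripped[len('import '):].split(',')]
        return ['import {}\n'.format(m) for m in modules]
    return [line]

print(split_comma_import('import os, sys'))   # ['import os\n', 'import sys\n']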
def crossing(b, component, time, dynamics_method='keplerian', ltte=True, tol=1e-4, maxiter=1000):
"""
tol in days
"""
def projected_separation_sq(time, b, dynamics_method, cind1, cind2, ltte=True):
"""
"""
#print "*** projected_separation_sq", time, dynamics_method, cind1, cind2, ltte
times = np.array([time])
if dynamics_method in ['nbody', 'rebound']:
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle(b, times, compute=None, ltte=ltte)
elif dynamics_method=='bs':
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle_bs(b, times, compute, ltte=ltte)
elif dynamics_method=='keplerian':
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.keplerian.dynamics_from_bundle(b, times, compute=None, ltte=ltte, return_euler=False)
else:
raise NotImplementedError
return (xs[cind2][0]-xs[cind1][0])**2 + (ys[cind2][0]-ys[cind1][0])**2
# TODO: optimize this by allowing to pass cind1 and cind2 directly (and fallback to this if they aren't)
starrefs = b.hierarchy.get_stars()
cind1 = starrefs.index(component)
cind2 = starrefs.index(b.hierarchy.get_sibling_of(component))
# TODO: provide options for tol and maxiter (in the frontend computeoptionsp)?
return newton(projected_separation_sq, x0=time, args=(b, dynamics_method, cind1, cind2, ltte), tol=tol, maxiter=maxiter) | tol in days | Below is the instruction that describes the task:
### Input:
tol in days
### Response:
def crossing(b, component, time, dynamics_method='keplerian', ltte=True, tol=1e-4, maxiter=1000):
"""
tol in days
"""
def projected_separation_sq(time, b, dynamics_method, cind1, cind2, ltte=True):
"""
"""
#print "*** projected_separation_sq", time, dynamics_method, cind1, cind2, ltte
times = np.array([time])
if dynamics_method in ['nbody', 'rebound']:
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle(b, times, compute=None, ltte=ltte)
elif dynamics_method=='bs':
ts, xs, ys, zs, vxs, vys, vzs = dynamics.nbody.dynamics_from_bundle_bs(b, times, compute, ltte=ltte)
elif dynamics_method=='keplerian':
# TODO: make sure that this takes systemic velocity and corrects positions and velocities (including ltte effects if enabled)
ts, xs, ys, zs, vxs, vys, vzs = dynamics.keplerian.dynamics_from_bundle(b, times, compute=None, ltte=ltte, return_euler=False)
else:
raise NotImplementedError
return (xs[cind2][0]-xs[cind1][0])**2 + (ys[cind2][0]-ys[cind1][0])**2
# TODO: optimize this by allowing to pass cind1 and cind2 directly (and fallback to this if they aren't)
starrefs = b.hierarchy.get_stars()
cind1 = starrefs.index(component)
cind2 = starrefs.index(b.hierarchy.get_sibling_of(component))
# TODO: provide options for tol and maxiter (in the frontend computeoptionsp)?
return newton(projected_separation_sq, x0=time, args=(b, dynamics_method, cind1, cind2, ltte), tol=tol, maxiter=maxiter) |
def _size_from_header(cls, header):
"""
Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of each data column to print.
:rtype: list
"""
# We initiate the result we are going to return.
result = []
for data in header:
# We loop through the header.
# And we append the size to our result.
result.append(header[data])
# We return the result.
return result | Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of each data column to print.
:rtype: list | Below is the instruction that describes the task:
### Input:
Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of each data column to print.
:rtype: list
### Response:
def _size_from_header(cls, header):
"""
Get the size of each column from the header.
:param header:
The header template we have to get the size from.
:type header: dict
:return: The maximal size of each data column to print.
:rtype: list
"""
# We initiate the result we are going to return.
result = []
for data in header:
# We loop through the header.
# And we append the size to our result.
result.append(header[data])
# We return the result.
return result |
def generic_path_not_found(*args):
"""
Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error
"""
exception_tuple = LambdaErrorResponses.PathNotFoundException
return BaseLocalService.service_response(
LambdaErrorResponses._construct_error_response_body(
LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException"),
LambdaErrorResponses._construct_headers(exception_tuple[0]),
exception_tuple[1]
) | Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error | Below is the instruction that describes the task:
### Input:
Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error
### Response:
def generic_path_not_found(*args):
"""
Creates a Lambda Service Generic PathNotFound Response
Parameters
----------
args list
List of arguments Flask passes to the method
Returns
-------
Flask.Response
A response object representing the GenericPathNotFound Error
"""
exception_tuple = LambdaErrorResponses.PathNotFoundException
return BaseLocalService.service_response(
LambdaErrorResponses._construct_error_response_body(
LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException"),
LambdaErrorResponses._construct_headers(exception_tuple[0]),
exception_tuple[1]
) |
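A rough, self-contained sketch of the same response pattern follows; the body fields, headers and status code are assumptions made for illustration, not the exact wire format produced by SAM CLI's internal helpers:

import json

from flask import Response

def path_not_found_response():
    # Illustrative only: serialize a JSON error body and return it as a 404 response.
    body = {"Type": "LocalServiceError", "Message": "PathNotFoundException"}
    return Response(json.dumps(body), status=404, mimetype="application/json")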
def wait_for_build(self, interval=5, path=None):
"""
A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/").
"""
path = path or ''
start = time.time()
next_log = 0
while True:
response = self.get(path)['last_build_info']
if not response:
raise ValueError('This project is not building!')
if response['stop_time']:
if response['success']:
return response
else:
raise LuminosoError(response)
elapsed = time.time() - start
if elapsed > next_log:
logger.info('Still waiting (%d seconds elapsed).', next_log)
next_log += 120
time.sleep(interval) | A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/"). | Below is the the instruction that describes the task:
### Input:
A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/").
### Response:
def wait_for_build(self, interval=5, path=None):
"""
A convenience method designed to inform you when a project build has
completed. It polls the API every `interval` seconds until there is
not a build running. At that point, it returns the "last_build_info"
field of the project record if the build succeeded, and raises a
LuminosoError with the field as its message if the build failed.
If a `path` is not specified, this method will assume that its URL is
the URL for the project. Otherwise, it will use the specified path
(which should be "/projects/<project_id>/").
"""
path = path or ''
start = time.time()
next_log = 0
while True:
response = self.get(path)['last_build_info']
if not response:
raise ValueError('This project is not building!')
if response['stop_time']:
if response['success']:
return response
else:
raise LuminosoError(response)
elapsed = time.time() - start
if elapsed > next_log:
logger.info('Still waiting (%d seconds elapsed).', next_log)
next_log += 120
time.sleep(interval) |
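The polling structure above is generic enough to sketch on its own; nothing below is Luminoso-specific, and the names are invented for illustration:

import time
import logging

logger = logging.getLogger(__name__)

def wait_until(check, interval=5, log_every=120):
    # Poll check() until it returns a truthy result, logging progress periodically.
    start = time.time()
    next_log = 0
    while True:
        result = check()
        if result:
            return result
        elapsed = time.time() - start
        if elapsed > next_log:
            logger.info('Still waiting (%d seconds elapsed).', int(elapsed))
            next_log += log_every
        time.sleep(interval)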
def delete(args):
"""Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped."""
jm = setup(args)
# first, stop the jobs if they are running in the grid
if not args.local and 'executing' in args.status:
stop(args)
# then, delete them from the database
jm.delete(job_ids=get_ids(args.job_ids), array_ids=get_ids(args.array_ids), delete_logs=not args.keep_logs, delete_log_dir=not args.keep_log_dir, status=args.status) | Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped. | Below is the the instruction that describes the task:
### Input:
Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped.
### Response:
def delete(args):
"""Deletes the jobs from the job manager. If the jobs are still running in the grid, they are stopped."""
jm = setup(args)
# first, stop the jobs if they are running in the grid
if not args.local and 'executing' in args.status:
stop(args)
# then, delete them from the database
jm.delete(job_ids=get_ids(args.job_ids), array_ids=get_ids(args.array_ids), delete_logs=not args.keep_logs, delete_log_dir=not args.keep_log_dir, status=args.status) |
def update_state_machine_tab_label(self, state_machine_m):
""" Updates tab label if needed because system path, root state name or marked_dirty flag changed
:param StateMachineModel state_machine_m: State machine model that has changed
:return:
"""
sm_id = state_machine_m.state_machine.state_machine_id
if sm_id in self.tabs:
sm = state_machine_m.state_machine
# create new tab label if tab label properties are not up to date
if not self.tabs[sm_id]['marked_dirty'] == sm.marked_dirty or \
not self.tabs[sm_id]['file_system_path'] == sm.file_system_path or \
not self.tabs[sm_id]['root_state_name'] == sm.root_state.name:
label = self.view["notebook"].get_tab_label(self.tabs[sm_id]["page"]).get_child().get_children()[0]
set_tab_label_texts(label, state_machine_m, unsaved_changes=sm.marked_dirty)
self.tabs[sm_id]['file_system_path'] = sm.file_system_path
self.tabs[sm_id]['marked_dirty'] = sm.marked_dirty
self.tabs[sm_id]['root_state_name'] = sm.root_state.name
else:
            logger.warning("State machine '{0}' tab label cannot be updated because there is no tab.".format(sm_id))
:param StateMachineModel state_machine_m: State machine model that has changed
:return: | Below is the the instruction that describes the task:
### Input:
Updates tab label if needed because system path, root state name or marked_dirty flag changed
:param StateMachineModel state_machine_m: State machine model that has changed
:return:
### Response:
def update_state_machine_tab_label(self, state_machine_m):
""" Updates tab label if needed because system path, root state name or marked_dirty flag changed
:param StateMachineModel state_machine_m: State machine model that has changed
:return:
"""
sm_id = state_machine_m.state_machine.state_machine_id
if sm_id in self.tabs:
sm = state_machine_m.state_machine
# create new tab label if tab label properties are not up to date
if not self.tabs[sm_id]['marked_dirty'] == sm.marked_dirty or \
not self.tabs[sm_id]['file_system_path'] == sm.file_system_path or \
not self.tabs[sm_id]['root_state_name'] == sm.root_state.name:
label = self.view["notebook"].get_tab_label(self.tabs[sm_id]["page"]).get_child().get_children()[0]
set_tab_label_texts(label, state_machine_m, unsaved_changes=sm.marked_dirty)
self.tabs[sm_id]['file_system_path'] = sm.file_system_path
self.tabs[sm_id]['marked_dirty'] = sm.marked_dirty
self.tabs[sm_id]['root_state_name'] = sm.root_state.name
else:
            logger.warning("State machine '{0}' tab label cannot be updated because there is no tab.".format(sm_id))
def get_paths_for_attribute_set(self, keys):
"""
Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
        attribute names that allow us to (attempt to) look up that object in any
belief state.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
has_all_keys = lambda name, structure: \
all(map(lambda k: k in structure, keys))
return self.find_path(has_all_keys, on_targets=True) | Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
        attribute names that allow us to (attempt to) look up that object in any
belief state. | Below is the the instruction that describes the task:
### Input:
Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
attribute names that allow us to (attempt to) look up that object in any
belief state.
### Response:
def get_paths_for_attribute_set(self, keys):
"""
Given a list/set of keys (or one key), returns the parts that have
all of the keys in the list.
Because on_targets=True, this DOES NOT WORK WITH TOP LEVEL PROPERTIES,
only those of targets.
These paths are not pointers to the objects themselves, but tuples of
        attribute names that allow us to (attempt to) look up that object in any
belief state.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
has_all_keys = lambda name, structure: \
all(map(lambda k: k in structure, keys))
return self.find_path(has_all_keys, on_targets=True) |
def in_transaction(self):
"""
:return: True if there is an open transaction.
"""
self._in_transaction = self._in_transaction and self.is_connected
return self._in_transaction | :return: True if there is an open transaction. | Below is the the instruction that describes the task:
### Input:
:return: True if there is an open transaction.
### Response:
def in_transaction(self):
"""
:return: True if there is an open transaction.
"""
self._in_transaction = self._in_transaction and self.is_connected
return self._in_transaction |
def indexes_all(ol,value):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a')
'''
length = ol.__len__()
indexes =[]
for i in range(0,length):
if(value == ol[i]):
indexes.append(i)
else:
pass
return(indexes) | from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a') | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a')
### Response:
def indexes_all(ol,value):
'''
from elist.elist import *
ol = [1,'a',3,'a',4,'a',5]
indexes_all(ol,'a')
'''
length = ol.__len__()
indexes =[]
for i in range(0,length):
if(value == ol[i]):
indexes.append(i)
else:
pass
return(indexes) |
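With the function above in scope, indexes_all([1, 'a', 3, 'a', 4, 'a', 5], 'a') returns [1, 3, 5]; an enumerate-based comprehension gives the same result and is shown here only as an equivalent one-liner:

ol = [1, 'a', 3, 'a', 4, 'a', 5]
indexes = [i for i, item in enumerate(ol) if item == 'a']
print(indexes)  # [1, 3, 5]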
def app(environ, start_response):
"""Function called by the WSGI server."""
r = HttpRequestHandler(environ, start_response, Router).dispatch()
return r | Function called by the WSGI server. | Below is the the instruction that describes the task:
### Input:
Function called by the WSGI server.
### Response:
def app(environ, start_response):
"""Function called by the WSGI server."""
r = HttpRequestHandler(environ, start_response, Router).dispatch()
return r |
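For context, a WSGI callable of this shape can be served directly with the standard library; the sketch below substitutes a trivial handler for HttpRequestHandler and Router, which are not shown in this snippet:

from wsgiref.simple_server import make_server

def demo_app(environ, start_response):
    # Minimal WSGI application: return a plain-text body with a 200 status.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

if __name__ == '__main__':
    with make_server('', 8000, demo_app) as server:
        server.serve_forever()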
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=None):
"""
Returns field's value prepared for database lookup.
"""
## convert to settings.TIME_ZONE
if value.tzinfo is None:
value = default_tz.localize(value)
else:
value = value.astimezone(default_tz)
return super(LocalizedDateTimeField, self).get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared) | Returns field's value prepared for database lookup. | Below is the the instruction that describes the task:
### Input:
Returns field's value prepared for database lookup.
### Response:
def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=None):
"""
Returns field's value prepared for database lookup.
"""
## convert to settings.TIME_ZONE
if value.tzinfo is None:
value = default_tz.localize(value)
else:
value = value.astimezone(default_tz)
return super(LocalizedDateTimeField, self).get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared) |
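The localize/astimezone handling above is the usual pytz pattern; a standalone sketch, with time zone names chosen arbitrarily for illustration:

from datetime import datetime

import pytz

default_tz = pytz.timezone('Europe/Stockholm')

naive = datetime(2024, 1, 1, 12, 0)
aware = pytz.timezone('US/Eastern').localize(datetime(2024, 1, 1, 12, 0))

# Naive datetimes are interpreted as being in the default zone;
# aware datetimes are converted into it.
print(default_tz.localize(naive))
print(aware.astimezone(default_tz))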
def guggenheim_katayama(target, K2, n, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
critical_pressure='pore.critical_pressure'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
        The dictionary key containing the critical pressure values
"""
T = target[temperature]
Pc = target[critical_pressure]
Tc = target[critical_temperature]
sigma_o = K2*Tc**(1/3)*Pc**(2/3)
value = sigma_o*(1-T/Tc)**n
return value | r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
        The dictionary key containing the critical pressure values | Below is the the instruction that describes the task:
### Input:
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
    The dictionary key containing the critical pressure values
### Response:
def guggenheim_katayama(target, K2, n, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
critical_pressure='pore.critical_pressure'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
K2 : scalar
Fluid specific constant
n : scalar
Fluid specific constant
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
critical_pressure : string
        The dictionary key containing the critical pressure values
"""
T = target[temperature]
Pc = target[critical_pressure]
Tc = target[critical_temperature]
sigma_o = K2*Tc**(1/3)*Pc**(2/3)
value = sigma_o*(1-T/Tc)**n
return value |
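Written out with the same symbols as the function arguments, the correlation computed above is:

\sigma(T) = \sigma_o \left(1 - \frac{T}{T_c}\right)^{n},
\qquad \sigma_o = K_2 \, T_c^{1/3} P_c^{2/3}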
def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) | Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates. | Below is the the instruction that describes the task:
### Input:
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
### Response:
def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
"""
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) |
def get_language_settings(language_code, site_id=None):
"""
Return the language settings for the current site
"""
if site_id is None:
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
if lang_dict['code'] == language_code:
return lang_dict
return FLUENT_BLOGS_LANGUAGES['default'] | Return the language settings for the current site | Below is the the instruction that describes the task:
### Input:
Return the language settings for the current site
### Response:
def get_language_settings(language_code, site_id=None):
"""
Return the language settings for the current site
"""
if site_id is None:
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
if lang_dict['code'] == language_code:
return lang_dict
return FLUENT_BLOGS_LANGUAGES['default'] |
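A self-contained sketch of the lookup logic, using a made-up FLUENT_BLOGS_LANGUAGES value (the real structure comes from the django-fluent settings and is only assumed here):

FLUENT_BLOGS_LANGUAGES = {
    1: ({'code': 'en'}, {'code': 'sv'}),
    'default': {'code': 'en'},
}

def get_language_settings(language_code, site_id=1):
    # Return the per-language dict for the site, falling back to the default entry.
    for lang_dict in FLUENT_BLOGS_LANGUAGES.get(site_id, ()):
        if lang_dict['code'] == language_code:
            return lang_dict
    return FLUENT_BLOGS_LANGUAGES['default']

print(get_language_settings('sv'))  # {'code': 'sv'}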
def _GetFlagValues(self, flags):
"""Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
"""
event_types = []
for event_flag, description in self._FLAG_VALUES.items():
if event_flag & flags:
event_types.append(description)
return ', '.join(event_types) | Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record. | Below is the the instruction that describes the task:
### Input:
Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
### Response:
def _GetFlagValues(self, flags):
"""Determines which events are indicated by a set of fsevents flags.
Args:
flags (int): fsevents record flags.
Returns:
str: a comma separated string containing descriptions of the flag values
stored in an fsevents record.
"""
event_types = []
for event_flag, description in self._FLAG_VALUES.items():
if event_flag & flags:
event_types.append(description)
return ', '.join(event_types) |
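The same bitmask-to-description idea as a standalone sketch; the flag values below are invented for illustration (the real _FLAG_VALUES mapping lives on the parser class):

_FLAG_VALUES = {
    0x01: 'FolderCreated',
    0x02: 'FolderRemoved',
    0x04: 'Renamed',
}

def get_flag_values(flags):
    # Collect the description of every flag bit that is set, joined into one string.
    return ', '.join(
        description for event_flag, description in _FLAG_VALUES.items()
        if event_flag & flags)

print(get_flag_values(0x05))  # FolderCreated, Renamed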
def insertLayer(self, layer, name=None):
"""
Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
        **layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`.
"""
if name is None:
name = layer.name
name = normalizers.normalizeLayerName(name)
if name in self:
self.removeLayer(name)
return self._insertLayer(layer, name=name) | Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
        **layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`. | Below is the the instruction that describes the task:
### Input:
Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
**layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`.
### Response:
def insertLayer(self, layer, name=None):
"""
Insert **layer** into the font. ::
>>> layer = font.insertLayer(otherLayer, name="layer 2")
This will not insert the layer directly.
Rather, a new layer will be created and the data from
        **layer** will be copied to the new layer. **name**
indicates the name that should be assigned to the layer
after insertion. If **name** is not given, the layer's
original name must be used. If the layer does not have
a name, an error must be raised. The data that will be
inserted from **layer** is the same data as documented
in :meth:`BaseLayer.copy`.
"""
if name is None:
name = layer.name
name = normalizers.normalizeLayerName(name)
if name in self:
self.removeLayer(name)
return self._insertLayer(layer, name=name) |
def editcomponent(self, data):
"""
A method to edit a component in Bugzilla. Takes a dict, with
        mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method.
"""
data = data.copy()
self._component_data_convert(data, update=True)
return self._proxy.Component.update(data) | A method to edit a component in Bugzilla. Takes a dict, with
        mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method. | Below is the the instruction that describes the task:
### Input:
A method to edit a component in Bugzilla. Takes a dict, with
mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method.
### Response:
def editcomponent(self, data):
"""
A method to edit a component in Bugzilla. Takes a dict, with
        mandatory elements of product, component, and initialowner.
All other elements are optional and use the same names as the
addcomponent() method.
"""
data = data.copy()
self._component_data_convert(data, update=True)
return self._proxy.Component.update(data) |
def add_group(self, name, devices):
"""Add a new device group.
:return: a :class:`DeviceGroup` instance.
"""
device = self.add_device(name, "group")
device.add_to_group(devices)
return device | Add a new device group.
:return: a :class:`DeviceGroup` instance. | Below is the the instruction that describes the task:
### Input:
Add a new device group.
:return: a :class:`DeviceGroup` instance.
### Response:
def add_group(self, name, devices):
"""Add a new device group.
:return: a :class:`DeviceGroup` instance.
"""
device = self.add_device(name, "group")
device.add_to_group(devices)
return device |
def packvalue(value, *properties):
'''
Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and store the value to property path.
'''
def func(namedstruct):
v = namedstruct._target
for p in properties[:-1]:
v = getattr(v, p)
setattr(v, properties[-1], value)
return func | Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and store the value to property path. | Below is the the instruction that describes the task:
### Input:
Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and store the value to property path.
### Response:
def packvalue(value, *properties):
'''
Store a specified value to specified property path. Often used in nstruct "init" parameter.
:param value: a fixed value
:param properties: specified field name, same as sizefromlen.
:returns: a function which takes a NamedStruct as parameter, and store the value to property path.
'''
def func(namedstruct):
v = namedstruct._target
for p in properties[:-1]:
v = getattr(v, p)
setattr(v, properties[-1], value)
return func |
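With packvalue defined as above, a small usage sketch; SimpleNamespace stands in for the real NamedStruct target, which normally comes from the nstruct library:

from types import SimpleNamespace

fake_struct = SimpleNamespace(_target=SimpleNamespace(header=SimpleNamespace(version=0)))

init = packvalue(4, 'header', 'version')   # always store 4 at <target>.header.version
init(fake_struct)
print(fake_struct._target.header.version)  # 4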