code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def interpolate_mrms_day(start_date, variable, interp_type, mrms_path, map_filename, out_path):
"""
For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved.
"""
try:
print(start_date, variable)
end_date = start_date + timedelta(hours=23)
mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
if mrms.data is not None:
if map_filename[-3:] == "map":
mapping_data = make_proj_grids(*read_arps_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data['lon'], mapping_data['lat'], out_path, interp_type=interp_type)
elif map_filename[-3:] == "txt":
mapping_data = make_proj_grids(*read_ncar_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data["lon"], mapping_data["lat"], out_path, interp_type=interp_type)
else:
lon, lat = load_map_coordinates(map_filename)
mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=interp_type)
except Exception as e:
# This exception catches any errors when run in multiprocessing, prints the stack trace,
# and ends the process. Otherwise the process will stall.
print(traceback.format_exc())
raise e | For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved. | Below is the instruction that describes the task:
### Input:
For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved.
### Response:
def interpolate_mrms_day(start_date, variable, interp_type, mrms_path, map_filename, out_path):
"""
For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved.
"""
try:
print(start_date, variable)
end_date = start_date + timedelta(hours=23)
mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
if mrms.data is not None:
if map_filename[-3:] == "map":
mapping_data = make_proj_grids(*read_arps_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data['lon'], mapping_data['lat'], out_path, interp_type=interp_type)
elif map_filename[-3:] == "txt":
mapping_data = make_proj_grids(*read_ncar_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data["lon"], mapping_data["lat"], out_path, interp_type=interp_type)
else:
lon, lat = load_map_coordinates(map_filename)
mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=interp_type)
except Exception as e:
# This exception catches any errors when run in multiprocessing, prints the stack trace,
# and ends the process. Otherwise the process will stall.
print(traceback.format_exc())
raise e |
def finalize(self, params, rep):
"""
Save the full model once we are done.
"""
if params.get("saveNet", True):
saveDir = os.path.join(params["path"], params["name"],
"model_{}.pt".format(rep))
torch.save(self.model, saveDir) | Save the full model once we are done. | Below is the instruction that describes the task:
### Input:
Save the full model once we are done.
### Response:
def finalize(self, params, rep):
"""
Save the full model once we are done.
"""
if params.get("saveNet", True):
saveDir = os.path.join(params["path"], params["name"],
"model_{}.pt".format(rep))
torch.save(self.model, saveDir) |
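For reference, a sketch of the path `finalize` writes to, using a hypothetical `params` dict that contains only the keys the method reads ("experiment" stands for the owning object):

# Hypothetical values; only "path", "name", and "saveNet" are consulted above.
params = {"path": "results", "name": "experiment1", "saveNet": True}
# experiment.finalize(params, rep=0) would then call:
#   torch.save(self.model, "results/experiment1/model_0.pt")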
def send_approved_mail(request, user):
"""
Sends an email to a user once their ``is_active`` status goes from
``False`` to ``True`` when the ``ACCOUNTS_APPROVAL_REQUIRED``
setting is ``True``.
"""
context = {"request": request, "user": user}
subject = subject_template("email/account_approved_subject.txt", context)
send_mail_template(subject, "email/account_approved",
settings.DEFAULT_FROM_EMAIL, user.email,
context=context) | Sends an email to a user once their ``is_active`` status goes from
``False`` to ``True`` when the ``ACCOUNTS_APPROVAL_REQUIRED``
setting is ``True``. | Below is the instruction that describes the task:
### Input:
Sends an email to a user once their ``is_active`` status goes from
``False`` to ``True`` when the ``ACCOUNTS_APPROVAL_REQUIRED``
setting is ``True``.
### Response:
def send_approved_mail(request, user):
"""
Sends an email to a user once their ``is_active`` status goes from
``False`` to ``True`` when the ``ACCOUNTS_APPROVAL_REQUIRED``
setting is ``True``.
"""
context = {"request": request, "user": user}
subject = subject_template("email/account_approved_subject.txt", context)
send_mail_template(subject, "email/account_approved",
settings.DEFAULT_FROM_EMAIL, user.email,
context=context) |
def spawn(self, command):
""" Spawns a new process and adds it to the pool """
# process_name
# output
# time before starting (wait for port?)
# start_new_session=True : avoid sending parent signals to child
env = dict(os.environ)
env["MRQ_IS_SUBPROCESS"] = "1"
env.update(self.extra_env or {})
# Extract env variables from shell commands.
parts = shlex.split(command)
for p in list(parts):
if "=" in p:
env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:]
parts.pop(0)
else:
break
p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd())
self.processes.append({
"subprocess": p,
"pid": p.pid,
"command": command,
"psutil": psutil.Process(pid=p.pid)
}) | Spawns a new process and adds it to the pool | Below is the instruction that describes the task:
### Input:
Spawns a new process and adds it to the pool
### Response:
def spawn(self, command):
""" Spawns a new process and adds it to the pool """
# process_name
# output
# time before starting (wait for port?)
# start_new_session=True : avoid sending parent signals to child
env = dict(os.environ)
env["MRQ_IS_SUBPROCESS"] = "1"
env.update(self.extra_env or {})
# Extract env variables from shell commands.
parts = shlex.split(command)
for p in list(parts):
if "=" in p:
env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:]
parts.pop(0)
else:
break
p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd())
self.processes.append({
"subprocess": p,
"pid": p.pid,
"command": command,
"psutil": psutil.Process(pid=p.pid)
}) |
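The loop above peels leading `KEY=VALUE` tokens off the command and moves them into the child environment before `Popen` runs. A self-contained sketch of just that parsing step (the helper name is illustrative, not part of the original class):

import shlex

def split_env_prefix(command):
    """Split leading KEY=VALUE assignments from a shell-style command string."""
    env, parts = {}, shlex.split(command)
    while parts and "=" in parts[0]:
        key, _, value = parts[0].partition("=")
        env[key] = value
        parts.pop(0)
    return env, parts

print(split_env_prefix("GREENLETS=10 QUEUE=default python -m mrq.worker"))
# ({'GREENLETS': '10', 'QUEUE': 'default'}, ['python', '-m', 'mrq.worker'])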
def register_fn(cls, f):
"""Registers a scope function on this builder."""
def inner(self, *args, **kwargs):
try:
query, projection, options = cls.unpack_scope(f(*args, **kwargs))
new_query = deepcopy(self.query)
new_projection = deepcopy(self.projection)
new_options = deepcopy(self.options)
deep_merge(query, new_query)
new_projection.update(projection)
new_options.update(options)
return ScopeBuilder(self.model, self.fns, new_query,
new_projection, new_options)
except ValueError:
raise ValueError("Scope function \"{}\ returns an invalid scope".format(f.__name__))
setattr(cls, f.__name__, inner) | Registers a scope function on this builder. | Below is the instruction that describes the task:
### Input:
Registers a scope function on this builder.
### Response:
def register_fn(cls, f):
"""Registers a scope function on this builder."""
def inner(self, *args, **kwargs):
try:
query, projection, options = cls.unpack_scope(f(*args, **kwargs))
new_query = deepcopy(self.query)
new_projection = deepcopy(self.projection)
new_options = deepcopy(self.options)
deep_merge(query, new_query)
new_projection.update(projection)
new_options.update(options)
return ScopeBuilder(self.model, self.fns, new_query,
new_projection, new_options)
except ValueError:
raise ValueError("Scope function \"{}\ returns an invalid scope".format(f.__name__))
setattr(cls, f.__name__, inner) |
def p_Catch(p):
'''
Catch :
| Catch CATCH LPARENT Varible COLON NsContentName RPARENT COLON Terminator Block
'''
if len(p) <= 1:
p[0] = Catch(None, None, None, None, None)
else:
p[0] = Catch(p[1], p[4], p[6], p[9], p[10]) | Catch :
| Catch CATCH LPARENT Varible COLON NsContentName RPARENT COLON Terminator Block | Below is the instruction that describes the task:
### Input:
Catch :
| Catch CATCH LPARENT Varible COLON NsContentName RPARENT COLON Terminator Block
### Response:
def p_Catch(p):
'''
Catch :
| Catch CATCH LPARENT Varible COLON NsContentName RPARENT COLON Terminator Block
'''
if len(p) <= 1:
p[0] = Catch(None, None, None, None, None)
else:
p[0] = Catch(p[1], p[4], p[6], p[9], p[10]) |
def _load_ssh(self, tag):
"""Loads the SSH configuration into the vardict."""
for child in tag:
if child.tag == "server":
self._vardict["server"] = child.attrib
elif child.tag == "codes":
self._load_codes(child, True)
elif child.tag == "mappings":
self._load_mapping(child, True)
elif child.tag == "libraries":
self._load_includes(child, True) | Loads the SSH configuration into the vardict. | Below is the instruction that describes the task:
### Input:
Loads the SSH configuration into the vardict.
### Response:
def _load_ssh(self, tag):
"""Loads the SSH configuration into the vardict."""
for child in tag:
if child.tag == "server":
self._vardict["server"] = child.attrib
elif child.tag == "codes":
self._load_codes(child, True)
elif child.tag == "mappings":
self._load_mapping(child, True)
elif child.tag == "libraries":
self._load_includes(child, True) |
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options += [(app_config.label, 0) for app_config in app_configs]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
if subcommand_cls.use_argparse:
options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
parser._actions if s_opt.option_strings]
else:
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
parser.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1) | Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions. | Below is the instruction that describes the task:
### Input:
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
### Response:
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options += [(app_config.label, 0) for app_config in app_configs]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
if subcommand_cls.use_argparse:
options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
parser._actions if s_opt.option_strings]
else:
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
parser.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1) |
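A test-style way to exercise the method, simulating what the bash_completion script exports when the user types `django-admin sql<TAB>` (the values are hand-set here; in real use BASH provides them):

import os

os.environ["DJANGO_AUTO_COMPLETE"] = "1"
os.environ["COMP_WORDS"] = "django-admin sql"   # the full command line typed so far
os.environ["COMP_CWORD"] = "1"                  # cursor sits on the second word
# ManagementUtility(["django-admin", "sql"]).autocomplete()
# would print the subcommands starting with "sql" and then call sys.exit(1).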
def alias_bin(self, bin_id, alias_id):
"""Adds an ``Id`` to a ``Bin`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bin`` is determined by the provider.
The new ``Id`` performs as an alias to the primary ``Id``. If
the alias is a pointer to another bin, it is reassigned to the
given bin ``Id``.
arg: bin_id (osid.id.Id): the ``Id`` of a ``Bin``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=bin_id, alias_id=alias_id)
self._alias_id(primary_id=bin_id, equivalent_id=alias_id) | Adds an ``Id`` to a ``Bin`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bin`` is determined by the provider.
The new ``Id`` performs as an alias to the primary ``Id``. If
the alias is a pointer to another bin, it is reassigned to the
given bin ``Id``.
arg: bin_id (osid.id.Id): the ``Id`` of a ``Bin``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds an ``Id`` to a ``Bin`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bin`` is determined by the provider.
The new ``Id`` performs as an alias to the primary ``Id``. If
the alias is a pointer to another bin, it is reassigned to the
given bin ``Id``.
arg: bin_id (osid.id.Id): the ``Id`` of a ``Bin``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def alias_bin(self, bin_id, alias_id):
"""Adds an ``Id`` to a ``Bin`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bin`` is determined by the provider.
The new ``Id`` performs as an alias to the primary ``Id``. If
the alias is a pointer to another bin, it is reassigned to the
given bin ``Id``.
arg: bin_id (osid.id.Id): the ``Id`` of a ``Bin``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` or ``alias_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=bin_id, alias_id=alias_id)
self._alias_id(primary_id=bin_id, equivalent_id=alias_id) |
def list(self, options=None, **kwds):
"""
Endpoint: /activities[/<options>]/list.json
Returns a list of Activity objects.
The options parameter can be used to narrow down the activities.
Eg: options={"type": "photo-upload"}
"""
option_string = self._build_option_string(options)
activities = self._client.get("/activities%s/list.json" % option_string,
**kwds)["result"]
activities = self._result_to_list(activities)
return [Activity(self._client, activity) for activity in activities] | Endpoint: /activities[/<options>]/list.json
Returns a list of Activity objects.
The options parameter can be used to narrow down the activities.
Eg: options={"type": "photo-upload"} | Below is the instruction that describes the task:
### Input:
Endpoint: /activities[/<options>]/list.json
Returns a list of Activity objects.
The options parameter can be used to narrow down the activities.
Eg: options={"type": "photo-upload"}
### Response:
def list(self, options=None, **kwds):
"""
Endpoint: /activities[/<options>]/list.json
Returns a list of Activity objects.
The options parameter can be used to narrow down the activities.
Eg: options={"type": "photo-upload"}
"""
option_string = self._build_option_string(options)
activities = self._client.get("/activities%s/list.json" % option_string,
**kwds)["result"]
activities = self._result_to_list(activities)
return [Activity(self._client, activity) for activity in activities] |
def main(args=None):
"""Build and run parser
:param args: cli args from tests
"""
parser = argument_parser()
args = parser.parse_args(args)
# If 'func' isn't present, something is misconfigured above or no (positional) arg was given.
if not hasattr(args, 'func'):
args = parser.parse_args(['help']) # show help
# Convert argparse.Namespace into dict and clean it up.
# We can then pass it directly to the helper function.
kwargs = vars(args)
# handle the '--dev' option
if kwargs.pop('dev') or os.environ.get('QUILT_DEV_MODE', '').strip().lower() == 'true':
# Enables CLI ctrl-c tracebacks, and whatever anyone else uses it for
quilt._DEV_MODE = True
else:
# Disables CLI ctrl-c tracebacks, etc.
quilt._DEV_MODE = False
func = kwargs.pop('func')
try:
func(**kwargs)
return 0
except QuiltException as ex:
print(ex.message, file=sys.stderr)
return 1
except requests.exceptions.ConnectionError as ex:
print("Failed to connect: %s" % ex, file=sys.stderr)
return 1 | Build and run parser
:param args: cli args from tests | Below is the instruction that describes the task:
### Input:
Build and run parser
:param args: cli args from tests
### Response:
def main(args=None):
"""Build and run parser
:param args: cli args from tests
"""
parser = argument_parser()
args = parser.parse_args(args)
# If 'func' isn't present, something is misconfigured above or no (positional) arg was given.
if not hasattr(args, 'func'):
args = parser.parse_args(['help']) # show help
# Convert argparse.Namespace into dict and clean it up.
# We can then pass it directly to the helper function.
kwargs = vars(args)
# handle the '--dev' option
if kwargs.pop('dev') or os.environ.get('QUILT_DEV_MODE', '').strip().lower() == 'true':
# Enables CLI ctrl-c tracebacks, and whatever anyone else uses it for
quilt._DEV_MODE = True
else:
# Disables CLI ctrl-c tracebacks, etc.
quilt._DEV_MODE = False
func = kwargs.pop('func')
try:
func(**kwargs)
return 0
except QuiltException as ex:
print(ex.message, file=sys.stderr)
return 1
except requests.exceptions.ConnectionError as ex:
print("Failed to connect: %s" % ex, file=sys.stderr)
return 1 |
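Because `args` defaults to `None` (argparse then falls back to `sys.argv`), tests can drive the same entry point by passing an argv list explicitly. A hedged sketch; the subcommand is a placeholder for whatever `argument_parser()` actually registers:

# Hypothetical test-style invocation.
exit_code = main(["ls"])      # "ls" stands in for a real registered subcommand
assert exit_code in (0, 1)    # 0 on success, 1 on QuiltException or connection errors

# Calling main([]) with no subcommand makes it re-parse as ["help"]
# and print usage instead of raising.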
def get_marshaller_for_type_string(self, type_string):
""" Gets the appropriate marshaller for a type string.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with the given type string. The modules it
requires, if available, will be loaded.
Parameters
----------
type_string : str
Type string for a Python object.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings
"""
if type_string in self._type_strings:
index = self._type_strings[type_string]
m = self._marshallers[index]
if self._imported_required_modules[index]:
return m, True
if not self._has_required_modules[index]:
return m, False
success = self._import_marshaller_modules(m)
self._has_required_modules[index] = success
self._imported_required_modules[index] = success
return m, success
else:
return None, False | Gets the appropriate marshaller for a type string.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with the given type string. The modules it
requires, if available, will be loaded.
Parameters
----------
type_string : str
Type string for a Python object.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings | Below is the instruction that describes the task:
### Input:
Gets the appropriate marshaller for a type string.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with the given type string. The modules it
requires, if available, will be loaded.
Parameters
----------
type_string : str
Type string for a Python object.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings
### Response:
def get_marshaller_for_type_string(self, type_string):
""" Gets the appropriate marshaller for a type string.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with the given type string. The modules it
requires, if available, will be loaded.
Parameters
----------
type_string : str
Type string for a Python object.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings
"""
if type_string in self._type_strings:
index = self._type_strings[type_string]
m = self._marshallers[index]
if self._imported_required_modules[index]:
return m, True
if not self._has_required_modules[index]:
return m, False
success = self._import_marshaller_modules(m)
self._has_required_modules[index] = success
self._imported_required_modules[index] = success
return m, success
else:
return None, False |
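A hedged usage sketch: `collection` stands for the object this method belongs to (an already constructed marshaller collection, which is an assumption of this example); the type string follows the dotted-name convention described by `python_type_strings`:

# Hypothetical lookup; "collection" is assumed to exist and hold registered marshallers.
m, has_modules = collection.get_marshaller_for_type_string("numpy.ndarray")
if m is None:
    print("no marshaller registered for this type string")
elif not has_modules:
    print("marshaller found, but its required modules failed to import")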
def getTaskTypes(self):
""" Return the current list of task types
"""
types = [
('Calibration', safe_unicode(_('Calibration')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8')),
('Preventive', safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Validation', safe_unicode(_('Validation')).encode('utf-8')),
]
return DisplayList(types) | Return the current list of task types | Below is the instruction that describes the task:
### Input:
Return the current list of task types
### Response:
def getTaskTypes(self):
""" Return the current list of task types
"""
types = [
('Calibration', safe_unicode(_('Calibration')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8')),
('Preventive', safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Validation', safe_unicode(_('Validation')).encode('utf-8')),
]
return DisplayList(types) |
def parse(argv, rules=None, config=None, **kwargs):
"""Parse the given arg vector with the default Splunk command rules."""
parser_ = parser(rules, **kwargs)
if config is not None: parser_.loadrc(config)
return parser_.parse(argv).result | Parse the given arg vector with the default Splunk command rules. | Below is the instruction that describes the task:
### Input:
Parse the given arg vector with the default Splunk command rules.
### Response:
def parse(argv, rules=None, config=None, **kwargs):
"""Parse the given arg vector with the default Splunk command rules."""
parser_ = parser(rules, **kwargs)
if config is not None: parser_.loadrc(config)
return parser_.parse(argv).result |
def set_text(self, x, y, text):
"""Set text to the given coords.
:param x: x coordinate of the text start position
:param y: y coordinate of the text start position
"""
col, row = get_pos(x, y)
for i,c in enumerate(text):
self.chars[row][col+i] = c | Set text to the given coords.
:param x: x coordinate of the text start position
:param y: y coordinate of the text start position | Below is the instruction that describes the task:
### Input:
Set text to the given coords.
:param x: x coordinate of the text start position
:param y: y coordinate of the text start position
### Response:
def set_text(self, x, y, text):
"""Set text to the given coords.
:param x: x coordinate of the text start position
:param y: y coordinate of the text start position
"""
col, row = get_pos(x, y)
for i,c in enumerate(text):
self.chars[row][col+i] = c |
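A self-contained sketch of the idea, with a trivial stand-in for `get_pos` (the real coordinate-to-cell mapping is not shown in the excerpt above):

def get_pos(x, y):
    # Stand-in: assume one character per cell, so (x, y) maps directly to (col, row).
    return x, y

chars = [[" "] * 20 for _ in range(5)]   # 5 rows x 20 columns

def set_text(chars, x, y, text):
    col, row = get_pos(x, y)
    for i, c in enumerate(text):
        chars[row][col + i] = c

set_text(chars, 3, 1, "hello")
print("".join(chars[1]))   # "   hello            "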
def clear_lock(remote=None):
'''
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
'''
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(repo['url'], repo['lockfile'], exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(repo['lockfile']):
try:
os.remove(repo['lockfile'])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(repo['lockfile'])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
msg = 'Removed lock for {0}'.format(repo['url'])
log.debug(msg)
success.append(msg)
return success, failed
if isinstance(remote, dict):
return _do_clear_lock(remote)
cleared = []
errors = []
for repo in init():
if remote:
try:
if remote not in repo['url']:
continue
except TypeError:
# remote was non-string, try again
if six.text_type(remote) not in repo['url']:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked. | Below is the instruction that describes the task:
### Input:
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
### Response:
def clear_lock(remote=None):
'''
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
'''
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(repo['url'], repo['lockfile'], exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(repo['lockfile']):
try:
os.remove(repo['lockfile'])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(repo['lockfile'])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
msg = 'Removed lock for {0}'.format(repo['url'])
log.debug(msg)
success.append(msg)
return success, failed
if isinstance(remote, dict):
return _do_clear_lock(remote)
cleared = []
errors = []
for repo in init():
if remote:
try:
if remote not in repo['url']:
continue
except TypeError:
# remote was non-string, try again
if six.text_type(remote) not in repo['url']:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
errors.extend(failed)
return cleared, errors |
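A hedged usage sketch of the two call styles the docstring describes (a repo dict versus a URL pattern); the URL and lockfile values are placeholders:

# Pattern form: clear locks on every remote whose URL contains the string.
cleared, errors = clear_lock("github.com/example")

# Dict form: clear the lock for one specific repo configuration.
# repo = {"url": "https://github.com/example/repo.git", "lockfile": "/path/to/update.lk"}
# success, failed = clear_lock(repo)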
def setCurrentInspectorRegItem(self, regItem):
""" Sets the current inspector given an InspectorRegItem
"""
check_class(regItem, InspectorRegItem, allow_none=True)
self.inspectorTab.setCurrentRegItem(regItem) | Sets the current inspector given an InspectorRegItem | Below is the instruction that describes the task:
### Input:
Sets the current inspector given an InspectorRegItem
### Response:
def setCurrentInspectorRegItem(self, regItem):
""" Sets the current inspector given an InspectorRegItem
"""
check_class(regItem, InspectorRegItem, allow_none=True)
self.inspectorTab.setCurrentRegItem(regItem) |
def construct_datapipeline(env='',
generated=None,
previous_env=None,
region='us-east-1',
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data, formats=generated)
return pipeline_json | Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations. | Below is the instruction that describes the task:
### Input:
Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations.
### Response:
def construct_datapipeline(env='',
generated=None,
previous_env=None,
region='us-east-1',
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data, formats=generated)
return pipeline_json |
def server_side(func):
"""
Decorator to designate an API method applicable only to server-side
instances.
This allows us to use the same APIRequest and APIResponse subclasses on the
client and server sides without too much confusion.
"""
def inner(*args, **kwargs):
if args and hasattr(args[0], 'is_server') and not voltron.debugger:
raise ServerSideOnlyException("This method can only be called on a server-side instance")
return func(*args, **kwargs)
return inner | Decorator to designate an API method applicable only to server-side
instances.
This allows us to use the same APIRequest and APIResponse subclasses on the
client and server sides without too much confusion. | Below is the instruction that describes the task:
### Input:
Decorator to designate an API method applicable only to server-side
instances.
This allows us to use the same APIRequest and APIResponse subclasses on the
client and server sides without too much confusion.
### Response:
def server_side(func):
"""
Decorator to designate an API method applicable only to server-side
instances.
This allows us to use the same APIRequest and APIResponse subclasses on the
client and server sides without too much confusion.
"""
def inner(*args, **kwargs):
if args and hasattr(args[0], 'is_server') and not voltron.debugger:
raise ServerSideOnlyException("This method can only be called on a server-side instance")
return func(*args, **kwargs)
return inner |
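A hedged illustration of how the decorator is meant to be applied; the subclass and method names are invented for this example, and only the `is_server` convention comes from the code above:

class APIExampleRequest(APIRequest):          # hypothetical subclass
    @server_side
    def dispatch(self):
        # If voltron.debugger is unset, the wrapper raises ServerSideOnlyException
        # instead of running this body on a client-side instance.
        return "result computed inside the debugger host"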
def resolve(self, piper, forgive=False):
"""
Given a ``Piper`` instance or the ``id`` of the ``Piper``. Returns the
``Piper`` instance if it can be resolved else raises a ``DaggerError``
or returns ``False`` depending on the "forgive" argument.
Arguments:
- piper(``Piper`` or id(``Piper``)) a ``Piper`` instance or its id to be
found in the ``Dagger``.
- forgive(``bool``) [default: ``False``] If "forgive" is ``False`` a
``DaggerError`` is raised whenever a ``Piper`` cannot be resolved in
the ``Dagger``. If "forgive" is ``True`` then ``False`` is returned.
"""
try:
if piper in self:
resolved = piper
else:
resolved = [p for p in self if id(p) == piper][0]
except (TypeError, IndexError):
resolved = False
if resolved:
self.log.debug('%s resolved a piper from %s' % (repr(self), piper))
else:
self.log.debug('%s could not resolve a piper from %s' % \
(repr(self), repr(piper)))
if not forgive:
raise DaggerError('%s could not resolve a Piper from %s' % \
(repr(self), repr(piper)))
resolved = False
return resolved | Given a ``Piper`` instance or the ``id`` of the ``Piper``. Returns the
``Piper`` instance if it can be resolved else raises a ``DaggerError``
or returns ``False`` depending on the "forgive" argument.
Arguments:
- piper(``Piper`` or id(``Piper``)) a ``Piper`` instance or its id to be
found in the ``Dagger``.
- forgive(``bool``) [default: ``False``] If "forgive" is ``False`` a
``DaggerError`` is raised whenever a ``Piper`` cannot be resolved in
the ``Dagger``. If "forgive" is ``True`` then ``False`` is returned. | Below is the instruction that describes the task:
### Input:
Given a ``Piper`` instance or the ``id`` of the ``Piper``. Returns the
``Piper`` instance if it can be resolved else raises a ``DaggerError``
or returns ``False`` depending on the "forgive" argument.
Arguments:
- piper(``Piper`` or id(``Piper``)) a ``Piper`` instance or its id to be
found in the ``Dagger``.
- forgive(``bool``) [default: ``False``] If "forgive" is ``False`` a
``DaggerError`` is raised whenever a ``Piper`` cannot be resolved in
the ``Dagger``. If "forgive" is ``True`` then ``False`` is returned.
### Response:
def resolve(self, piper, forgive=False):
"""
Given a ``Piper`` instance or the ``id`` of the ``Piper``. Returns the
``Piper`` instance if it can be resolved else raises a ``DaggerError``
or returns ``False`` depending on the "forgive" argument.
Arguments:
- piper(``Piper`` or id(``Piper``)) a ``Piper`` instance or its id to be
found in the ``Dagger``.
- forgive(``bool``) [default: ``False``] If "forgive" is ``False`` a
``DaggerError`` is raised whenever a ``Piper`` cannot be resolved in
the ``Dagger``. If "forgive" is ``True`` then ``False`` is returned.
"""
try:
if piper in self:
resolved = piper
else:
resolved = [p for p in self if id(p) == piper][0]
except (TypeError, IndexError):
resolved = False
if resolved:
self.log.debug('%s resolved a piper from %s' % (repr(self), piper))
else:
self.log.debug('%s could not resolve a piper from %s' % \
(repr(self), repr(piper)))
if not forgive:
raise DaggerError('%s could not resolve a Piper from %s' % \
(repr(self), repr(piper)))
resolved = False
return resolved |
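A hedged usage sketch; `dagger` is assumed to be a constructed ``Dagger`` that already contains `piper`:

same = dagger.resolve(piper)                    # membership hit returns the instance itself
by_id = dagger.resolve(id(piper))               # id-based lookup resolves to the same object
missing = dagger.resolve(123456, forgive=True)  # unknown id: returns False instead of raising DaggerError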
def server(self):
"""
All in one endpoints. This property is created automatically
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator()
"""
expires_in = self.app.config.get('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN')
token_generator = self.app.config.get(
'OAUTH2_PROVIDER_TOKEN_GENERATOR', None
)
if token_generator and not callable(token_generator):
token_generator = import_string(token_generator)
refresh_token_generator = self.app.config.get(
'OAUTH2_PROVIDER_REFRESH_TOKEN_GENERATOR', None
)
if refresh_token_generator and not callable(refresh_token_generator):
refresh_token_generator = import_string(refresh_token_generator)
if hasattr(self, '_validator'):
return Server(
self._validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
if hasattr(self, '_clientgetter') and \
hasattr(self, '_tokengetter') and \
hasattr(self, '_tokensetter') and \
hasattr(self, '_grantgetter') and \
hasattr(self, '_grantsetter'):
usergetter = None
if hasattr(self, '_usergetter'):
usergetter = self._usergetter
validator_class = self._validator_class
if validator_class is None:
validator_class = OAuth2RequestValidator
validator = validator_class(
clientgetter=self._clientgetter,
tokengetter=self._tokengetter,
grantgetter=self._grantgetter,
usergetter=usergetter,
tokensetter=self._tokensetter,
grantsetter=self._grantsetter,
)
self._validator = validator
return Server(
validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
raise RuntimeError('application not bound to required getters') | All in one endpoints. This property is created automatically
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator() | Below is the instruction that describes the task:
### Input:
All in one endpoints. This property is created automatically
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator()
### Response:
def server(self):
"""
All in one endpoints. This property is created automatically
if you have implemented all the getters and setters.
However, if you are not satisfied with the getter and setter,
you can create a validator with :class:`OAuth2RequestValidator`::
class MyValidator(OAuth2RequestValidator):
def validate_client_id(self, client_id):
# do something
return True
And assign the validator for the provider::
oauth._validator = MyValidator()
"""
expires_in = self.app.config.get('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN')
token_generator = self.app.config.get(
'OAUTH2_PROVIDER_TOKEN_GENERATOR', None
)
if token_generator and not callable(token_generator):
token_generator = import_string(token_generator)
refresh_token_generator = self.app.config.get(
'OAUTH2_PROVIDER_REFRESH_TOKEN_GENERATOR', None
)
if refresh_token_generator and not callable(refresh_token_generator):
refresh_token_generator = import_string(refresh_token_generator)
if hasattr(self, '_validator'):
return Server(
self._validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
if hasattr(self, '_clientgetter') and \
hasattr(self, '_tokengetter') and \
hasattr(self, '_tokensetter') and \
hasattr(self, '_grantgetter') and \
hasattr(self, '_grantsetter'):
usergetter = None
if hasattr(self, '_usergetter'):
usergetter = self._usergetter
validator_class = self._validator_class
if validator_class is None:
validator_class = OAuth2RequestValidator
validator = validator_class(
clientgetter=self._clientgetter,
tokengetter=self._tokengetter,
grantgetter=self._grantgetter,
usergetter=usergetter,
tokensetter=self._tokensetter,
grantsetter=self._grantsetter,
)
self._validator = validator
return Server(
validator,
token_expires_in=expires_in,
token_generator=token_generator,
refresh_token_generator=refresh_token_generator,
)
raise RuntimeError('application not bound to required getters') |
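The configuration keys read by this property are set on the Flask app; a minimal sketch, where `app` is assumed to be the bound Flask application and the dotted path is a hypothetical importable generator:

# Only these keys are consulted by server(); the values shown are illustrative.
app.config["OAUTH2_PROVIDER_TOKEN_EXPIRES_IN"] = 3600
app.config["OAUTH2_PROVIDER_TOKEN_GENERATOR"] = "myapp.tokens.make_bearer_token"  # dotted path or callable
app.config["OAUTH2_PROVIDER_REFRESH_TOKEN_GENERATOR"] = None  # fall back to the default generator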
def generate_boosted_machine(self):
"""generate_boosted_machine() -> strong
Creates a single strong classifier from this cascade by concatenating all strong classifiers.
**Returns:**
``strong`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier as a combination of all classifiers in this cascade.
"""
strong = bob.learn.boosting.BoostedMachine()
for machine, index in zip(self.cascade, self.indices):
weak = machine.weak_machines
weights = machine.weights
for i in range(len(weak)):
strong.add_weak_machine(weak[i], weights[i])
return strong | generate_boosted_machine() -> strong
Creates a single strong classifier from this cascade by concatenating all strong classifiers.
**Returns:**
``strong`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier as a combination of all classifiers in this cascade. | Below is the instruction that describes the task:
### Input:
generate_boosted_machine() -> strong
Creates a single strong classifier from this cascade by concatenating all strong classifiers.
**Returns:**
``strong`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier as a combination of all classifiers in this cascade.
### Response:
def generate_boosted_machine(self):
"""generate_boosted_machine() -> strong
Creates a single strong classifier from this cascade by concatenating all strong classifiers.
**Returns:**
``strong`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier as a combination of all classifiers in this cascade.
"""
strong = bob.learn.boosting.BoostedMachine()
for machine, index in zip(self.cascade, self.indices):
weak = machine.weak_machines
weights = machine.weights
for i in range(len(weak)):
strong.add_weak_machine(weak[i], weights[i])
return strong |
def make(
password_provider,
*,
pin_store=None,
pin_type=PinType.PUBLIC_KEY,
post_handshake_deferred_failure=None,
anonymous=False,
ssl_context_factory=default_ssl_context,
no_verify=False):
"""
Construct a :class:`SecurityLayer`. Depending on the arguments passed,
different features are enabled or disabled.
.. warning::
When using any argument except `password_provider`, be sure to read
its documentation below the following overview **carefully**. Many
arguments can be used to shoot yourself in the foot easily, while
violating all security expectations.
Args:
password_provider (:class:`str` or coroutine function):
Password source to authenticate with.
Keyword Args:
pin_store (:class:`dict` or :class:`AbstractPinStore`):
Enable use of certificate/public key pinning. `pin_type` controls
the type of store used when a dict is passed instead of a pin store
object.
pin_type (:class:`~aioxmpp.security_layer.PinType`):
Type of pin store to create when `pin_store` is a dict.
post_handshake_deferred_failure (coroutine function):
Coroutine callback to invoke when using certificate pinning and the
verification of the certificate was not possible using either PKIX
or the pin store.
anonymous (:class:`str`, :data:`None` or :data:`False`):
trace token for SASL ANONYMOUS (:rfc:`4505`); passing a
non-:data:`False` value enables ANONYMOUS authentication.
ssl_context_factory (function): Factory function to create the SSL
context used to establish transport layer security. Defaults to
:func:`aioxmpp.security_layer.default_ssl_context`.
no_verify (:class:`bool`): *Disable* all certificate verification.
Usage is **strongly discouraged** outside controlled test
environments. See below for alternatives.
Raises:
RuntimeError: if `anonymous` is not :data:`False` and the version of
:mod:`aiosasl` does not support ANONYMOUS authentication.
Returns:
:class:`SecurityLayer`: object holding the entire security layer
configuration
`password_provider` must either be a coroutine function or a :class:`str`.
As a coroutine function, it is called during authentication with the JID we
are trying to authenticate against as the first, and the sequence number of
the authentication attempt as second argument. The sequence number starts
at 0. The coroutine is expected to return :data:`None` or a password. See
:class:`PasswordSASLProvider` for details. If `password_provider` is a
:class:`str`, a coroutine which returns the string on the first and
:data:`None` on subsequent attempts is created and used.
If `pin_store` is not :data:`None`, :class:`PinningPKIXCertificateVerifier`
is used instead of the default :class:`PKIXCertificateVerifier`. The
`pin_store` argument determines the pinned certificates: if it is a
dictionary, a :class:`AbstractPinStore` according to the :class:`PinType`
passed as `pin_type` argument is created and initialised with the data from
the dictionary using its :meth:`~AbstractPinStore.import_from_json` method.
Otherwise, `pin_store` must be a :class:`AbstractPinStore` instance which
is passed to the verifier.
`post_handshake_deferred_callback` is used only if `pin_store` is not
:data:`None`. It is passed to the equally-named argument of
:class:`PinningPKIXCertificateVerifier`, see the documentation there for
details on the semantics. If `post_handshake_deferred_callback` is
:data:`None` while `pin_store` is not, a coroutine which returns
:data:`False` is substituted.
`ssl_context_factory` can be a callable taking no arguments and returning
a :class:`OpenSSL.SSL.Context` object. If given, the factory will be used
to obtain an SSL context when the stream negotiates transport layer
security via TLS. By default,
:func:`aioxmpp.security_layer.default_ssl_context` is used, which should be
fine for most applications.
.. warning::
The :func:`~.default_ssl_context` implementation sets important
defaults. It is **strongly recommended** to use the context returned
by :func:`~.default_ssl_context` and modify it, instead of creating
a new context from scratch when implementing your own factory.
If `no_verify` is true, none of the above regarding certificate verifiers
matters. The internal null verifier is used, which **disables certificate
verification completely**.
.. warning::
Disabling certificate verification makes your application vulnerable to
trivial Man-in-the-Middle attacks. Do **not** use this outside
controlled test environments or when you know **exactly** what you’re
doing!
If you need to handle certificates which cannot be verified using the
public key infrastructure, consider making use of the `pin_store`
argument instead.
`anonymous` may be a string or :data:`False`. If it is not :data:`False`,
:class:`AnonymousSASLProvider` is used before password based authentication
is attempted. In addition, it is allowed to set `password_provider` to
:data:`None`. `anonymous` is the trace token to use, and SHOULD be the
empty string (as specified by :xep:`175`). This requires :mod:`aiosasl` 0.3
or newer.
.. note::
:data:`False` and ``""`` are treated differently for the `anonymous`
argument, despite both being false-y values!
.. note::
If `anonymous` is not :data:`False` and `password_provider` is not
:data:`None`, both authentication types are attempted. Anonymous
authentication is, in that case, preferred over password-based
authentication.
If you need to reverse the order, you have to construct your own
:class:`SecurityLayer` object.
.. warning::
Take the security and privacy considerations from :rfc:`4505` (which
specifies the ANONYMOUS SASL mechanism) and :xep:`175` (which discusses
the ANONYMOUS SASL mechanism in the XMPP context) into account before
using `anonymous`.
The versatility and simplicity of use of this function make (pun intended)
it the preferred way to construct :class:`SecurityLayer` instances.
.. versionadded:: 0.8
Support for SASL ANONYMOUS was added.
.. versionadded:: 0.11
Support for `ssl_context_factory`.
"""
if isinstance(password_provider, str):
static_password = password_provider
@asyncio.coroutine
def password_provider(jid, nattempt):
if nattempt == 0:
return static_password
return None
if pin_store is not None:
if post_handshake_deferred_failure is None:
@asyncio.coroutine
def post_handshake_deferred_failure(verifier):
return False
if not isinstance(pin_store, AbstractPinStore):
pin_data = pin_store
if pin_type == PinType.PUBLIC_KEY:
logger.debug("using PublicKeyPinStore")
pin_store = PublicKeyPinStore()
else:
logger.debug("using CertificatePinStore")
pin_store = CertificatePinStore()
pin_store.import_from_json(pin_data)
def certificate_verifier_factory():
return PinningPKIXCertificateVerifier(
pin_store.query,
post_handshake_deferred_failure,
)
elif no_verify:
certificate_verifier_factory = _NullVerifier
else:
certificate_verifier_factory = PKIXCertificateVerifier
sasl_providers = []
if anonymous is not False:
if AnonymousSASLProvider is None:
raise RuntimeError(
"aiosasl does not support ANONYMOUS, please upgrade"
)
sasl_providers.append(
AnonymousSASLProvider(anonymous)
)
if password_provider is not None:
sasl_providers.append(
PasswordSASLProvider(
password_provider,
),
)
return SecurityLayer(
ssl_context_factory,
certificate_verifier_factory,
True,
tuple(sasl_providers),
) | Construct a :class:`SecurityLayer`. Depending on the arguments passed,
different features are enabled or disabled.
.. warning::
When using any argument except `password_provider`, be sure to read
its documentation below the following overview **carefully**. Many
arguments can be used to shoot yourself in the foot easily, while
violating all security expectations.
Args:
password_provider (:class:`str` or coroutine function):
Password source to authenticate with.
Keyword Args:
pin_store (:class:`dict` or :class:`AbstractPinStore`):
Enable use of certificate/public key pinning. `pin_type` controls
the type of store used when a dict is passed instead of a pin store
object.
pin_type (:class:`~aioxmpp.security_layer.PinType`):
Type of pin store to create when `pin_store` is a dict.
post_handshake_deferred_failure (coroutine function):
Coroutine callback to invoke when using certificate pinning and the
verification of the certificate was not possible using either PKIX
or the pin store.
anonymous (:class:`str`, :data:`None` or :data:`False`):
trace token for SASL ANONYMOUS (:rfc:`4505`); passing a
non-:data:`False` value enables ANONYMOUS authentication.
ssl_context_factory (function): Factory function to create the SSL
context used to establish transport layer security. Defaults to
:func:`aioxmpp.security_layer.default_ssl_context`.
no_verify (:class:`bool`): *Disable* all certificate verification.
Usage is **strongly discouraged** outside controlled test
environments. See below for alternatives.
Raises:
RuntimeError: if `anonymous` is not :data:`False` and the version of
:mod:`aiosasl` does not support ANONYMOUS authentication.
Returns:
:class:`SecurityLayer`: object holding the entire security layer
configuration
`password_provider` must either be a coroutine function or a :class:`str`.
As a coroutine function, it is called during authentication with the JID we
are trying to authenticate against as the first, and the sequence number of
the authentication attempt as second argument. The sequence number starts
at 0. The coroutine is expected to return :data:`None` or a password. See
:class:`PasswordSASLProvider` for details. If `password_provider` is a
:class:`str`, a coroutine which returns the string on the first and
:data:`None` on subsequent attempts is created and used.
If `pin_store` is not :data:`None`, :class:`PinningPKIXCertificateVerifier`
is used instead of the default :class:`PKIXCertificateVerifier`. The
`pin_store` argument determines the pinned certificates: if it is a
dictionary, a :class:`AbstractPinStore` according to the :class:`PinType`
passed as `pin_type` argument is created and initialised with the data from
the dictionary using its :meth:`~AbstractPinStore.import_from_json` method.
Otherwise, `pin_store` must be a :class:`AbstractPinStore` instance which
is passed to the verifier.
`post_handshake_deferred_callback` is used only if `pin_store` is not
:data:`None`. It is passed to the equally-named argument of
:class:`PinningPKIXCertificateVerifier`, see the documentation there for
details on the semantics. If `post_handshake_deferred_callback` is
:data:`None` while `pin_store` is not, a coroutine which returns
:data:`False` is substituted.
`ssl_context_factory` can be a callable taking no arguments and returning
a :class:`OpenSSL.SSL.Context` object. If given, the factory will be used
to obtain an SSL context when the stream negotiates transport layer
security via TLS. By default,
:func:`aioxmpp.security_layer.default_ssl_context` is used, which should be
fine for most applications.
.. warning::
The :func:`~.default_ssl_context` implementation sets important
defaults. It is **strongly recommended** to use the context returned
by :func:`~.default_ssl_context` and modify it, instead of creating
a new context from scratch when implementing your own factory.
If `no_verify` is true, none of the above regarding certificate verifiers
matters. The internal null verifier is used, which **disables certificate
verification completely**.
.. warning::
Disabling certificate verification makes your application vulnerable to
trivial Man-in-the-Middle attacks. Do **not** use this outside
controlled test environments or when you know **exactly** what you’re
doing!
If you need to handle certificates which cannot be verified using the
public key infrastructure, consider making use of the `pin_store`
argument instead.
`anonymous` may be a string or :data:`False`. If it is not :data:`False`,
:class:`AnonymousSASLProvider` is used before password based authentication
is attempted. In addition, it is allowed to set `password_provider` to
:data:`None`. `anonymous` is the trace token to use, and SHOULD be the
empty string (as specified by :xep:`175`). This requires :mod:`aiosasl` 0.3
or newer.
.. note::
:data:`False` and ``""`` are treated differently for the `anonymous`
argument, despite both being false-y values!
.. note::
If `anonymous` is not :data:`False` and `password_provider` is not
:data:`None`, both authentication types are attempted. Anonymous
authentication is, in that case, preferred over password-based
authentication.
If you need to reverse the order, you have to construct your own
:class:`SecurityLayer` object.
.. warning::
Take the security and privacy considerations from :rfc:`4505` (which
specifies the ANONYMOUS SASL mechanism) and :xep:`175` (which discusses
the ANONYMOUS SASL mechanism in the XMPP context) into account before
using `anonymous`.
The versatility and simplicity of use of this function make (pun intended)
it the preferred way to construct :class:`SecurityLayer` instances.
.. versionadded:: 0.8
Support for SASL ANONYMOUS was added.
.. versionadded:: 0.11
Support for `ssl_context_factory`. | Below is the instruction that describes the task:
### Input:
Construct a :class:`SecurityLayer`. Depending on the arguments passed,
different features are enabled or disabled.
.. warning::
When using any argument except `password_provider`, be sure to read
its documentation below the following overview **carefully**. Many
arguments can be used to shoot yourself in the foot easily, while
violating all security expectations.
Args:
password_provider (:class:`str` or coroutine function):
Password source to authenticate with.
Keyword Args:
pin_store (:class:`dict` or :class:`AbstractPinStore`):
Enable use of certificate/public key pinning. `pin_type` controls
the type of store used when a dict is passed instead of a pin store
object.
pin_type (:class:`~aioxmpp.security_layer.PinType`):
Type of pin store to create when `pin_store` is a dict.
post_handshake_deferred_failure (coroutine function):
Coroutine callback to invoke when using certificate pinning and the
verification of the certificate was not possible using either PKIX
or the pin store.
anonymous (:class:`str`, :data:`None` or :data:`False`):
trace token for SASL ANONYMOUS (:rfc:`4505`); passing a
non-:data:`False` value enables ANONYMOUS authentication.
ssl_context_factory (function): Factory function to create the SSL
context used to establish transport layer security. Defaults to
:func:`aioxmpp.security_layer.default_ssl_context`.
no_verify (:class:`bool`): *Disable* all certificate verification.
Usage is **strongly discouraged** outside controlled test
environments. See below for alternatives.
Raises:
RuntimeError: if `anonymous` is not :data:`False` and the version of
:mod:`aiosasl` does not support ANONYMOUS authentication.
Returns:
:class:`SecurityLayer`: object holding the entire security layer
configuration
`password_provider` must either be a coroutine function or a :class:`str`.
As a coroutine function, it is called during authentication with the JID we
are trying to authenticate against as the first, and the sequence number of
the authentication attempt as second argument. The sequence number starts
at 0. The coroutine is expected to return :data:`None` or a password. See
:class:`PasswordSASLProvider` for details. If `password_provider` is a
:class:`str`, a coroutine which returns the string on the first and
:data:`None` on subsequent attempts is created and used.
If `pin_store` is not :data:`None`, :class:`PinningPKIXCertificateVerifier`
is used instead of the default :class:`PKIXCertificateVerifier`. The
`pin_store` argument determines the pinned certificates: if it is a
dictionary, a :class:`AbstractPinStore` according to the :class:`PinType`
passed as `pin_type` argument is created and initialised with the data from
the dictionary using its :meth:`~AbstractPinStore.import_from_json` method.
Otherwise, `pin_store` must be a :class:`AbstractPinStore` instance which
is passed to the verifier.
`post_handshake_deferred_callback` is used only if `pin_store` is not
:data:`None`. It is passed to the equally-named argument of
:class:`PinningPKIXCertificateVerifier`, see the documentation there for
details on the semantics. If `post_handshake_deferred_callback` is
:data:`None` while `pin_store` is not, a coroutine which returns
:data:`False` is substituted.
`ssl_context_factory` can be a callable taking no arguments and returning
a :class:`OpenSSL.SSL.Context` object. If given, the factory will be used
to obtain an SSL context when the stream negotiates transport layer
security via TLS. By default,
:func:`aioxmpp.security_layer.default_ssl_context` is used, which should be
fine for most applications.
.. warning::
The :func:`~.default_ssl_context` implementation sets important
defaults. It is **strongly recommended** to use the context returned
by :func:`~.default_ssl_context` and modify it, instead of creating
a new context from scratch when implementing your own factory.
If `no_verify` is true, none of the above regarding certificate verifiers
matters. The internal null verifier is used, which **disables certificate
verification completely**.
.. warning::
Disabling certificate verification makes your application vulnerable to
trivial Man-in-the-Middle attacks. Do **not** use this outside
controlled test environments or when you know **exactly** what you’re
doing!
If you need to handle certificates which cannot be verified using the
public key infrastructure, consider making use of the `pin_store`
argument instead.
`anonymous` may be a string or :data:`False`. If it is not :data:`False`,
:class:`AnonymousSASLProvider` is used before password based authentication
is attempted. In addition, it is allowed to set `password_provider` to
:data:`None`. `anonymous` is the trace token to use, and SHOULD be the
empty string (as specified by :xep:`175`). This requires :mod:`aiosasl` 0.3
or newer.
.. note::
:data:`False` and ``""`` are treated differently for the `anonymous`
argument, despite both being false-y values!
.. note::
If `anonymous` is not :data:`False` and `password_provider` is not
:data:`None`, both authentication types are attempted. Anonymous
authentication is, in that case, preferred over password-based
authentication.
If you need to reverse the order, you have to construct your own
:class:`SecurityLayer` object.
.. warning::
Take the security and privacy considerations from :rfc:`4505` (which
specifies the ANONYMOUS SASL mechanism) and :xep:`175` (which discusses
the ANONYMOUS SASL mechanism in the XMPP context) into account before
using `anonymous`.
The versatility and simplicity of use of this function make (pun intended)
it the preferred way to construct :class:`SecurityLayer` instances.
.. versionadded:: 0.8
Support for SASL ANONYMOUS was added.
.. versionadded:: 0.11
Support for `ssl_context_factory`.
### Response:
def make(
password_provider,
*,
pin_store=None,
pin_type=PinType.PUBLIC_KEY,
post_handshake_deferred_failure=None,
anonymous=False,
ssl_context_factory=default_ssl_context,
no_verify=False):
"""
Construct a :class:`SecurityLayer`. Depending on the arguments passed,
different features are enabled or disabled.
.. warning::
When using any argument except `password_provider`, be sure to read
its documentation below the following overview **carefully**. Many
arguments can be used to shoot yourself in the foot easily, while
violating all security expectations.
Args:
password_provider (:class:`str` or coroutine function):
Password source to authenticate with.
Keyword Args:
pin_store (:class:`dict` or :class:`AbstractPinStore`):
Enable use of certificate/public key pinning. `pin_type` controls
the type of store used when a dict is passed instead of a pin store
object.
pin_type (:class:`~aioxmpp.security_layer.PinType`):
Type of pin store to create when `pin_store` is a dict.
post_handshake_deferred_failure (coroutine function):
Coroutine callback to invoke when using certificate pinning and the
verification of the certificate was not possible using either PKIX
or the pin store.
anonymous (:class:`str`, :data:`None` or :data:`False`):
trace token for SASL ANONYMOUS (:rfc:`4505`); passing a
non-:data:`False` value enables ANONYMOUS authentication.
ssl_context_factory (function): Factory function to create the SSL
context used to establish transport layer security. Defaults to
:func:`aioxmpp.security_layer.default_ssl_context`.
no_verify (:class:`bool`): *Disable* all certificate verification.
Usage is **strongly discouraged** outside controlled test
environments. See below for alternatives.
Raises:
RuntimeError: if `anonymous` is not :data:`False` and the version of
:mod:`aiosasl` does not support ANONYMOUS authentication.
Returns:
:class:`SecurityLayer`: object holding the entire security layer
configuration
`password_provider` must either be a coroutine function or a :class:`str`.
As a coroutine function, it is called during authentication with the JID we
are trying to authenticate against as the first, and the sequence number of
the authentication attempt as second argument. The sequence number starts
at 0. The coroutine is expected to return :data:`None` or a password. See
:class:`PasswordSASLProvider` for details. If `password_provider` is a
:class:`str`, a coroutine which returns the string on the first and
:data:`None` on subsequent attempts is created and used.
If `pin_store` is not :data:`None`, :class:`PinningPKIXCertificateVerifier`
is used instead of the default :class:`PKIXCertificateVerifier`. The
`pin_store` argument determines the pinned certificates: if it is a
dictionary, a :class:`AbstractPinStore` according to the :class:`PinType`
passed as `pin_type` argument is created and initialised with the data from
the dictionary using its :meth:`~AbstractPinStore.import_from_json` method.
Otherwise, `pin_store` must be a :class:`AbstractPinStore` instance which
is passed to the verifier.
`post_handshake_deferred_callback` is used only if `pin_store` is not
:data:`None`. It is passed to the equally-named argument of
:class:`PinningPKIXCertificateVerifier`, see the documentation there for
details on the semantics. If `post_handshake_deferred_callback` is
:data:`None` while `pin_store` is not, a coroutine which returns
:data:`False` is substituted.
`ssl_context_factory` can be a callable taking no arguments and returning
a :class:`OpenSSL.SSL.Context` object. If given, the factory will be used
to obtain an SSL context when the stream negotiates transport layer
security via TLS. By default,
:func:`aioxmpp.security_layer.default_ssl_context` is used, which should be
fine for most applications.
.. warning::
The :func:`~.default_ssl_context` implementation sets important
defaults. It is **strongly recommended** to use the context returned
by :func:`~.default_ssl_context` and modify it, instead of creating
a new context from scratch when implementing your own factory.
If `no_verify` is true, none of the above regarding certificate verifiers
matters. The internal null verifier is used, which **disables certificate
verification completely**.
.. warning::
Disabling certificate verification makes your application vulnerable to
trivial Man-in-the-Middle attacks. Do **not** use this outside
controlled test environments or when you know **exactly** what you’re
doing!
If you need to handle certificates which cannot be verified using the
public key infrastructure, consider making use of the `pin_store`
argument instead.
`anonymous` may be a string or :data:`False`. If it is not :data:`False`,
:class:`AnonymousSASLProvider` is used before password based authentication
is attempted. In addition, it is allowed to set `password_provider` to
:data:`None`. `anonymous` is the trace token to use, and SHOULD be the
empty string (as specified by :xep:`175`). This requires :mod:`aiosasl` 0.3
or newer.
.. note::
:data:`False` and ``""`` are treated differently for the `anonymous`
argument, despite both being false-y values!
.. note::
If `anonymous` is not :data:`False` and `password_provider` is not
:data:`None`, both authentication types are attempted. Anonymous
authentication is, in that case, preferred over password-based
authentication.
If you need to reverse the order, you have to construct your own
:class:`SecurityLayer` object.
.. warning::
Take the security and privacy considerations from :rfc:`4505` (which
specifies the ANONYMOUS SASL mechanism) and :xep:`175` (which discusses
the ANONYMOUS SASL mechanism in the XMPP context) into account before
using `anonymous`.
The versatility and simplicity of use of this function make (pun intended)
it the preferred way to construct :class:`SecurityLayer` instances.
.. versionadded:: 0.8
Support for SASL ANONYMOUS was added.
.. versionadded:: 0.11
Support for `ssl_context_factory`.
"""
if isinstance(password_provider, str):
static_password = password_provider
@asyncio.coroutine
def password_provider(jid, nattempt):
if nattempt == 0:
return static_password
return None
if pin_store is not None:
if post_handshake_deferred_failure is None:
@asyncio.coroutine
def post_handshake_deferred_failure(verifier):
return False
if not isinstance(pin_store, AbstractPinStore):
pin_data = pin_store
if pin_type == PinType.PUBLIC_KEY:
logger.debug("using PublicKeyPinStore")
pin_store = PublicKeyPinStore()
else:
logger.debug("using CertificatePinStore")
pin_store = CertificatePinStore()
pin_store.import_from_json(pin_data)
def certificate_verifier_factory():
return PinningPKIXCertificateVerifier(
pin_store.query,
post_handshake_deferred_failure,
)
elif no_verify:
certificate_verifier_factory = _NullVerifier
else:
certificate_verifier_factory = PKIXCertificateVerifier
sasl_providers = []
if anonymous is not False:
if AnonymousSASLProvider is None:
raise RuntimeError(
"aiosasl does not support ANONYMOUS, please upgrade"
)
sasl_providers.append(
AnonymousSASLProvider(anonymous)
)
if password_provider is not None:
sasl_providers.append(
PasswordSASLProvider(
password_provider,
),
)
return SecurityLayer(
ssl_context_factory,
certificate_verifier_factory,
True,
tuple(sasl_providers),
) |
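A minimal usage sketch (not part of the record above): it assumes aioxmpp is installed, that this function is exposed as aioxmpp.security_layer.make, and that "pins.json" is a hypothetical file holding previously exported pins.
import json
import aioxmpp.security_layer as security_layer

with open("pins.json") as f:            # hypothetical pin export
    pin_data = json.load(f)

security = security_layer.make(
    "hunter2",                          # static password (str form)
    pin_store=pin_data,                 # a dict is converted to a pin store
    pin_type=security_layer.PinType.PUBLIC_KEY,
)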
def get_future_days(self):
"""Return only future Day objects."""
today = timezone.now().date()
return Day.objects.filter(date__gte=today) | Return only future Day objects. | Below is the instruction that describes the task:
### Input:
Return only future Day objects.
### Response:
def get_future_days(self):
"""Return only future Day objects."""
today = timezone.now().date()
return Day.objects.filter(date__gte=today) |
def ListJobs(self, token=None):
"""Returns a list of all currently running cron jobs."""
job_root = aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token)
return [urn.Basename() for urn in job_root.ListChildren()] | Returns a list of all currently running cron jobs. | Below is the instruction that describes the task:
### Input:
Returns a list of all currently running cron jobs.
### Response:
def ListJobs(self, token=None):
"""Returns a list of all currently running cron jobs."""
job_root = aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token)
return [urn.Basename() for urn in job_root.ListChildren()] |
def dict_to_cognito(attributes, attr_map=None):
"""
:param attributes: Dictionary of User Pool attribute names/values
:return: list of User Pool attribute formatted dicts: {'Name': <attr_name>, 'Value': <attr_value>}
"""
if attr_map is None:
attr_map = {}
for k,v in attr_map.items():
if v in attributes.keys():
attributes[k] = attributes.pop(v)
return [{'Name': key, 'Value': value} for key, value in attributes.items()] | :param attributes: Dictionary of User Pool attribute names/values
:return: list of User Pool attribute formatted dicts: {'Name': <attr_name>, 'Value': <attr_value>} | Below is the instruction that describes the task:
### Input:
:param attributes: Dictionary of User Pool attribute names/values
:return: list of User Pool attribute formatted dicts: {'Name': <attr_name>, 'Value': <attr_value>}
### Response:
def dict_to_cognito(attributes, attr_map=None):
"""
:param attributes: Dictionary of User Pool attribute names/values
:return: list of User Pool attribute formatted dicts: {'Name': <attr_name>, 'Value': <attr_value>}
"""
if attr_map is None:
attr_map = {}
for k,v in attr_map.items():
if v in attributes.keys():
attributes[k] = attributes.pop(v)
return [{'Name': key, 'Value': value} for key, value in attributes.items()] |
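A short usage sketch, assuming the dict_to_cognito function above is in scope; the attribute names and mapping below are illustrative only.
attrs = {"email": "ann@example.com", "first_name": "Ann"}
attr_map = {"given_name": "first_name"}   # Cognito name -> local name
print(dict_to_cognito(attrs, attr_map))
# [{'Name': 'email', 'Value': 'ann@example.com'},
#  {'Name': 'given_name', 'Value': 'Ann'}]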
def eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert is_text_string(text)
try:
return eval(text, self.locals), True
except:
return None, False | Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception | Below is the instruction that describes the task:
### Input:
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
### Response:
def eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
assert is_text_string(text)
try:
return eval(text, self.locals), True
except:
return None, False |
def from_inet_ptoi(bgp_id):
"""Convert an IPv4 address string format to a four byte long.
"""
four_byte_id = None
try:
four_byte_id = ip.ipv4_to_int(bgp_id)
except ValueError:
LOG.debug('Invalid bgp id given for conversion to integer value %s',
bgp_id)
return four_byte_id | Convert an IPv4 address string format to a four byte long. | Below is the instruction that describes the task:
### Input:
Convert an IPv4 address string format to a four byte long.
### Response:
def from_inet_ptoi(bgp_id):
"""Convert an IPv4 address string format to a four byte long.
"""
four_byte_id = None
try:
four_byte_id = ip.ipv4_to_int(bgp_id)
except ValueError:
LOG.debug('Invalid bgp id given for conversion to integer value %s',
bgp_id)
return four_byte_id |
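An illustrative call, assuming the surrounding module's ip helper converts dotted-quad strings to big-endian integers as its name suggests; the expected value below follows from that assumption.
print(from_inet_ptoi("10.0.0.1"))   # expected: 167772161 (10*2**24 + 1)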
def do_import(token, account_uuid, bank_account, since=None):
"""Import data from teller.io
Returns the created StatementImport
"""
response = requests.get(
url="https://api.teller.io/accounts/{}/transactions".format(account_uuid),
headers={"Authorization": "Bearer {}".format(token)},
)
response.raise_for_status()
data = response.json()
statement_import = StatementImport.objects.create(
source="teller.io", extra={"account_uuid": account_uuid}, bank_account=bank_account
)
for line_data in data:
uuid = UUID(hex=line_data["id"])
if StatementLine.objects.filter(uuid=uuid):
continue
description = ", ".join(filter(bool, [line_data["counterparty"], line_data["description"]]))
date = datetime.date(*map(int, line_data["date"].split("-")))
if not since or date >= since:
StatementLine.objects.create(
uuid=uuid,
date=line_data["date"],
statement_import=statement_import,
amount=line_data["amount"],
type=line_data["type"],
description=description,
source_data=line_data,
) | Import data from teller.io
Returns the created StatementImport | Below is the instruction that describes the task:
### Input:
Import data from teller.io
Returns the created StatementImport
### Response:
def do_import(token, account_uuid, bank_account, since=None):
"""Import data from teller.io
Returns the created StatementImport
"""
response = requests.get(
url="https://api.teller.io/accounts/{}/transactions".format(account_uuid),
headers={"Authorization": "Bearer {}".format(token)},
)
response.raise_for_status()
data = response.json()
statement_import = StatementImport.objects.create(
source="teller.io", extra={"account_uuid": account_uuid}, bank_account=bank_account
)
for line_data in data:
uuid = UUID(hex=line_data["id"])
if StatementLine.objects.filter(uuid=uuid):
continue
description = ", ".join(filter(bool, [line_data["counterparty"], line_data["description"]]))
date = datetime.date(*map(int, line_data["date"].split("-")))
if not since or date >= since:
StatementLine.objects.create(
uuid=uuid,
date=line_data["date"],
statement_import=statement_import,
amount=line_data["amount"],
type=line_data["type"],
description=description,
source_data=line_data,
) |
def _find(self, spec):
"""
Find and return the path to the template associated to the instance.
"""
if spec.template_path is not None:
return spec.template_path
dir_path, file_name = self._find_relative(spec)
locator = self.loader._make_locator()
if dir_path is None:
# Then we need to search for the path.
path = locator.find_object(spec, self.loader.search_dirs, file_name=file_name)
else:
obj_dir = locator.get_object_directory(spec)
path = os.path.join(obj_dir, dir_path, file_name)
return path | Find and return the path to the template associated to the instance. | Below is the instruction that describes the task:
### Input:
Find and return the path to the template associated to the instance.
### Response:
def _find(self, spec):
"""
Find and return the path to the template associated to the instance.
"""
if spec.template_path is not None:
return spec.template_path
dir_path, file_name = self._find_relative(spec)
locator = self.loader._make_locator()
if dir_path is None:
# Then we need to search for the path.
path = locator.find_object(spec, self.loader.search_dirs, file_name=file_name)
else:
obj_dir = locator.get_object_directory(spec)
path = os.path.join(obj_dir, dir_path, file_name)
return path |
def update_rbac_policy(self, rbac_policy_id, body=None):
"""Update a RBAC policy."""
return self.put(self.rbac_policy_path % rbac_policy_id, body=body) | Update a RBAC policy. | Below is the instruction that describes the task:
### Input:
Update a RBAC policy.
### Response:
def update_rbac_policy(self, rbac_policy_id, body=None):
"""Update a RBAC policy."""
return self.put(self.rbac_policy_path % rbac_policy_id, body=body) |
def create_dataset(self, name=None, description=None, public=False):
"""
Create a new data set.
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should be public.
:type public: bool
:return: The newly created dataset.
:rtype: :class:`Dataset`
"""
data = {
"public": _convert_bool_to_public_value(public)
}
if name:
data["name"] = name
if description:
data["description"] = description
dataset = {"dataset": data}
failure_message = "Unable to create dataset"
result = self._get_success_json(self._post_json(routes.create_dataset(), dataset, failure_message=failure_message))
return _dataset_from_response_dict(result) | Create a new data set.
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should be public.
:type public: bool
:return: The newly created dataset.
:rtype: :class:`Dataset` | Below is the instruction that describes the task:
### Input:
Create a new data set.
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should be public.
:type public: bool
:return: The newly created dataset.
:rtype: :class:`Dataset`
### Response:
def create_dataset(self, name=None, description=None, public=False):
"""
Create a new data set.
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should be public.
:type public: bool
:return: The newly created dataset.
:rtype: :class:`Dataset`
"""
data = {
"public": _convert_bool_to_public_value(public)
}
if name:
data["name"] = name
if description:
data["description"] = description
dataset = {"dataset": data}
failure_message = "Unable to create dataset"
result = self._get_success_json(self._post_json(routes.create_dataset(), dataset, failure_message=failure_message))
return _dataset_from_response_dict(result) |
def show(config, username):
"""Display a specific user."""
client = Client()
client.prepare_connection()
user_api = API(client)
CLI.show_user(user_api.show(username)) | Display a specific user. | Below is the instruction that describes the task:
### Input:
Display a specific user.
### Response:
def show(config, username):
"""Display a specific user."""
client = Client()
client.prepare_connection()
user_api = API(client)
CLI.show_user(user_api.show(username)) |
def _setup_states(state_definitions, prev=()):
"""Create a StateList object from a 'states' Workflow attribute."""
states = list(prev)
for state_def in state_definitions:
if len(state_def) != 2:
raise TypeError(
"The 'state' attribute of a workflow should be "
"a two-tuple of strings; got %r instead." % (state_def,)
)
name, title = state_def
state = State(name, title)
if any(st.name == name for st in states):
# Replacing an existing state
states = [state if st.name == name else st for st in states]
else:
states.append(state)
return StateList(states) | Create a StateList object from a 'states' Workflow attribute. | Below is the instruction that describes the task:
### Input:
Create a StateList object from a 'states' Workflow attribute.
### Response:
def _setup_states(state_definitions, prev=()):
"""Create a StateList object from a 'states' Workflow attribute."""
states = list(prev)
for state_def in state_definitions:
if len(state_def) != 2:
raise TypeError(
"The 'state' attribute of a workflow should be "
"a two-tuple of strings; got %r instead." % (state_def,)
)
name, title = state_def
state = State(name, title)
if any(st.name == name for st in states):
# Replacing an existing state
states = [state if st.name == name else st for st in states]
else:
states.append(state)
return StateList(states) |
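An illustrative sketch, assuming _setup_states and the State/StateList helpers it relies on are importable from the same module; it shows that a later definition reusing an existing name replaces that state in place rather than appending a duplicate.
base = _setup_states([("draft", "Draft"), ("done", "Done")])
extended = _setup_states([("done", "Completed"), ("archived", "Archived")], prev=base)
# 'done' keeps its original position but now carries the title "Completed";
# 'archived' is appended at the end.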
def parse_int(str_num):
""" Given an integer number, return its value,
or None if it could not be parsed.
Allowed formats: DECIMAL, HEXA (0xnnn, $nnnn or nnnnh)
:param str_num: (string) the number to be parsed
:return: an integer number or None if it could not be parsed
"""
str_num = (str_num or "").strip().upper()
if not str_num:
return None
base = 10
if str_num.startswith('0X'):
base = 16
str_num = str_num[2:]
if str_num.endswith('H'):
base = 16
str_num = str_num[:-1]
if str_num.startswith('$'):
base = 16
str_num = str_num[1:]
try:
return int(str_num, base)
except ValueError:
return None | Given an integer number, return its value,
or None if it could not be parsed.
Allowed formats: DECIMAL, HEXA (0xnnn, $nnnn or nnnnh)
:param str_num: (string) the number to be parsed
:return: an integer number or None if it could not be parsed | Below is the instruction that describes the task:
### Input:
Given an integer number, return its value,
or None if it could not be parsed.
Allowed formats: DECIMAL, HEXA (0xnnn, $nnnn or nnnnh)
:param str_num: (string) the number to be parsed
:return: an integer number or None if it could not be parsed
### Response:
def parse_int(str_num):
""" Given an integer number, return its value,
or None if it could not be parsed.
Allowed formats: DECIMAL, HEXA (0xnnn, $nnnn or nnnnh)
:param str_num: (string) the number to be parsed
:return: an integer number or None if it could not be parsed
"""
str_num = (str_num or "").strip().upper()
if not str_num:
return None
base = 10
if str_num.startswith('0X'):
base = 16
str_num = str_num[2:]
if str_num.endswith('H'):
base = 16
str_num = str_num[:-1]
if str_num.startswith('$'):
base = 16
str_num = str_num[1:]
try:
return int(str_num, base)
except ValueError:
return None |
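A quick self-check of the accepted formats, assuming parse_int as defined above is in scope.
assert parse_int("42") == 42       # decimal
assert parse_int("0x1A") == 26     # 0x prefix
assert parse_int("$ff") == 255     # $ prefix
assert parse_int("20h") == 32      # h suffix
assert parse_int("zz") is None     # unparsable input returns None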
def compute_control_digit(clabe: str) -> str:
"""
Compute CLABE control digit according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
clabe = [int(i) for i in clabe]
weighted = [c * w % 10 for c, w in
zip(clabe[:CLABE_LENGTH - 1], CLABE_WEIGHTS)]
summed = sum(weighted) % 10
control_digit = (10 - summed) % 10
return str(control_digit) | Compute CLABE control digit according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control | Below is the instruction that describes the task:
### Input:
Compute CLABE control digit according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
### Response:
def compute_control_digit(clabe: str) -> str:
"""
Compute CLABE control digit according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
clabe = [int(i) for i in clabe]
weighted = [c * w % 10 for c, w in
zip(clabe[:CLABE_LENGTH - 1], CLABE_WEIGHTS)]
summed = sum(weighted) % 10
control_digit = (10 - summed) % 10
return str(control_digit) |
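A runnable sketch; the CLABE_LENGTH and CLABE_WEIGHTS constants are not part of this record, so stand-ins using the standard 3-7-1 weight cycle are assumed here purely for illustration.
CLABE_LENGTH = 18
CLABE_WEIGHTS = [3, 7, 1] * 6   # assumed standard cycle; only the first 17 weights are used
print(compute_control_digit("12345678901234567"))   # '3' under the assumed weights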
def _translators(attr, kwargs):
"""
Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated.
"""
# Add translators to a function or class
def decorator(func):
# Make sure we have the attribute
try:
xlators = getattr(func, attr)
except AttributeError:
xlators = {}
setattr(func, attr, xlators)
xlators.update(kwargs)
return func
return decorator | Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated. | Below is the instruction that describes the task:
### Input:
Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated.
### Response:
def _translators(attr, kwargs):
"""
Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated.
"""
# Add translators to a function or class
def decorator(func):
# Make sure we have the attribute
try:
xlators = getattr(func, attr)
except AttributeError:
xlators = {}
setattr(func, attr, xlators)
xlators.update(kwargs)
return func
return decorator |
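A sketch of how such a decorator factory might be used; the attribute name '_serializers' and the translator below are made up for illustration, assuming _translators is in scope.
def serializers(**kwargs):
    # Public decorator that stores its keyword arguments under '_serializers'.
    return _translators('_serializers', kwargs)

@serializers(json=lambda value: {"wrapped": value})
def handler(value):
    return value

print(list(handler._serializers))   # ['json']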
def SA_ellipsoidal_head(D, a):
r'''Calculates the surface area of an ellipsoidal head according to [1]_.
Formula below is for the full shape, the result of which is halved. The
formula also does not support `D` being larger than `a`; this is ensured
by simply swapping the variables if necessary, as geometrically the result
is the same. In the equations
.. math::
SA = 2\pi a^2 + \frac{\pi c^2}{e_1}\ln\left(\frac{1+e_1}{1-e_1}\right)
.. math::
e_1 = \sqrt{1 - \frac{c^2}{a^2}}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends, [m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Spherical case
>>> SA_ellipsoidal_head(2, 1)
6.283185307179586
References
----------
.. [1] Weisstein, Eric W. "Spheroid." Text. Accessed March 14, 2016.
http://mathworld.wolfram.com/Spheroid.html.
'''
if D == a*2:
return pi*D**2/2 # necessary to avoid a division by zero when D == a
D = D/2.
D, a = min((D, a)), max((D, a))
e1 = (1 - D**2/a**2)**0.5
return (2*pi*a**2 + pi*D**2/e1*log((1+e1)/(1-e1)))/2. | r'''Calculates the surface area of an ellipsoidal head according to [1]_.
Formula below is for the full shape, the result of which is halved. The
formula also does not support `D` being larger than `a`; this is ensured
by simply swapping the variables if necessary, as geometrically the result
is the same. In the equations
.. math::
SA = 2\pi a^2 + \frac{\pi c^2}{e_1}\ln\left(\frac{1+e_1}{1-e_1}\right)
.. math::
e_1 = \sqrt{1 - \frac{c^2}{a^2}}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends, [m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Spherical case
>>> SA_ellipsoidal_head(2, 1)
6.283185307179586
References
----------
.. [1] Weisstein, Eric W. "Spheroid." Text. Accessed March 14, 2016.
http://mathworld.wolfram.com/Spheroid.html. | Below is the instruction that describes the task:
### Input:
r'''Calculates the surface area of an ellipsoidal head according to [1]_.
Formula below is for the full shape, the result of which is halved. The
formula also does not support `D` being larger than `a`; this is ensured
by simply swapping the variables if necessary, as geometrically the result
is the same. In the equations
.. math::
SA = 2\pi a^2 + \frac{\pi c^2}{e_1}\ln\left(\frac{1+e_1}{1-e_1}\right)
.. math::
e_1 = \sqrt{1 - \frac{c^2}{a^2}}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends, [m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Spherical case
>>> SA_ellipsoidal_head(2, 1)
6.283185307179586
References
----------
.. [1] Weisstein, Eric W. "Spheroid." Text. Accessed March 14, 2016.
http://mathworld.wolfram.com/Spheroid.html.
### Response:
def SA_ellipsoidal_head(D, a):
r'''Calculates the surface area of an ellipsoidal head according to [1]_.
Formula below is for the full shape, the result of which is halved. The
formula also does not support `D` being larger than `a`; this is ensured
by simply swapping the variables if necessary, as geometrically the result
is the same. In the equations
.. math::
SA = 2\pi a^2 + \frac{\pi c^2}{e_1}\ln\left(\frac{1+e_1}{1-e_1}\right)
.. math::
e_1 = \sqrt{1 - \frac{c^2}{a^2}}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
a : float
Distance the ellipsoidal head extends, [m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Spherical case
>>> SA_ellipsoidal_head(2, 1)
6.283185307179586
References
----------
.. [1] Weisstein, Eric W. "Spheroid." Text. Accessed March 14, 2016.
http://mathworld.wolfram.com/Spheroid.html.
'''
if D == a*2:
return pi*D**2/2 # necessary to avoid a division by zero when D == a
D = D/2.
D, a = min((D, a)), max((D, a))
e1 = (1 - D**2/a**2)**0.5
return (2*pi*a**2 + pi*D**2/e1*log((1+e1)/(1-e1)))/2. |
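A quick sanity check, assuming SA_ellipsoidal_head as defined above: a 2:1 head (a equal to D/2) is a hemisphere, so its area must be half of a full sphere's 4*pi*r**2.
from math import pi, isclose
assert isclose(SA_ellipsoidal_head(2, 1), 0.5 * 4 * pi * 1**2)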
def patch(self, url, headers=None, body=None, kwargs=None):
"""Make a PATCH request.
To make a PATCH request, pass ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict``
"""
return self._request(
method='patch',
url=url,
headers=headers,
body=body,
kwargs=kwargs
) | Make a PATCH request.
To make a PATCH request, pass ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict`` | Below is the instruction that describes the task:
### Input:
Make a PATCH request.
To make a PATCH request, pass ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict``
### Response:
def patch(self, url, headers=None, body=None, kwargs=None):
"""Make a PATCH request.
To make a PATCH request, pass ``url``
:param url: ``str``
:param headers: ``dict``
:param body: ``object``
:param kwargs: ``dict``
"""
return self._request(
method='patch',
url=url,
headers=headers,
body=body,
kwargs=kwargs
) |
def format(self, version=None, wipe=None):
"""Erase the NDEF message on a Type 2 Tag.
The :meth:`format` method will reset the length of the NDEF
message on a type 2 tag to zero, thus the tag will appear to
be empty. Additionally, if the *wipe* argument is set to some
integer then :meth:`format` will overwrite all user data that
follows the NDEF message TLV with that integer (mod 256). If
an NDEF message TLV is not present it will be created with a
length of zero.
Despite its name, the :meth:`format` method can not format a
blank tag to make it NDEF compatible. This is because the user
data area of a type 2 tag can not be safely determined, also
reading all memory pages until an error response yields only
the total memory size which includes an undetermined number of
special pages at the end of memory.
It is also not possible to change the NDEF mapping version,
located in a one-time-programmable area of the tag memory.
"""
return super(Type2Tag, self).format(version, wipe) | Erase the NDEF message on a Type 2 Tag.
The :meth:`format` method will reset the length of the NDEF
message on a type 2 tag to zero, thus the tag will appear to
be empty. Additionally, if the *wipe* argument is set to some
integer then :meth:`format` will overwrite all user data that
follows the NDEF message TLV with that integer (mod 256). If
an NDEF message TLV is not present it will be created with a
length of zero.
Despite its name, the :meth:`format` method can not format a
blank tag to make it NDEF compatible. This is because the user
data area of a type 2 tag can not be safely determined, also
reading all memory pages until an error response yields only
the total memory size which includes an undetermined number of
special pages at the end of memory.
It is also not possible to change the NDEF mapping version,
located in a one-time-programmable area of the tag memory. | Below is the instruction that describes the task:
### Input:
Erase the NDEF message on a Type 2 Tag.
The :meth:`format` method will reset the length of the NDEF
message on a type 2 tag to zero, thus the tag will appear to
be empty. Additionally, if the *wipe* argument is set to some
integer then :meth:`format` will overwrite all user data that
follows the NDEF message TLV with that integer (mod 256). If
an NDEF message TLV is not present it will be created with a
length of zero.
Despite its name, the :meth:`format` method can not format a
blank tag to make it NDEF compatible. This is because the user
data area of a type 2 tag can not be safely determined, also
reading all memory pages until an error response yields only
the total memory size which includes an undetermined number of
special pages at the end of memory.
It is also not possible to change the NDEF mapping version,
located in a one-time-programmable area of the tag memory.
### Response:
def format(self, version=None, wipe=None):
"""Erase the NDEF message on a Type 2 Tag.
The :meth:`format` method will reset the length of the NDEF
message on a type 2 tag to zero, thus the tag will appear to
be empty. Additionally, if the *wipe* argument is set to some
integer then :meth:`format` will overwrite all user data that
follows the NDEF message TLV with that integer (mod 256). If
an NDEF message TLV is not present it will be created with a
length of zero.
Despite its name, the :meth:`format` method can not format a
blank tag to make it NDEF compatible. This is because the user
data area of a type 2 tag can not be safely determined, also
reading all memory pages until an error response yields only
the total memory size which includes an undetermined number of
special pages at the end of memory.
It is also not possible to change the NDEF mapping version,
located in a one-time-programmable area of the tag memory.
"""
return super(Type2Tag, self).format(version, wipe) |
def get(self, keys, default=None):
"""
To get a value by dot notation
:param keys: dot notation string
:param default:
:return:
"""
try:
d = self
for k in keys.split("."):
if not k in d:
d[k] = {}
d = d[k]
if isinstance(d, bool):
return d
return d or default
except (TypeError, KeyError) as e:
return default | To get a value by dot notation
:param keys: dot notation string
:param default:
:return: | Below is the instruction that describes the task:
### Input:
To get a value by dot notation
:param keys: dot notation string
:param default:
:return:
### Response:
def get(self, keys, default=None):
"""
To get a value by dot notation
:param keys: dot notation string
:param default:
:return:
"""
try:
d = self
for k in keys.split("."):
if not k in d:
d[k] = {}
d = d[k]
if isinstance(d, bool):
return d
return d or default
except (TypeError, KeyError) as e:
return default |
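A minimal illustration: a hypothetical dict subclass carrying the get() defined above (assumed to be in scope). Note that missing intermediate keys are created as empty dicts, so lookups never raise and instead fall back to the default.
class DotDict(dict):
    pass

DotDict.get = get   # attach the method defined above

d = DotDict({"site": {"name": "demo", "active": True}})
print(d.get("site.name"))                 # 'demo'
print(d.get("site.active"))               # True (bools are returned as-is)
print(d.get("site.owner.email", "n/a"))   # 'n/a' (empty dict is falsy -> default)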
def _close_rpc_interface(self, connection_id, callback):
"""Disable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
"""
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, "Could not find connection information")
return
self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))
try:
service = context['services'][TileBusService]
header_characteristic = service[ReceiveHeaderChar]
payload_characteristic = service[ReceivePayloadChar]
except KeyError:
self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
return
self.bable.set_notification(
enabled=False,
connection_handle=context['connection_handle'],
characteristic=header_characteristic,
on_notification_set=[self._on_interface_closed, context, payload_characteristic],
timeout=1.0
) | Disable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason) | Below is the instruction that describes the task:
### Input:
Disable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
### Response:
def _close_rpc_interface(self, connection_id, callback):
"""Disable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
"""
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, "Could not find connection information")
return
self.connections.begin_operation(connection_id, 'close_interface', callback, self.get_config('default_timeout'))
try:
service = context['services'][TileBusService]
header_characteristic = service[ReceiveHeaderChar]
payload_characteristic = service[ReceivePayloadChar]
except KeyError:
self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
return
self.bable.set_notification(
enabled=False,
connection_handle=context['connection_handle'],
characteristic=header_characteristic,
on_notification_set=[self._on_interface_closed, context, payload_characteristic],
timeout=1.0
) |
def _generate_union(self, union_type):
"""
Emits a JSDoc @typedef for a union type.
"""
union_name = fmt_type_name(union_type)
self._emit_jsdoc_header(union_type.doc)
self.emit(' * @typedef {Object} %s' % union_name)
variant_types = []
for variant in union_type.all_fields:
variant_types.append("'%s'" % variant.name)
variant_data_type, _, _ = unwrap(variant.data_type)
# Don't emit fields for void types.
if not is_void_type(variant_data_type):
variant_doc = ' - Available if .tag is %s.' % variant.name
if variant.doc:
variant_doc += ' ' + variant.doc
self.emit_wrapped_text(
'@property {%s} [%s]%s' % (
fmt_type(variant_data_type),
variant.name,
variant_doc,
),
prefix=' * ',
)
jsdoc_tag_union = fmt_jsdoc_union(variant_types)
self.emit(' * @property {%s} .tag - Tag identifying the union variant.' % jsdoc_tag_union)
self.emit(' */') | Emits a JSDoc @typedef for a union type. | Below is the instruction that describes the task:
### Input:
Emits a JSDoc @typedef for a union type.
### Response:
def _generate_union(self, union_type):
"""
Emits a JSDoc @typedef for a union type.
"""
union_name = fmt_type_name(union_type)
self._emit_jsdoc_header(union_type.doc)
self.emit(' * @typedef {Object} %s' % union_name)
variant_types = []
for variant in union_type.all_fields:
variant_types.append("'%s'" % variant.name)
variant_data_type, _, _ = unwrap(variant.data_type)
# Don't emit fields for void types.
if not is_void_type(variant_data_type):
variant_doc = ' - Available if .tag is %s.' % variant.name
if variant.doc:
variant_doc += ' ' + variant.doc
self.emit_wrapped_text(
'@property {%s} [%s]%s' % (
fmt_type(variant_data_type),
variant.name,
variant_doc,
),
prefix=' * ',
)
jsdoc_tag_union = fmt_jsdoc_union(variant_types)
self.emit(' * @property {%s} .tag - Tag identifying the union variant.' % jsdoc_tag_union)
self.emit(' */') |
def _convert_pooling_param(param):
"""Convert the pooling layer parameter
"""
param_string = "pooling_convention='full', "
if param.global_pooling:
param_string += "global_pool=True, kernel=(1,1)"
else:
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
param.pad, param.pad, param.kernel_size, param.kernel_size,
param.stride, param.stride)
if param.pool == 0:
param_string += ", pool_type='max'"
elif param.pool == 1:
param_string += ", pool_type='avg'"
else:
raise ValueError("Unknown Pooling Method!")
return param_string | Convert the pooling layer parameter | Below is the instruction that describes the task:
### Input:
Convert the pooling layer parameter
### Response:
def _convert_pooling_param(param):
"""Convert the pooling layer parameter
"""
param_string = "pooling_convention='full', "
if param.global_pooling:
param_string += "global_pool=True, kernel=(1,1)"
else:
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
param.pad, param.pad, param.kernel_size, param.kernel_size,
param.stride, param.stride)
if param.pool == 0:
param_string += ", pool_type='max'"
elif param.pool == 1:
param_string += ", pool_type='avg'"
else:
raise ValueError("Unknown Pooling Method!")
return param_string |
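A rough usage sketch with a stand-in for the Caffe pooling parameter message; the field names mirror those accessed above, and _convert_pooling_param is assumed to be in scope.
from types import SimpleNamespace

pool_param = SimpleNamespace(global_pooling=False, pad=1, kernel_size=3,
                             stride=2, pool=0)
print(_convert_pooling_param(pool_param))
# pooling_convention='full', pad=(1,1), kernel=(3,3), stride=(2,2), pool_type='max'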
def invalidate_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself.
"""
log.debug('Destroy grant token for client %r, %r', client_id, code)
grant = self._grantgetter(client_id=client_id, code=code)
if grant:
grant.delete() | Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself. | Below is the instruction that describes the task:
### Input:
Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself.
### Response:
def invalidate_authorization_code(self, client_id, code, request,
*args, **kwargs):
"""Invalidate an authorization code after use.
We keep the temporary code in a grant, which has a `delete`
function to destroy itself.
"""
log.debug('Destroy grant token for client %r, %r', client_id, code)
grant = self._grantgetter(client_id=client_id, code=code)
if grant:
grant.delete() |
def filterAcceptsRow(self, sourceRow, sourceParent):
""" If onlyShowImported is True, regItems that were not (successfully) imported are
filtered out.
"""
if not self.onlyShowImported:
return True
item = self.sourceModel().registry.items[sourceRow]
return bool(item.successfullyImported) | If onlyShowImported is True, regItems that were not (successfully) imported are
filtered out. | Below is the instruction that describes the task:
### Input:
If onlyShowImported is True, regItems that were not (successfully) imported are
filtered out.
### Response:
def filterAcceptsRow(self, sourceRow, sourceParent):
""" If onlyShowImported is True, regItems that were not (successfully) imported are
filtered out.
"""
if not self.onlyShowImported:
return True
item = self.sourceModel().registry.items[sourceRow]
return bool(item.successfullyImported) |
def create_topics(self, new_topics, **kwargs):
"""
Create new topics in cluster.
The future result() value is None.
:param list(NewTopic) new_topics: New topics to be created.
:param float operation_timeout: Set broker's operation timeout in seconds,
controlling how long the CreateTopics request will block
on the broker waiting for the topic creation to propagate
in the cluster. A value of 0 returns immediately. Default: 0
:param float request_timeout: Set the overall request timeout in seconds,
including broker lookup, request transmission, operation time
on broker, and response. Default: `socket.timeout.ms*1000.0`
:param bool validate_only: Tell broker to only validate the request,
without creating the topic. Default: False
:returns: a dict of futures for each topic, keyed by the topic name.
:rtype: dict(<topic_name, future>)
:raises KafkaException: Operation failed locally or on broker.
:raises TypeException: Invalid input.
:raises ValueException: Invalid input.
"""
f, futmap = AdminClient._make_futures([x.topic for x in new_topics],
None,
AdminClient._make_topics_result)
super(AdminClient, self).create_topics(new_topics, f, **kwargs)
return futmap | Create new topics in cluster.
The future result() value is None.
:param list(NewTopic) new_topics: New topics to be created.
:param float operation_timeout: Set broker's operation timeout in seconds,
controlling how long the CreateTopics request will block
on the broker waiting for the topic creation to propagate
in the cluster. A value of 0 returns immediately. Default: 0
:param float request_timeout: Set the overall request timeout in seconds,
including broker lookup, request transmission, operation time
on broker, and response. Default: `socket.timeout.ms*1000.0`
:param bool validate_only: Tell broker to only validate the request,
without creating the topic. Default: False
:returns: a dict of futures for each topic, keyed by the topic name.
:rtype: dict(<topic_name, future>)
:raises KafkaException: Operation failed locally or on broker.
:raises TypeException: Invalid input.
:raises ValueException: Invalid input. | Below is the instruction that describes the task:
### Input:
Create new topics in cluster.
The future result() value is None.
:param list(NewTopic) new_topics: New topics to be created.
:param float operation_timeout: Set broker's operation timeout in seconds,
controlling how long the CreateTopics request will block
on the broker waiting for the topic creation to propagate
in the cluster. A value of 0 returns immediately. Default: 0
:param float request_timeout: Set the overall request timeout in seconds,
including broker lookup, request transmission, operation time
on broker, and response. Default: `socket.timeout.ms*1000.0`
:param bool validate_only: Tell broker to only validate the request,
without creating the topic. Default: False
:returns: a dict of futures for each topic, keyed by the topic name.
:rtype: dict(<topic_name, future>)
:raises KafkaException: Operation failed locally or on broker.
:raises TypeException: Invalid input.
:raises ValueException: Invalid input.
### Response:
def create_topics(self, new_topics, **kwargs):
"""
Create new topics in cluster.
The future result() value is None.
:param list(NewTopic) new_topics: New topics to be created.
:param float operation_timeout: Set broker's operation timeout in seconds,
controlling how long the CreateTopics request will block
on the broker waiting for the topic creation to propagate
in the cluster. A value of 0 returns immediately. Default: 0
:param float request_timeout: Set the overall request timeout in seconds,
including broker lookup, request transmission, operation time
on broker, and response. Default: `socket.timeout.ms*1000.0`
:param bool validate_only: Tell broker to only validate the request,
without creating the topic. Default: False
:returns: a dict of futures for each topic, keyed by the topic name.
:rtype: dict(<topic_name, future>)
:raises KafkaException: Operation failed locally or on broker.
:raises TypeException: Invalid input.
:raises ValueException: Invalid input.
"""
f, futmap = AdminClient._make_futures([x.topic for x in new_topics],
None,
AdminClient._make_topics_result)
super(AdminClient, self).create_topics(new_topics, f, **kwargs)
return futmap |
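A hypothetical caller sketch for the entry above; it assumes the confluent-kafka package is installed and a broker is reachable at localhost:9092 (both the address and the topic settings are illustration-only assumptions):

from confluent_kafka.admin import AdminClient, NewTopic

admin = AdminClient({"bootstrap.servers": "localhost:9092"})
# Request one topic; the returned dict maps topic name -> future.
futures = admin.create_topics([NewTopic("example-topic", num_partitions=3, replication_factor=1)])
for topic, future in futures.items():
    try:
        future.result()  # None on success
        print("created", topic)
    except Exception as exc:
        print("failed to create", topic, exc)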
def get_long():
"""
Generates a random long. The length of said long varies by platform.
"""
# The C long type to populate.
pbRandomData = c_ulong()
# Determine the byte size of this machine's long type.
size_of_long = wintypes.DWORD(sizeof(pbRandomData))
# Used to keep track of status. 1 = success, 0 = error.
ok = c_int()
# Handle to the cryptographic service provider (CSP) acquired below.
hProv = c_ulong()
ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None, PROV_RSA_FULL, 0)
ok = windll.Advapi32.CryptGenRandom(hProv, size_of_long, byref(pbRandomData))
return pbRandomData.value | Generates a random long. The length of said long varies by platform. | Below is the the instruction that describes the task:
### Input:
Generates a random long. The length of said long varies by platform.
### Response:
def get_long():
"""
Generates a random long. The length of said long varies by platform.
"""
# The C long type to populate.
pbRandomData = c_ulong()
# Determine the byte size of this machine's long type.
size_of_long = wintypes.DWORD(sizeof(pbRandomData))
# Used to keep track of status. 1 = success, 0 = error.
ok = c_int()
# Handle to the cryptographic service provider (CSP) acquired below.
hProv = c_ulong()
ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None, PROV_RSA_FULL, 0)
ok = windll.Advapi32.CryptGenRandom(hProv, size_of_long, byref(pbRandomData))
return pbRandomData.value |
def _IncludeFields(encoded_message, message, include_fields):
"""Add the requested fields to the encoded message."""
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split('.'))
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
'No field named %s in message of type %s' % (
field_name, type(message)))
_SetField(result, field_name.split('.'), nullvalue)
return json.dumps(result) | Add the requested fields to the encoded message. | Below is the the instruction that describes the task:
### Input:
Add the requested fields to the encoded message.
### Response:
def _IncludeFields(encoded_message, message, include_fields):
"""Add the requested fields to the encoded message."""
if include_fields is None:
return encoded_message
result = json.loads(encoded_message)
for field_name in include_fields:
try:
value = _GetField(message, field_name.split('.'))
nullvalue = None
if isinstance(value, list):
nullvalue = []
except KeyError:
raise exceptions.InvalidDataError(
'No field named %s in message of type %s' % (
field_name, type(message)))
_SetField(result, field_name.split('.'), nullvalue)
return json.dumps(result) |
def discoverAllNodes(self):
'''
.. todo:: node discovery has changed, breathe no longer used...update docs
'''
doxygen_index_xml = os.path.join(
configs._doxygen_xml_output_directory,
"index.xml"
)
try:
with codecs.open(doxygen_index_xml, "r", "utf-8") as index:
index_contents = index.read()
except:
raise RuntimeError("Could not read the contents of [{0}].".format(doxygen_index_xml))
try:
index_soup = BeautifulSoup(index_contents, "lxml-xml")
except:
raise RuntimeError("Could not parse the contents of [{0}] as an xml.".format(doxygen_index_xml))
doxygen_root = index_soup.doxygenindex
if not doxygen_root:
raise RuntimeError(
"Did not find root XML node named 'doxygenindex' parsing [{0}].".format(doxygen_index_xml)
)
for compound in doxygen_root.find_all("compound"):
if compound.find("name") and "kind" in compound.attrs and "refid" in compound.attrs:
curr_name = compound.find("name").get_text()
curr_kind = compound.attrs["kind"]
curr_refid = compound.attrs["refid"]
curr_node = ExhaleNode(curr_name, curr_kind, curr_refid)
self.trackNodeIfUnseen(curr_node)
# For things like files and namespaces, a "member" list will include
# things like defines, enums, etc. For classes and structs, we don't
# need to pay attention because the members are the various methods or
# data members by the class
if curr_kind in ["file", "namespace"]:
for member in compound.find_all("member"):
if member.find("name") and "kind" in member.attrs and "refid" in member.attrs:
child_name = member.find("name").get_text()
child_kind = member.attrs["kind"]
child_refid = member.attrs["refid"]
child_node = ExhaleNode(child_name, child_kind, child_refid)
self.trackNodeIfUnseen(child_node)
if curr_kind == "namespace":
child_node.parent = curr_node
else: # curr_kind == "file"
child_node.def_in_file = curr_node
curr_node.children.append(child_node)
# Now that we have discovered everything, we need to explicitly parse the file
# xml documents to determine where leaf-like nodes have been declared.
#
# TODO: change formatting of namespace to provide a listing of all files using it
for f in self.files:
node_xml_contents = utils.nodeCompoundXMLContents(f)
if node_xml_contents:
try:
f.soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Unable to parse file xml [{0}]:".format(f.name))
try:
cdef = f.soup.doxygen.compounddef
if "language" in cdef.attrs:
f.language = cdef.attrs["language"]
err_non = "[CRITICAL] did not find refid [{0}] in `self.node_by_refid`."
err_dup = "Conflicting file definition: [{0}] appears to be defined in both [{1}] and [{2}]." # noqa
# process classes
inner_classes = cdef.find_all("innerclass", recursive=False)
# << verboseBuild
utils.verbose_log(
"*** [{0}] had [{1}] innerclasses found".format(f.name, len(inner_classes)),
utils.AnsiColors.BOLD_MAGENTA
)
for class_like in inner_classes:
if "refid" in class_like.attrs:
refid = class_like.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
# << verboseBuild
utils.verbose_log(
" - [{0}]".format(node.name),
utils.AnsiColors.BOLD_MAGENTA
)
if not node.def_in_file:
node.def_in_file = f
elif node.def_in_file != f:
# << verboseBuild
utils.verbose_log(
err_dup.format(node.name, node.def_in_file.name, f.name),
utils.AnsiColors.BOLD_YELLOW
)
else:
# << verboseBuild
utils.verbose_log(err_non.format(refid), utils.AnsiColors.BOLD_RED)
else:
# TODO: can this ever happen?
# << verboseBuild
catastrophe = "CATASTROPHIC: doxygen xml for `{0}` found `innerclass` [{1}] that"
catastrophe += " does *NOT* have a `refid` attribute!"
catastrophe = catastrophe.format(f, str(class_like))
utils.verbose_log(
utils.prefix("(!) ", catastrophe),
utils.AnsiColors.BOLD_RED
)
# try and find anything else
memberdefs = cdef.find_all("memberdef", recursive=False)
# << verboseBuild
utils.verbose_log(
"*** [{0}] had [{1}] memberdef".format(f.name, len(memberdefs)),
utils.AnsiColors.BOLD_MAGENTA
)
for member in cdef.find_all("memberdef", recursive=False):
if "id" in member.attrs:
refid = member.attrs["id"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
# << verboseBuild
utils.verbose_log(
" - [{0}]".format(node.name),
utils.AnsiColors.BOLD_MAGENTA
)
if not node.def_in_file:
node.def_in_file = f
# the location of the file as determined by doxygen
location = cdef.find("location")
if location and "file" in location.attrs:
location_str = os.path.normpath(location.attrs["file"])
# some older versions of doxygen don't reliably strip from path
# so make sure to remove it
abs_strip_path = os.path.normpath(os.path.abspath(
configs.doxygenStripFromPath
))
if location_str.startswith(abs_strip_path):
location_str = os.path.relpath(location_str, abs_strip_path)
f.location = os.path.normpath(location_str)
except:
utils.fancyError(
"Could not process Doxygen xml for file [{0}]".format(f.name)
)
###### TODO: explain how the parsing works // move it to exhale.parse
# last chance: we will still miss some, but need to pause and establish namespace relationships
for nspace in self.namespaces:
node_xml_contents = utils.nodeCompoundXMLContents(nspace)
if node_xml_contents:
try:
name_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
continue
cdef = name_soup.doxygen.compounddef
for class_like in cdef.find_all("innerclass", recursive=False):
if "refid" in class_like.attrs:
refid = class_like.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node not in nspace.children:
nspace.children.append(node)
node.parent = nspace
for nested_nspace in cdef.find_all("innernamespace", recursive=False):
if "refid" in nested_nspace.attrs:
refid = nested_nspace.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node not in nspace.children:
nspace.children.append(node)
node.parent = nspace
# This is where things get interesting
for sectiondef in cdef.find_all("sectiondef", recursive=False):
for memberdef in sectiondef.find_all("memberdef", recursive=False):
if "id" in memberdef.attrs:
refid = memberdef.attrs["id"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
location = memberdef.find("location")
if location and "file" in location.attrs:
filedef = os.path.normpath(location.attrs["file"])
for f in self.files:
if filedef == f.location:
node.def_in_file = f
if node not in f.children:
f.children.append(node)
break
# Find the nodes that did not have their file location definition assigned
missing_file_def = {} # keys: refid, values: ExhaleNode
missing_file_def_candidates = {} # keys: refid, values: ExhaleNode (file kind only!)
for refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node.def_in_file is None and node.kind not in ("file", "dir", "group", "namespace", "enumvalue"):
missing_file_def[refid] = node
missing_file_def_candidates[refid] = []
# Some compounds like class / struct have their own XML file and if documented
# correctly will have a <location> tag. For example, one may need to add the
#
# \class namespace::ClassName file_basename.hpp full/file/path/file_basename.hpp
#
# in order for the <location> tag to be generated. And in the case of forward
# declarations (e.g., for PIMPL patterns), in order for the class XML to be
# generated at all it seems this must be used.
#
# <?xml version='1.0' encoding='UTF-8' standalone='no'?>
# <doxygen xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="compound.xsd" version="1.8.13">
# <compounddef id="classpimpl_1_1EarthImpl" kind="class" language="C++" prot="public">
# <compoundname>pimpl::EarthImpl</compoundname>
# <includes refid="earth_8hpp" local="no">include/pimpl/earth.hpp</includes>
# <briefdescription>
# <para>The <ref refid="classpimpl_1_1Earth" kindref="compound">Earth</ref> PIMPL. </para> </briefdescription>
# <detaileddescription>
# </detaileddescription>
# <location file="include/pimpl/earth.hpp" line="30" column="1"/>
# <listofallmembers>
# </listofallmembers>
# </compounddef>
# </doxygen>
#
# So we're taking advantage of the fact that
#
# namespace pimpl {
# /**
# * \class pimpl::EarthImpl earth.hpp include/pimpl/earth.hpp
# * \brief The Earth PIMPL.
# */
# class EarthImpl;
# }
#
# Has a <location file="include/pimpl/earth.hpp" line="30" column="1"/>
#
# TODO: clarify this in the docs? You don't understand the full cause though.
refid_removals = []
for refid in missing_file_def:
node = missing_file_def[refid]
node_xml_contents = utils.nodeCompoundXMLContents(node)
# None is returned when no {refid}.xml exists (e.g., for enum or union).
if not node_xml_contents:
continue
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
cdef = node_soup.doxygen.compounddef
location = cdef.find("location", recursive=False)
if location and "file" in location.attrs:
file_path = os.path.normpath(location["file"])
for f in self.files:
if f.location == file_path:
node.def_in_file = f
f.children.append(node)
refid_removals.append(refid)
except:
pass
# We found the def_in_file, don't parse the programlisting for these nodes.
for refid in refid_removals:
del missing_file_def[refid]
# Go through every file and see if the refid associated with a node missing a
# file definition location is present in the <programlisting>
for f in self.files:
cdef = f.soup.doxygen.compounddef
# try and find things in the programlisting as a last resort
programlisting = cdef.find("programlisting")
if programlisting:
for ref in programlisting.find_all("ref"):
if "refid" in ref.attrs:
refid = ref.attrs["refid"]
# be careful not to just consider any refid found, e.g. don't
# use the `compound` kindref's because those are just stating
# it was used in this file, not that it was declared here
if "kindref" in ref.attrs and ref.attrs["kindref"] == "member":
if refid in missing_file_def and f not in missing_file_def_candidates[refid]:
missing_file_def_candidates[refid].append(f)
# For every refid missing a file definition location, see if we found it only
# once in a file node's <programlisting>. If so, assign that as the file the
# node was defined in
for refid in missing_file_def:
node = missing_file_def[refid]
candidates = missing_file_def_candidates[refid]
# If only one found, life is good!
if len(candidates) == 1:
node.def_in_file = candidates[0]
# << verboseBuild
utils.verbose_log(utils.info(
"Manually setting file definition of {0} {1} to [{2}]".format(
node.kind, node.name, node.def_in_file.location
),
utils.AnsiColors.BOLD_CYAN
))
# More than one found, don't know what to do...
elif len(candidates) > 1:
# << verboseBuild
err_msg = StringIO()
err_msg.write(textwrap.dedent('''
While attempting to discover the file that Doxygen refid `{0}` was
defined in, more than one candidate was found. The candidates were:
'''.format(refid)))
# NOTE: candidates should only ever contain File nodes (thus c.location
# should exist, and already be populated).
for c in candidates:
err_msg.write(" - path=[{0}], refid={1}\n".format(c.location, c.refid))
err_msg.write("\n")
utils.verbose_log(utils.critical(err_msg.getvalue()))
# NOTE: no 'else' clause here, a warning about no file link generated is
# produced when the rst file is written
# now that all nodes have been discovered, process template parameters, and
# coordinate any base / derived inheritance relationships
for node in self.class_like:
node_xml_contents = utils.nodeCompoundXMLContents(node)
if node_xml_contents:
try:
name_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Could not process [{0}]".format(
os.path.join(configs._doxygen_xml_output_directory, "{0}".format(node.refid))
))
try:
cdef = name_soup.doxygen.compounddef
tparams = cdef.find("templateparamlist", recursive=False)
#
# DANGER DANGER DANGER
# No, you may not build links directly right now. Cuz they aren't initialized
#
# first, find template parameters
if tparams:
for param in tparams.find_all("param", recursive=False):
# Doxygen seems to produce unreliable results. For example,
# sometimes you will get `param.type <- class X` with empty
# declname and defname, and sometimes you will get
# `param.type <- class` and declname `X`. Similar behavior
# is observed with `typename X`. These are generally just
# ignored (falling in the broader category of a typename)
#
# Sometimes you will get a refid in the type, so deal with
# that as they come too (yay)!
param_t = param.type
decl_n = param.declname
def_n = param.defname
# TODO: this doesn't seem to happen, should probably investigate more
# do something with `param.defval` ?
# By the end:
# param_t <- (None | str, str) tuple
# ^^^^^^^^^^
# only a refid, or None
# decl_n <- str; declared name
# def_n <- None | str; defined name
#
# When decl_n and def_n are the same, this means no explicit
# default template parameter is given. This will ultimately
# mean that def_n is set to None for consistency.
if param_t.ref:
if "refid" in param_t.ref.attrs:
refid = param_t.ref.attrs["refid"]
else:
# I hope this never happens.
refid = None
param_t = (refid, param_t.ref.string)
else:
param_t = (None, param_t.string)
# Right now these are the soup tags, get the strings
if decl_n:
decl_n = decl_n.string
if def_n:
def_n = def_n.string
# Unset def_n if same as decl_n
if decl_n and def_n and decl_n == def_n:
def_n = None
node.template_params.append((param_t, decl_n, def_n))
def prot_ref_str(soup_node):
if "prot" in soup_node.attrs:
prot = soup_node.attrs["prot"]
else:
prot = None
if "refid" in soup_node.attrs:
refid = soup_node.attrs["refid"]
else:
refid = None
return (prot, refid, soup_node.string)
# Now see if there is a reference to any base classes
for base in cdef.find_all("basecompoundref", recursive=False):
node.base_compounds.append(prot_ref_str(base))
# Now see if there is a reference to any derived classes
for derived in cdef.find_all("derivedcompoundref", recursive=False):
node.derived_compounds.append(prot_ref_str(derived))
except:
utils.fancyError("Error processing Doxygen XML for [{0}]".format(node.name), "txt") | .. todo:: node discovery has changed, breathe no longer used...update docs | Below is the the instruction that describes the task:
### Input:
.. todo:: node discovery has changed, breathe no longer used...update docs
### Response:
def discoverAllNodes(self):
'''
.. todo:: node discovery has changed, breathe no longer used...update docs
'''
doxygen_index_xml = os.path.join(
configs._doxygen_xml_output_directory,
"index.xml"
)
try:
with codecs.open(doxygen_index_xml, "r", "utf-8") as index:
index_contents = index.read()
except:
raise RuntimeError("Could not read the contents of [{0}].".format(doxygen_index_xml))
try:
index_soup = BeautifulSoup(index_contents, "lxml-xml")
except:
raise RuntimeError("Could not parse the contents of [{0}] as an xml.".format(doxygen_index_xml))
doxygen_root = index_soup.doxygenindex
if not doxygen_root:
raise RuntimeError(
"Did not find root XML node named 'doxygenindex' parsing [{0}].".format(doxygen_index_xml)
)
for compound in doxygen_root.find_all("compound"):
if compound.find("name") and "kind" in compound.attrs and "refid" in compound.attrs:
curr_name = compound.find("name").get_text()
curr_kind = compound.attrs["kind"]
curr_refid = compound.attrs["refid"]
curr_node = ExhaleNode(curr_name, curr_kind, curr_refid)
self.trackNodeIfUnseen(curr_node)
# For things like files and namespaces, a "member" list will include
# things like defines, enums, etc. For classes and structs, we don't
# need to pay attention because the members are the various methods or
# data members by the class
if curr_kind in ["file", "namespace"]:
for member in compound.find_all("member"):
if member.find("name") and "kind" in member.attrs and "refid" in member.attrs:
child_name = member.find("name").get_text()
child_kind = member.attrs["kind"]
child_refid = member.attrs["refid"]
child_node = ExhaleNode(child_name, child_kind, child_refid)
self.trackNodeIfUnseen(child_node)
if curr_kind == "namespace":
child_node.parent = curr_node
else: # curr_kind == "file"
child_node.def_in_file = curr_node
curr_node.children.append(child_node)
# Now that we have discovered everything, we need to explicitly parse the file
# xml documents to determine where leaf-like nodes have been declared.
#
# TODO: change formatting of namespace to provide a listing of all files using it
for f in self.files:
node_xml_contents = utils.nodeCompoundXMLContents(f)
if node_xml_contents:
try:
f.soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Unable to parse file xml [{0}]:".format(f.name))
try:
cdef = f.soup.doxygen.compounddef
if "language" in cdef.attrs:
f.language = cdef.attrs["language"]
err_non = "[CRITICAL] did not find refid [{0}] in `self.node_by_refid`."
err_dup = "Conflicting file definition: [{0}] appears to be defined in both [{1}] and [{2}]." # noqa
# process classes
inner_classes = cdef.find_all("innerclass", recursive=False)
# << verboseBuild
utils.verbose_log(
"*** [{0}] had [{1}] innerclasses found".format(f.name, len(inner_classes)),
utils.AnsiColors.BOLD_MAGENTA
)
for class_like in inner_classes:
if "refid" in class_like.attrs:
refid = class_like.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
# << verboseBuild
utils.verbose_log(
" - [{0}]".format(node.name),
utils.AnsiColors.BOLD_MAGENTA
)
if not node.def_in_file:
node.def_in_file = f
elif node.def_in_file != f:
# << verboseBuild
utils.verbose_log(
err_dup.format(node.name, node.def_in_file.name, f.name),
utils.AnsiColors.BOLD_YELLOW
)
else:
# << verboseBuild
utils.verbose_log(err_non.format(refid), utils.AnsiColors.BOLD_RED)
else:
# TODO: can this ever happen?
# << verboseBuild
catastrophe = "CATASTROPHIC: doxygen xml for `{0}` found `innerclass` [{1}] that"
catastrophe += " does *NOT* have a `refid` attribute!"
catastrophe = catastrophe.format(f, str(class_like))
utils.verbose_log(
utils.prefix("(!) ", catastrophe),
utils.AnsiColors.BOLD_RED
)
# try and find anything else
memberdefs = cdef.find_all("memberdef", recursive=False)
# << verboseBuild
utils.verbose_log(
"*** [{0}] had [{1}] memberdef".format(f.name, len(memberdefs)),
utils.AnsiColors.BOLD_MAGENTA
)
for member in cdef.find_all("memberdef", recursive=False):
if "id" in member.attrs:
refid = member.attrs["id"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
# << verboseBuild
utils.verbose_log(
" - [{0}]".format(node.name),
utils.AnsiColors.BOLD_MAGENTA
)
if not node.def_in_file:
node.def_in_file = f
# the location of the file as determined by doxygen
location = cdef.find("location")
if location and "file" in location.attrs:
location_str = os.path.normpath(location.attrs["file"])
# some older versions of doxygen don't reliably strip from path
# so make sure to remove it
abs_strip_path = os.path.normpath(os.path.abspath(
configs.doxygenStripFromPath
))
if location_str.startswith(abs_strip_path):
location_str = os.path.relpath(location_str, abs_strip_path)
f.location = os.path.normpath(location_str)
except:
utils.fancyError(
"Could not process Doxygen xml for file [{0}]".format(f.name)
)
###### TODO: explain how the parsing works // move it to exhale.parse
# last chance: we will still miss some, but need to pause and establish namespace relationships
for nspace in self.namespaces:
node_xml_contents = utils.nodeCompoundXMLContents(nspace)
if node_xml_contents:
try:
name_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
continue
cdef = name_soup.doxygen.compounddef
for class_like in cdef.find_all("innerclass", recursive=False):
if "refid" in class_like.attrs:
refid = class_like.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node not in nspace.children:
nspace.children.append(node)
node.parent = nspace
for nested_nspace in cdef.find_all("innernamespace", recursive=False):
if "refid" in nested_nspace.attrs:
refid = nested_nspace.attrs["refid"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node not in nspace.children:
nspace.children.append(node)
node.parent = nspace
# This is where things get interesting
for sectiondef in cdef.find_all("sectiondef", recursive=False):
for memberdef in sectiondef.find_all("memberdef", recursive=False):
if "id" in memberdef.attrs:
refid = memberdef.attrs["id"]
if refid in self.node_by_refid:
node = self.node_by_refid[refid]
location = memberdef.find("location")
if location and "file" in location.attrs:
filedef = os.path.normpath(location.attrs["file"])
for f in self.files:
if filedef == f.location:
node.def_in_file = f
if node not in f.children:
f.children.append(node)
break
# Find the nodes that did not have their file location definition assigned
missing_file_def = {} # keys: refid, values: ExhaleNode
missing_file_def_candidates = {} # keys: refid, values: ExhaleNode (file kind only!)
for refid in self.node_by_refid:
node = self.node_by_refid[refid]
if node.def_in_file is None and node.kind not in ("file", "dir", "group", "namespace", "enumvalue"):
missing_file_def[refid] = node
missing_file_def_candidates[refid] = []
# Some compounds like class / struct have their own XML file and if documented
# correctly will have a <location> tag. For example, one may need to add the
#
# \class namespace::ClassName file_basename.hpp full/file/path/file_basename.hpp
#
# in order for the <location> tag to be generated. And in the case of forward
# declarations (e.g., for PIMPL patterns), in order for the class XML to be
# generated at all it seems this must be used.
#
# <?xml version='1.0' encoding='UTF-8' standalone='no'?>
# <doxygen xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="compound.xsd" version="1.8.13">
# <compounddef id="classpimpl_1_1EarthImpl" kind="class" language="C++" prot="public">
# <compoundname>pimpl::EarthImpl</compoundname>
# <includes refid="earth_8hpp" local="no">include/pimpl/earth.hpp</includes>
# <briefdescription>
# <para>The <ref refid="classpimpl_1_1Earth" kindref="compound">Earth</ref> PIMPL. </para> </briefdescription>
# <detaileddescription>
# </detaileddescription>
# <location file="include/pimpl/earth.hpp" line="30" column="1"/>
# <listofallmembers>
# </listofallmembers>
# </compounddef>
# </doxygen>
#
# So we're taking advantage of the fact that
#
# namespace pimpl {
# /**
# * \class pimpl::EarthImpl earth.hpp include/pimpl/earth.hpp
# * \brief The Earth PIMPL.
# */
# class EarthImpl;
# }
#
# Has a <location file="include/pimpl/earth.hpp" line="30" column="1"/>
#
# TODO: clarify this in the docs? You don't understand the full cause though.
refid_removals = []
for refid in missing_file_def:
node = missing_file_def[refid]
node_xml_contents = utils.nodeCompoundXMLContents(node)
# None is returned when no {refid}.xml exists (e.g., for enum or union).
if not node_xml_contents:
continue
try:
node_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
cdef = node_soup.doxygen.compounddef
location = cdef.find("location", recursive=False)
if location and "file" in location.attrs:
file_path = os.path.normpath(location["file"])
for f in self.files:
if f.location == file_path:
node.def_in_file = f
f.children.append(node)
refid_removals.append(refid)
except:
pass
# We found the def_in_file, don't parse the programlisting for these nodes.
for refid in refid_removals:
del missing_file_def[refid]
# Go through every file and see if the refid associated with a node missing a
# file definition location is present in the <programlisting>
for f in self.files:
cdef = f.soup.doxygen.compounddef
# try and find things in the programlisting as a last resort
programlisting = cdef.find("programlisting")
if programlisting:
for ref in programlisting.find_all("ref"):
if "refid" in ref.attrs:
refid = ref.attrs["refid"]
# be careful not to just consider any refid found, e.g. don't
# use the `compound` kindref's because those are just stating
# it was used in this file, not that it was declared here
if "kindref" in ref.attrs and ref.attrs["kindref"] == "member":
if refid in missing_file_def and f not in missing_file_def_candidates[refid]:
missing_file_def_candidates[refid].append(f)
# For every refid missing a file definition location, see if we found it only
# once in a file node's <programlisting>. If so, assign that as the file the
# node was defined in
for refid in missing_file_def:
node = missing_file_def[refid]
candidates = missing_file_def_candidates[refid]
# If only one found, life is good!
if len(candidates) == 1:
node.def_in_file = candidates[0]
# << verboseBuild
utils.verbose_log(utils.info(
"Manually setting file definition of {0} {1} to [{2}]".format(
node.kind, node.name, node.def_in_file.location
),
utils.AnsiColors.BOLD_CYAN
))
# More than one found, don't know what to do...
elif len(candidates) > 1:
# << verboseBuild
err_msg = StringIO()
err_msg.write(textwrap.dedent('''
While attempting to discover the file that Doxygen refid `{0}` was
defined in, more than one candidate was found. The candidates were:
'''.format(refid)))
# NOTE: candidates should only ever contain File nodes (thus c.location
# should exist, and already be populated).
for c in candidates:
err_msg.write(" - path=[{0}], refid={1}\n".format(c.location, c.refid))
err_msg.write("\n")
utils.verbose_log(utils.critical(err_msg.getvalue()))
# NOTE: no 'else' clause here, a warning about no file link generated is
# produced when the rst file is written
# now that all nodes have been discovered, process template parameters, and
# coordinate any base / derived inheritance relationships
for node in self.class_like:
node_xml_contents = utils.nodeCompoundXMLContents(node)
if node_xml_contents:
try:
name_soup = BeautifulSoup(node_xml_contents, "lxml-xml")
except:
utils.fancyError("Could not process [{0}]".format(
os.path.join(configs._doxygen_xml_output_directory, "{0}".format(node.refid))
))
try:
cdef = name_soup.doxygen.compounddef
tparams = cdef.find("templateparamlist", recursive=False)
#
# DANGER DANGER DANGER
# No, you may not build links directly right now. Cuz they aren't initialized
#
# first, find template parameters
if tparams:
for param in tparams.find_all("param", recursive=False):
# Doxygen seems to produce unreliable results. For example,
# sometimes you will get `param.type <- class X` with empty
# declname and defname, and sometimes you will get
# `param.type <- class` and declname `X`. Similar behavior
# is observed with `typename X`. These are generally just
# ignored (falling in the broader category of a typename)
#
# Sometimes you will get a refid in the type, so deal with
# that as they come too (yay)!
param_t = param.type
decl_n = param.declname
def_n = param.defname
# TODO: this doesn't seem to happen, should probably investigate more
# do something with `param.defval` ?
# By the end:
# param_t <- (None | str, str) tuple
# ^^^^^^^^^^
# only a refid, or None
# decl_n <- str; declared name
# def_n <- None | str; defined name
#
# When decl_n and def_n are the same, this means no explicit
# default template parameter is given. This will ultimately
# mean that def_n is set to None for consistency.
if param_t.ref:
if "refid" in param_t.ref.attrs:
refid = param_t.ref.attrs["refid"]
else:
# I hope this never happens.
refid = None
param_t = (refid, param_t.ref.string)
else:
param_t = (None, param_t.string)
# Right now these are the soup tags, get the strings
if decl_n:
decl_n = decl_n.string
if def_n:
def_n = def_n.string
# Unset def_n if same as decl_n
if decl_n and def_n and decl_n == def_n:
def_n = None
node.template_params.append((param_t, decl_n, def_n))
def prot_ref_str(soup_node):
if "prot" in soup_node.attrs:
prot = soup_node.attrs["prot"]
else:
prot = None
if "refid" in soup_node.attrs:
refid = soup_node.attrs["refid"]
else:
refid = None
return (prot, refid, soup_node.string)
# Now see if there is a reference to any base classes
for base in cdef.find_all("basecompoundref", recursive=False):
node.base_compounds.append(prot_ref_str(base))
# Now see if there is a reference to any derived classes
for derived in cdef.find_all("derivedcompoundref", recursive=False):
node.derived_compounds.append(prot_ref_str(derived))
except:
utils.fancyError("Error processing Doxygen XML for [{0}]".format(node.name), "txt") |
def add_data(self, addr, data):
"""Callback when data is received from the Crazyflie"""
data_len = len(data)
if not addr == self._current_addr:
logger.warning(
'Address did not match when adding data to read request!')
return
# Add the data and calculate the next address to fetch
self.data += data
self._bytes_left -= data_len
self._current_addr += data_len
if self._bytes_left > 0:
self._request_new_chunk()
return False
else:
return True | Callback when data is received from the Crazyflie | Below is the the instruction that describes the task:
### Input:
Callback when data is received from the Crazyflie
### Response:
def add_data(self, addr, data):
"""Callback when data is received from the Crazyflie"""
data_len = len(data)
if not addr == self._current_addr:
logger.warning(
'Address did not match when adding data to read request!')
return
# Add the data and calculate the next address to fetch
self.data += data
self._bytes_left -= data_len
self._current_addr += data_len
if self._bytes_left > 0:
self._request_new_chunk()
return False
else:
return True |
def uni_to_beta(text):
"""
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
"""
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted | Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable. | Below is the the instruction that describes the task:
### Input:
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
### Response:
def uni_to_beta(text):
"""
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
"""
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted |
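A standalone sketch of the character-map lookup pattern used above; the three-entry map is hypothetical, whereas the real module builds _UNICODE_MAP over the full polytonic Greek range:

_TINY_MAP = {'α': 'a', 'β': 'b', 'γ': 'g'}  # hypothetical mapping, illustration only

def tiny_uni_to_beta(text):
    # Unmapped characters pass through unchanged, as in the entry above.
    return ''.join(_TINY_MAP.get(ch, ch) for ch in text)

print(tiny_uni_to_beta('αβγ!'))  # -> 'abg!'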
def format_sensor_bar(self, sensor):
""" Build and format a sensor bar. If pango is enabled bar color is per sensor."""
percentage = self.percentage(sensor.current, sensor.critical)
bar = make_vertical_bar(int(percentage))
if self.pango_enabled:
if self.dynamic_color:
color = self.colors[int(percentage)]
return self.format_pango(color, bar)
return bar | Build and format a sensor bar. If pango is enabled bar color is per sensor. | Below is the the instruction that describes the task:
### Input:
Build and format a sensor bar. If pango is enabled bar color is per sensor.
### Response:
def format_sensor_bar(self, sensor):
""" Build and format a sensor bar. If pango is enabled bar color is per sensor."""
percentage = self.percentage(sensor.current, sensor.critical)
bar = make_vertical_bar(int(percentage))
if self.pango_enabled:
if self.dynamic_color:
color = self.colors[int(percentage)]
return self.format_pango(color, bar)
return bar |
def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.") | Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters. | Below is the the instruction that describes the task:
### Input:
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
### Response:
def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.") |
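A hypothetical usage sketch; it assumes insights-core is installed and that Specs.messages is a filterable datasource, which is the usual way the helper above is invoked:

from insights.core.filters import add_filter
from insights.specs import Specs

# Only lines containing these substrings will be kept for the datasource.
add_filter(Specs.messages, "ERROR")
add_filter(Specs.messages, ["WARN", "Traceback"])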
def openTypeNameCompatibleFullNameFallback(info):
"""
Fallback to *styleMapFamilyName styleMapStyleName*.
If *styleMapStyleName* is *regular* this will not add
the style name.
"""
familyName = getAttrWithFallback(info, "styleMapFamilyName")
styleMapStyleName = getAttrWithFallback(info, "styleMapStyleName")
if styleMapStyleName != "regular":
familyName += " " + styleMapStyleName.title()
return familyName | Fallback to *styleMapFamilyName styleMapStyleName*.
If *styleMapStyleName* is *regular* this will not add
the style name. | Below is the the instruction that describes the task:
### Input:
Fallback to *styleMapFamilyName styleMapStyleName*.
If *styleMapStyleName* is *regular* this will not add
the style name.
### Response:
def openTypeNameCompatibleFullNameFallback(info):
"""
Fallback to *styleMapFamilyName styleMapStyleName*.
If *styleMapStyleName* is *regular* this will not add
the style name.
"""
familyName = getAttrWithFallback(info, "styleMapFamilyName")
styleMapStyleName = getAttrWithFallback(info, "styleMapStyleName")
if styleMapStyleName != "regular":
familyName += " " + styleMapStyleName.title()
return familyName |
def ilist(self, in_list=[]):
"""Return a list that uses this server's IRC casemapping.
All strings in this list are lowercased using the server's casemapping before inserting
them into the list, and the ``in`` operator takes casemapping into account.
"""
new_list = IList(in_list)
new_list.set_std(self.features.get('casemapping'))
if not self._casemap_set:
self._imaps.append(new_list)
return new_list | Return a list that uses this server's IRC casemapping.
All strings in this list are lowercased using the server's casemapping before inserting
them into the list, and the ``in`` operator takes casemapping into account. | Below is the the instruction that describes the task:
### Input:
Return a list that uses this server's IRC casemapping.
All strings in this list are lowercased using the server's casemapping before inserting
them into the list, and the ``in`` operator takes casemapping into account.
### Response:
def ilist(self, in_list=[]):
"""Return a list that uses this server's IRC casemapping.
All strings in this list are lowercased using the server's casemapping before inserting
them into the list, and the ``in`` operator takes casemapping into account.
"""
new_list = IList(in_list)
new_list.set_std(self.features.get('casemapping'))
if not self._casemap_set:
self._imaps.append(new_list)
return new_list |
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
# change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self | Initialize data from a list of 2-D numpy matrices. | Below is the the instruction that describes the task:
### Input:
Initialize data from a list of 2-D numpy matrices.
### Response:
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
# change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self |
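A standalone sketch of the chunk-flattening and pointer-array assembly done above, using only numpy and ctypes; the LGBM_DatasetCreateFromMats call itself is omitted because it requires the LightGBM shared library:

import ctypes
import numpy as np

mats = [np.arange(6, dtype=np.float32).reshape(2, 3),
        np.ones((4, 3), dtype=np.float32)]
ncol = mats[0].shape[1]
nrow = np.array([m.shape[0] for m in mats], dtype=np.int32)

ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
for i, mat in enumerate(mats):
    chunk = np.ascontiguousarray(mat.reshape(mat.size), dtype=np.float32)
    holders.append(chunk)  # keep references so the buffers stay alive
    ptr_data[i] = chunk.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

print(list(nrow), ncol, ptr_data[1][0])  # -> [2, 4] 3 1.0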
def report_collection_diff(from_collection, to_collection, from_id, to_id):
"""Report the collected test difference between two nodes.
:returns: detailed message describing the difference between the given
collections, or None if they are equal.
"""
if from_collection == to_collection:
return None
diff = unified_diff(from_collection, to_collection, fromfile=from_id, tofile=to_id)
error_message = (
u"Different tests were collected between {from_id} and {to_id}. "
u"The difference is:\n"
u"{diff}"
).format(from_id=from_id, to_id=to_id, diff="\n".join(diff))
msg = "\n".join([x.rstrip() for x in error_message.split("\n")])
return msg | Report the collected test difference between two nodes.
:returns: detailed message describing the difference between the given
collections, or None if they are equal. | Below is the the instruction that describes the task:
### Input:
Report the collected test difference between two nodes.
:returns: detailed message describing the difference between the given
collections, or None if they are equal.
### Response:
def report_collection_diff(from_collection, to_collection, from_id, to_id):
"""Report the collected test difference between two nodes.
:returns: detailed message describing the difference between the given
collections, or None if they are equal.
"""
if from_collection == to_collection:
return None
diff = unified_diff(from_collection, to_collection, fromfile=from_id, tofile=to_id)
error_message = (
u"Different tests were collected between {from_id} and {to_id}. "
u"The difference is:\n"
u"{diff}"
).format(from_id=from_id, to_id=to_id, diff="\n".join(diff))
msg = "\n".join([x.rstrip() for x in error_message.split("\n")])
return msg |
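A hypothetical usage sketch (it assumes the helper above is importable from its module); two worker collections that differ produce a unified-diff message, while identical collections return None:

from_collection = ["test_a.py::test_one", "test_a.py::test_two"]
to_collection = ["test_a.py::test_one", "test_a.py::test_three"]

msg = report_collection_diff(from_collection, to_collection, "gw0", "gw1")
print(msg)  # unified diff between the two collections

assert report_collection_diff(from_collection, from_collection, "gw0", "gw1") is None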
def customer_stop(self, nick, session):
'''taobao.increment.customer.stop Turn off the incremental message service for a user
Lets an application turn off the incremental message service for its users, which saves ISV traffic.'''
request = TOPRequest('taobao.increment.customer.stop')
request['nick'] = nick
self.create(self.execute(request, session), fields=['is_success',])
return self.is_success | taobao.increment.customer.stop Turn off the incremental message service for a user
Lets an application turn off the incremental message service for its users, which saves ISV traffic. | Below is the the instruction that describes the task:
### Input:
taobao.increment.customer.stop Turn off the incremental message service for a user
Lets an application turn off the incremental message service for its users, which saves ISV traffic.
### Response:
def customer_stop(self, nick, session):
'''taobao.increment.customer.stop Turn off the incremental message service for a user
Lets an application turn off the incremental message service for its users, which saves ISV traffic.'''
request = TOPRequest('taobao.increment.customer.stop')
request['nick'] = nick
self.create(self.execute(request, session), fields=['is_success',])
return self.is_success |
def xor(key, data):
"""
Perform cyclical exclusive or operations on ``data``.
The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If
the key is smaller than the provided ``data``, the ``key`` will be
repeated.
Args:
key(int or bytes): The key to xor ``data`` with.
data(bytes): The data to perform the xor operation on.
Returns:
bytes: The result of the exclusive or operation.
Examples:
>>> from pwny import *
>>> xor(5, b'ABCD')
b'DGFA'
>>> xor(5, b'DGFA')
b'ABCD'
>>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
b'15-=51)19=%5=9!)!%=-%!9!)-'
>>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
"""
if type(key) is int:
key = six.int2byte(key)
key_len = len(key)
return b''.join(
six.int2byte(c ^ six.indexbytes(key, i % key_len))
for i, c in enumerate(six.iterbytes(data))
) | Perform cyclical exclusive or operations on ``data``.
The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If
the key is smaller than the provided ``data``, the ``key`` will be
repeated.
Args:
key(int or bytes): The key to xor ``data`` with.
data(bytes): The data to perform the xor operation on.
Returns:
bytes: The result of the exclusive or operation.
Examples:
>>> from pwny import *
>>> xor(5, b'ABCD')
b'DGFA'
>>> xor(5, b'DGFA')
b'ABCD'
>>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
b'15-=51)19=%5=9!)!%=-%!9!)-'
>>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' | Below is the the instruction that describes the task:
### Input:
Perform cyclical exclusive or operations on ``data``.
The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If
the key is smaller than the provided ``data``, the ``key`` will be
repeated.
Args:
key(int or bytes): The key to xor ``data`` with.
data(bytes): The data to perform the xor operation on.
Returns:
bytes: The result of the exclusive or operation.
Examples:
>>> from pwny import *
>>> xor(5, b'ABCD')
b'DGFA'
>>> xor(5, b'DGFA')
b'ABCD'
>>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
b'15-=51)19=%5=9!)!%=-%!9!)-'
>>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
### Response:
def xor(key, data):
"""
Perform cyclical exclusive or operations on ``data``.
The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If
the key is smaller than the provided ``data``, the ``key`` will be
repeated.
Args:
key(int or bytes): The key to xor ``data`` with.
data(bytes): The data to perform the xor operation on.
Returns:
bytes: The result of the exclusive or operation.
Examples:
>>> from pwny import *
>>> xor(5, b'ABCD')
b'DGFA'
>>> xor(5, b'DGFA')
b'ABCD'
>>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
b'15-=51)19=%5=9!)!%=-%!9!)-'
>>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
"""
if type(key) is int:
key = six.int2byte(key)
key_len = len(key)
return b''.join(
six.int2byte(c ^ six.indexbytes(key, i % key_len))
for i, c in enumerate(six.iterbytes(data))
) |
def read_register(self, addr, numBytes):
"""Reads @numBytes bytes from the grizzly starting at @addr. Due
to packet format, cannot read more than 127 bytes at a time.
Returns a byte array of the requested data in little endian.
@addr should be from the Addr class e.g. Addr.Speed"""
assert numBytes <= 0x7f, "Cannot read more than 127 bytes at a time"
cmd = chr(addr) + chr(numBytes)
cmd += (16 - len(cmd)) * chr(0)
return self._dev.exchange_bytes(cmd) | Reads @numBytes bytes from the grizzly starting at @addr. Due
to packet format, cannot read more than 127 bytes at a time.
Returns a byte array of the requested data in little endian.
@addr should be from the Addr class e.g. Addr.Speed | Below is the the instruction that describes the task:
### Input:
Reads @numBytes bytes from the grizzly starting at @addr. Due
to packet format, cannot read more than 127 bytes at a time.
Returns a byte array of the requested data in little endian.
@addr should be from the Addr class e.g. Addr.Speed
### Response:
def read_register(self, addr, numBytes):
"""Reads @numBytes bytes from the grizzly starting at @addr. Due
to packet format, cannot read more than 127 bytes at a time.
Returns a byte array of the requested data in little endian.
@addr should be from the Addr class e.g. Addr.Speed"""
assert numBytes <= 0x7f, "Cannot read more than 127 bytes at a time"
cmd = chr(addr) + chr(numBytes)
cmd += (16 - len(cmd)) * chr(0)
return self._dev.exchange_bytes(cmd) |
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
'''
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
'''
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
url += '&type=' + ','.join(types)
return self.http._post_data(url, None, self.http._headers_with_access_token()) | Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`. | Below is the the instruction that describes the task:
### Input:
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
### Response:
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
'''
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
'''
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
url += '&type=' + ','.join(types)
return self.http._post_data(url, None, self.http._headers_with_access_token()) |
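A standalone sketch of the URL construction performed above, runnable with the standard library only; the 'TW' territory code and the type names are illustration-only assumptions:

from urllib import parse as url_parse

url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': 'jazz', 'territory': 'TW'})
types = ['track', 'album']
if len(types) > 0:
    url += '&type=' + ','.join(types)
print(url)  # the endpoint the method above would hit with an access-token header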
def coerce_to_synchronous(func):
'''
Given a function that might be async, wrap it in an explicit loop so it can
be run in a synchronous context.
'''
if inspect.iscoroutinefunction(func):
@functools.wraps(func)
def sync_wrapper(*args, **kwargs):
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(func(*args, **kwargs))
finally:
loop.close()
return sync_wrapper
return func | Given a function that might be async, wrap it in an explicit loop so it can
be run in a synchronous context. | Below is the the instruction that describes the task:
### Input:
Given a function that might be async, wrap it in an explicit loop so it can
be run in a synchronous context.
### Response:
def coerce_to_synchronous(func):
'''
Given a function that might be async, wrap it in an explicit loop so it can
be run in a synchronous context.
'''
if inspect.iscoroutinefunction(func):
@functools.wraps(func)
def sync_wrapper(*args, **kwargs):
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(func(*args, **kwargs))
finally:
loop.close()
return sync_wrapper
return func |
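A hypothetical usage sketch (it assumes coerce_to_synchronous above is importable); note that the wrapper closes the loop it ran on, so it suits one-shot entry points:

import asyncio

async def fetch():
    await asyncio.sleep(0)
    print("done")

sync_fetch = coerce_to_synchronous(fetch)
sync_fetch()  # runs the coroutine to completion, then closes the loop

plain = coerce_to_synchronous(print)  # non-async callables pass through untouched
plain("still synchronous")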
def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video.embed_code = self.get_embed_code(node)
video.embed_type = self.get_embed_type(node)
video.width = self.get_width(node)
video.height = self.get_height(node)
video.src = self.get_src(node)
video.provider = self.get_provider(video.src)
return video | Create a video object from a video embed | Below is the the instruction that describes the task:
### Input:
Create a video object from a video embed
### Response:
def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video.embed_code = self.get_embed_code(node)
video.embed_type = self.get_embed_type(node)
video.width = self.get_width(node)
video.height = self.get_height(node)
video.src = self.get_src(node)
video.provider = self.get_provider(video.src)
return video |
def Ceil(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the Ceiling operator to a vertex.
This maps a vertex to the smallest integer greater than or equal to its value
:param input_vertex: the vertex to be ceil'd
"""
return Double(context.jvm_view().CeilVertex, label, cast_to_double_vertex(input_vertex)) | Applies the Ceiling operator to a vertex.
This maps a vertex to the smallest integer greater than or equal to its value
:param input_vertex: the vertex to be ceil'd | Below is the the instruction that describes the task:
### Input:
Applies the Ceiling operator to a vertex.
This maps a vertex to the smallest integer greater than or equal to its value
:param input_vertex: the vertex to be ceil'd
### Response:
def Ceil(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the Ceiling operator to a vertex.
This maps a vertex to the smallest integer greater than or equal to its value
:param input_vertex: the vertex to be ceil'd
"""
return Double(context.jvm_view().CeilVertex, label, cast_to_double_vertex(input_vertex)) |
def AgregarDatosAutorizacion(self, nro_remito=None, cod_autorizacion=None, fecha_emision=None, fecha_vencimiento=None, **kwargs):
"Agrega la información referente a los datos de autorización del remito electrónico cárnico"
self.remito['datosEmision'] = dict(nroRemito=nro_remito, codAutorizacion=cod_autorizacion,
fechaEmision=fecha_emision, fechaVencimiento=fecha_vencimiento,
)
return True | Adds the information for the authorization data of the electronic meat remittance (remito electrónico cárnico)
### Input:
Adds the information for the authorization data of the electronic meat remittance (remito electrónico cárnico)
### Response:
def AgregarDatosAutorizacion(self, nro_remito=None, cod_autorizacion=None, fecha_emision=None, fecha_vencimiento=None, **kwargs):
"Agrega la información referente a los datos de autorización del remito electrónico cárnico"
self.remito['datosEmision'] = dict(nroRemito=nro_remito, codAutorizacion=cod_autorizacion,
fechaEmision=fecha_emision, fechaVencimiento=fecha_vencimiento,
)
return True |
def get_created_by(self):
'''Returns created by'''
created_by = parsers.get_created_by(self.__chebi_id)
if created_by is None:
created_by = parsers.get_created_by(self.get_parent_id())
if created_by is None:
for parent_or_child_id in self.__get_all_ids():
created_by = parsers.get_created_by(parent_or_child_id)
if created_by is not None:
break
return created_by | Returns created by | Below is the the instruction that describes the task:
### Input:
Returns created by
### Response:
def get_created_by(self):
'''Returns created by'''
created_by = parsers.get_created_by(self.__chebi_id)
if created_by is None:
created_by = parsers.get_created_by(self.get_parent_id())
if created_by is None:
for parent_or_child_id in self.__get_all_ids():
created_by = parsers.get_created_by(parent_or_child_id)
if created_by is not None:
break
return created_by |
def QA_SU_save_future_min_all(engine, client=DATABASE):
"""[summary]
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min_all(client=client) | [summary]
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE}) | Below is the the instruction that describes the task:
### Input:
[summary]
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
### Response:
def QA_SU_save_future_min_all(engine, client=DATABASE):
"""[summary]
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min_all(client=client) |
def convert_fig_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
"""
for fig in self.main.getroot().findall('.//fig'):
if fig.getparent().tag == 'p':
elevate_element(fig)
for fig in self.main.getroot().findall('.//fig'):
#self.convert_fn_elements(fig)
#self.convert_disp_formula_elements(fig)
#Find label and caption
label_el = fig.find('label')
caption_el = fig.find('caption')
#Get the graphic node, this should be mandatory later on
graphic_el = fig.find('graphic')
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the content: using image path, label, and caption
img_el = etree.Element('img', {'alt': 'A Figure', 'src': img_path,
'class': 'figure'})
if 'id' in fig.attrib:
img_el.attrib['id'] = fig.attrib['id']
insert_before(fig, img_el)
#Create content for the label and caption
if caption_el is not None or label_el is not None:
img_caption_div = etree.Element('div', {'class': 'figure-caption'})
img_caption_div_b = etree.SubElement(img_caption_div, 'b')
if label_el is not None:
append_all_below(img_caption_div_b, label_el)
append_new_text(img_caption_div_b, '. ', join_str='')
if caption_el is not None:
caption_title = caption_el.find('title')
if caption_title is not None:
append_all_below(img_caption_div_b, caption_title)
append_new_text(img_caption_div_b, ' ', join_str='')
for each_p in caption_el.findall('p'):
append_all_below(img_caption_div, each_p)
insert_before(fig, img_caption_div)
#Remove the original <fig>
remove(fig) | Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited. | Below is the the instruction that describes the task:
### Input:
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
### Response:
def convert_fig_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
"""
for fig in self.main.getroot().findall('.//fig'):
if fig.getparent().tag == 'p':
elevate_element(fig)
for fig in self.main.getroot().findall('.//fig'):
#self.convert_fn_elements(fig)
#self.convert_disp_formula_elements(fig)
#Find label and caption
label_el = fig.find('label')
caption_el = fig.find('caption')
#Get the graphic node, this should be mandatory later on
graphic_el = fig.find('graphic')
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the content: using image path, label, and caption
img_el = etree.Element('img', {'alt': 'A Figure', 'src': img_path,
'class': 'figure'})
if 'id' in fig.attrib:
img_el.attrib['id'] = fig.attrib['id']
insert_before(fig, img_el)
#Create content for the label and caption
if caption_el is not None or label_el is not None:
img_caption_div = etree.Element('div', {'class': 'figure-caption'})
img_caption_div_b = etree.SubElement(img_caption_div, 'b')
if label_el is not None:
append_all_below(img_caption_div_b, label_el)
append_new_text(img_caption_div_b, '. ', join_str='')
if caption_el is not None:
caption_title = caption_el.find('title')
if caption_title is not None:
append_all_below(img_caption_div_b, caption_title)
append_new_text(img_caption_div_b, ' ', join_str='')
for each_p in caption_el.findall('p'):
append_all_below(img_caption_div, each_p)
insert_before(fig, img_caption_div)
#Remove the original <fig>
remove(fig) |
def herald_message(self, herald_svc, message):
"""
Handles a message received by Herald
:param herald_svc: Herald service
:param message: Received message
"""
subject = message.subject
if subject == SUBJECT_DISCOVERY_STEP_1:
# Step 1: Register the remote peer and reply with our dump
try:
# Delayed registration
notification = self._directory.register_delayed(
self.__load_dump(message))
peer = notification.peer
if peer is not None:
# Registration succeeded
self.__delayed_notifs[peer.uid] = notification
# Reply with our dump
herald_svc.reply(
message, self._directory.get_local_peer().dump(),
SUBJECT_DISCOVERY_STEP_2)
except ValueError:
self._logger.error("Error registering a discovered peer")
elif subject == SUBJECT_DISCOVERY_STEP_2:
# Step 2: Register the dump, notify local listeners, then let
# the remote peer notify its listeners
try:
# Register the peer
notification = self._directory.register_delayed(
self.__load_dump(message))
if notification.peer is not None:
# Let the remote peer notify its listeners
herald_svc.reply(message, None, SUBJECT_DISCOVERY_STEP_3)
# Now we can notify listeners
notification.notify()
except ValueError:
self._logger.error("Error registering a peer using the "
"description it sent")
elif subject == SUBJECT_DISCOVERY_STEP_3:
# Step 3: notify local listeners about the remote peer
try:
self.__delayed_notifs.pop(message.sender).notify()
except KeyError:
# Unknown peer
pass
else:
# Unknown subject
self._logger.warning("Unknown discovery step: %s", subject) | Handles a message received by Herald
:param herald_svc: Herald service
:param message: Received message | Below is the the instruction that describes the task:
### Input:
Handles a message received by Herald
:param herald_svc: Herald service
:param message: Received message
### Response:
def herald_message(self, herald_svc, message):
"""
Handles a message received by Herald
:param herald_svc: Herald service
:param message: Received message
"""
subject = message.subject
if subject == SUBJECT_DISCOVERY_STEP_1:
# Step 1: Register the remote peer and reply with our dump
try:
# Delayed registration
notification = self._directory.register_delayed(
self.__load_dump(message))
peer = notification.peer
if peer is not None:
# Registration succeeded
self.__delayed_notifs[peer.uid] = notification
# Reply with our dump
herald_svc.reply(
message, self._directory.get_local_peer().dump(),
SUBJECT_DISCOVERY_STEP_2)
except ValueError:
self._logger.error("Error registering a discovered peer")
elif subject == SUBJECT_DISCOVERY_STEP_2:
# Step 2: Register the dump, notify local listeners, then let
# the remote peer notify its listeners
try:
# Register the peer
notification = self._directory.register_delayed(
self.__load_dump(message))
if notification.peer is not None:
# Let the remote peer notify its listeners
herald_svc.reply(message, None, SUBJECT_DISCOVERY_STEP_3)
# Now we can notify listeners
notification.notify()
except ValueError:
self._logger.error("Error registering a peer using the "
"description it sent")
elif subject == SUBJECT_DISCOVERY_STEP_3:
# Step 3: notify local listeners about the remote peer
try:
self.__delayed_notifs.pop(message.sender).notify()
except KeyError:
# Unknown peer
pass
else:
# Unknown subject
self._logger.warning("Unknown discovery step: %s", subject) |
def _build_job_meta(cls, job_dir):
"""Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_name = job_dir.split("/")[-1]
user = os.environ.get("USER", None)
meta = {
"job_id": job_name,
"job_name": job_name,
"user": user,
"type": "ray",
"start_time": os.path.getctime(job_dir),
"end_time": None,
"best_trial_id": None,
}
if meta.get("start_time", None):
meta["start_time"] = timestamp2date(meta["start_time"])
return meta | Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info. | Below is the the instruction that describes the task:
### Input:
Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
### Response:
def _build_job_meta(cls, job_dir):
"""Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_name = job_dir.split("/")[-1]
user = os.environ.get("USER", None)
meta = {
"job_id": job_name,
"job_name": job_name,
"user": user,
"type": "ray",
"start_time": os.path.getctime(job_dir),
"end_time": None,
"best_trial_id": None,
}
if meta.get("start_time", None):
meta["start_time"] = timestamp2date(meta["start_time"])
return meta |
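For orientation, here is roughly the shape of the fallback meta dict the method builds when no job meta file exists; every value below is hypothetical.

# Illustrative shape of the returned dict (all values are made up).
meta = {
    "job_id": "ray_experiment_1",            # taken from the job directory name
    "job_name": "ray_experiment_1",
    "user": "alice",                         # from the USER environment variable
    "type": "ray",
    "start_time": "2019-04-07 12:30:05",     # os.path.getctime(job_dir) run through timestamp2date
    "end_time": None,
    "best_trial_id": None,
}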
def result(self, res):
    '''Return a value according to the parameter `res` when parsing succeeds.'''
    return self >> Parser(lambda _, index: Value.success(index, res)) | Return a value according to the parameter `res` when parsing succeeds. | Below is the the instruction that describes the task:
### Input:
Return a value according to the parameter `res` when parsing succeeds.
### Response:
def result(self, res):
    '''Return a value according to the parameter `res` when parsing succeeds.'''
return self >> Parser(lambda _, index: Value.success(index, res)) |
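If this combinator belongs to a parsec-style library (an assumption), `result` lets a parser succeed with a fixed value regardless of the matched text:

# Hedged sketch assuming a parsec-style API with a `string` primitive and a `parse` method.
from parsec import string                    # assumed import path
true_literal = string('true').result(True)   # match the text 'true', yield the Python value True
print(true_literal.parse('true'))            # -> True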
def _annotate_fn_args(stack, fn_opname, nargs, nkw=-1, consume_fn_name=True):
"""Add commas and equals as appropriate to function argument lists in the stack."""
kwarg_names = []
if nkw == -1:
if sys.version_info[0] < 3:
# Compute nkw and nargs from nargs for python 2.7
nargs, nkw = (nargs % 256, 2 * nargs // 256)
else:
if fn_opname == 'CALL_FUNCTION_KW':
if qj._DEBUG_QJ:
assert len(stack) and stack[-1].opname == 'LOAD_CONST'
if not len(stack) or stack[-1].opname != 'LOAD_CONST':
return
se = stack.pop()
kwarg_names = se.oparg_repr[::-1]
se.oparg_repr = ['']
nkw = len(kwarg_names)
nargs -= nkw
if qj._DEBUG_QJ:
assert nargs >= 0 and nkw > 0
else:
nkw = 0
for i in range(nkw):
se = stack.pop()
if se.stack_depth == 0 and (len(se.oparg_repr) == 0 or se.oparg_repr[0] == ''):
# Skip stack entries that don't have any effect on the stack
continue
if i % 2 == 1 and sys.version_info[0] < 3:
if qj._DEBUG_QJ:
assert se.opname == 'LOAD_CONST'
if se.opname == 'LOAD_CONST':
# kwargs are pairs of key=value in code
se.oparg_repr += ['=']
else:
pops = []
if se.opname.startswith('CALL_FUNCTION'):
_annotate_fn_args(stack[:], se.opname, se.oparg, -1, True)
pops = _collect_pops(stack, se.stack_depth - 1 if se.opname.startswith('CALL_FUNCTION') else 0, [], False)
if i > 1 and len(pops):
pops[-1].oparg_repr += [',']
if sys.version_info[0] >= 3:
target_se = pops[-1] if len(pops) else se
target_se.oparg_repr = [kwarg_names[i], '='] + target_se.oparg_repr
for i in range(nargs):
se = stack.pop()
if se.opname.startswith('CALL_FUNCTION'):
_annotate_fn_args(stack, se.opname, se.oparg, -1, True)
elif len(se.oparg_repr) and se.oparg_repr[0] in {']', '}', ')'}:
if (i > 0 or nkw > 0):
se.oparg_repr += [',']
else:
pops = _collect_pops(stack, se.stack_depth, [], False)
if (i > 0 or nkw > 0) and len(pops):
pops[-1].oparg_repr += [',']
if consume_fn_name:
_collect_pops(stack, -1, [], False) | Add commas and equals as appropriate to function argument lists in the stack. | Below is the the instruction that describes the task:
### Input:
Add commas and equals as appropriate to function argument lists in the stack.
### Response:
def _annotate_fn_args(stack, fn_opname, nargs, nkw=-1, consume_fn_name=True):
"""Add commas and equals as appropriate to function argument lists in the stack."""
kwarg_names = []
if nkw == -1:
if sys.version_info[0] < 3:
# Compute nkw and nargs from nargs for python 2.7
nargs, nkw = (nargs % 256, 2 * nargs // 256)
else:
if fn_opname == 'CALL_FUNCTION_KW':
if qj._DEBUG_QJ:
assert len(stack) and stack[-1].opname == 'LOAD_CONST'
if not len(stack) or stack[-1].opname != 'LOAD_CONST':
return
se = stack.pop()
kwarg_names = se.oparg_repr[::-1]
se.oparg_repr = ['']
nkw = len(kwarg_names)
nargs -= nkw
if qj._DEBUG_QJ:
assert nargs >= 0 and nkw > 0
else:
nkw = 0
for i in range(nkw):
se = stack.pop()
if se.stack_depth == 0 and (len(se.oparg_repr) == 0 or se.oparg_repr[0] == ''):
# Skip stack entries that don't have any effect on the stack
continue
if i % 2 == 1 and sys.version_info[0] < 3:
if qj._DEBUG_QJ:
assert se.opname == 'LOAD_CONST'
if se.opname == 'LOAD_CONST':
# kwargs are pairs of key=value in code
se.oparg_repr += ['=']
else:
pops = []
if se.opname.startswith('CALL_FUNCTION'):
_annotate_fn_args(stack[:], se.opname, se.oparg, -1, True)
pops = _collect_pops(stack, se.stack_depth - 1 if se.opname.startswith('CALL_FUNCTION') else 0, [], False)
if i > 1 and len(pops):
pops[-1].oparg_repr += [',']
if sys.version_info[0] >= 3:
target_se = pops[-1] if len(pops) else se
target_se.oparg_repr = [kwarg_names[i], '='] + target_se.oparg_repr
for i in range(nargs):
se = stack.pop()
if se.opname.startswith('CALL_FUNCTION'):
_annotate_fn_args(stack, se.opname, se.oparg, -1, True)
elif len(se.oparg_repr) and se.oparg_repr[0] in {']', '}', ')'}:
if (i > 0 or nkw > 0):
se.oparg_repr += [',']
else:
pops = _collect_pops(stack, se.stack_depth, [], False)
if (i > 0 or nkw > 0) and len(pops):
pops[-1].oparg_repr += [',']
if consume_fn_name:
_collect_pops(stack, -1, [], False) |
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
"""
Clears all the data in a given stream and the calculated intervals
:param stream_id: The stream id
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox id
:return: None
"""
if sandbox is not None:
raise NotImplementedError
if stream_id not in self.streams:
raise StreamNotFoundError(stream_id)
self.data[stream_id] = StreamInstanceCollection()
self.streams[stream_id].calculated_intervals = TimeIntervals()
if remove_definition:
del self.data[stream_id]
del self.streams[stream_id] | Clears all the data in a given stream and the calculated intervals
:param stream_id: The stream id
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox id
:return: None | Below is the the instruction that describes the task:
### Input:
Clears all the data in a given stream and the calculated intervals
:param stream_id: The stream id
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox id
:return: None
### Response:
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
"""
Clears all the data in a given stream and the calculated intervals
:param stream_id: The stream id
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox id
:return: None
"""
if sandbox is not None:
raise NotImplementedError
if stream_id not in self.streams:
raise StreamNotFoundError(stream_id)
self.data[stream_id] = StreamInstanceCollection()
self.streams[stream_id].calculated_intervals = TimeIntervals()
if remove_definition:
del self.data[stream_id]
del self.streams[stream_id] |
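A minimal usage sketch, assuming `channel` is an instance of the in-memory channel class above and `stream_id` names a stream it already holds:

# Hedged usage sketch; `channel` and `stream_id` are assumed to exist already.
channel.purge_stream(stream_id)                           # drop the data, keep the stream definition
channel.purge_stream(stream_id, remove_definition=True)   # drop the data and the definition as well
# An unknown id raises StreamNotFoundError; passing a sandbox raises NotImplementedError.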
def validate(self):
    ''' Perform integrity checks on the models in this document.
Returns:
None
'''
for r in self.roots:
refs = r.references()
        check_integrity(refs) | Perform integrity checks on the models in this document.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Perform integrity checks on the models in this document.
Returns:
None
### Response:
def validate(self):
    ''' Perform integrity checks on the models in this document.
Returns:
None
'''
for r in self.roots:
refs = r.references()
check_integrity(refs) |
def new_session(
self,
session_name=None,
kill_session=False,
attach=False,
start_directory=None,
window_name=None,
window_command=None,
*args,
**kwargs
):
"""
Return :class:`Session` from ``$ tmux new-session``.
Uses ``-P`` flag to print session info, ``-F`` for return formatting
returns new Session object.
``$ tmux new-session -d`` will create the session in the background
``$ tmux new-session -Ad`` will move to the session name if it already
exists. todo: make an option to handle this.
Parameters
----------
session_name : str, optional
::
$ tmux new-session -s <session_name>
attach : bool, optional
create session in the foreground. ``attach=False`` is equivalent
to::
$ tmux new-session -d
Other Parameters
----------------
kill_session : bool, optional
Kill current session if ``$ tmux has-session``.
Useful for testing workspaces.
start_directory : str, optional
specifies the working directory in which the
new session is created.
window_name : str, optional
::
$ tmux new-session -n <window_name>
window_command : str
execute a command on starting the session. The window will close
when the command exits. NOTE: When this command exits the window
will close. This feature is useful for long-running processes
where the closing of the window upon completion is desired.
Returns
-------
:class:`Session`
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(session_name)
if self.has_session(session_name):
if kill_session:
self.cmd('kill-session', '-t%s' % session_name)
logger.info('session %s exists. killed it.' % session_name)
else:
raise exc.TmuxSessionExists('Session named %s exists' % session_name)
logger.debug('creating session %s' % session_name)
sformats = formats.SESSION_FORMATS
tmux_formats = ['#{%s}' % f for f in sformats]
env = os.environ.get('TMUX')
if env:
del os.environ['TMUX']
tmux_args = (
'-s%s' % session_name,
'-P',
'-F%s' % '\t'.join(tmux_formats), # output
)
if not attach:
tmux_args += ('-d',)
if start_directory:
tmux_args += ('-c', start_directory)
if window_name:
tmux_args += ('-n', window_name)
# tmux 2.6 gives unattached sessions a tiny default area
# no need send in -x/-y if they're in a client already, though
if has_gte_version('2.6') and 'TMUX' not in os.environ:
tmux_args += ('-x', 800, '-y', 600)
if window_command:
tmux_args += (window_command,)
proc = self.cmd('new-session', *tmux_args)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
session = proc.stdout[0]
if env:
os.environ['TMUX'] = env
# combine format keys with values returned from ``tmux list-windows``
session = dict(zip(sformats, session.split('\t')))
# clear up empty dict
session = dict((k, v) for k, v in session.items() if v)
session = Session(server=self, **session)
return session | Return :class:`Session` from ``$ tmux new-session``.
Uses ``-P`` flag to print session info, ``-F`` for return formatting
returns new Session object.
``$ tmux new-session -d`` will create the session in the background
``$ tmux new-session -Ad`` will move to the session name if it already
exists. todo: make an option to handle this.
Parameters
----------
session_name : str, optional
::
$ tmux new-session -s <session_name>
attach : bool, optional
create session in the foreground. ``attach=False`` is equivalent
to::
$ tmux new-session -d
Other Parameters
----------------
kill_session : bool, optional
Kill current session if ``$ tmux has-session``.
Useful for testing workspaces.
start_directory : str, optional
specifies the working directory in which the
new session is created.
window_name : str, optional
::
$ tmux new-session -n <window_name>
window_command : str
execute a command on starting the session. The window will close
when the command exits. NOTE: When this command exits the window
will close. This feature is useful for long-running processes
where the closing of the window upon completion is desired.
Returns
-------
:class:`Session`
Raises
------
:exc:`exc.BadSessionName` | Below is the the instruction that describes the task:
### Input:
Return :class:`Session` from ``$ tmux new-session``.
Uses ``-P`` flag to print session info, ``-F`` for return formatting
returns new Session object.
``$ tmux new-session -d`` will create the session in the background
``$ tmux new-session -Ad`` will move to the session name if it already
exists. todo: make an option to handle this.
Parameters
----------
session_name : str, optional
::
$ tmux new-session -s <session_name>
attach : bool, optional
create session in the foreground. ``attach=False`` is equivalent
to::
$ tmux new-session -d
Other Parameters
----------------
kill_session : bool, optional
Kill current session if ``$ tmux has-session``.
Useful for testing workspaces.
start_directory : str, optional
specifies the working directory in which the
new session is created.
window_name : str, optional
::
$ tmux new-session -n <window_name>
window_command : str
execute a command on starting the session. The window will close
when the command exits. NOTE: When this command exits the window
will close. This feature is useful for long-running processes
where the closing of the window upon completion is desired.
Returns
-------
:class:`Session`
Raises
------
:exc:`exc.BadSessionName`
### Response:
def new_session(
self,
session_name=None,
kill_session=False,
attach=False,
start_directory=None,
window_name=None,
window_command=None,
*args,
**kwargs
):
"""
Return :class:`Session` from ``$ tmux new-session``.
Uses ``-P`` flag to print session info, ``-F`` for return formatting
returns new Session object.
``$ tmux new-session -d`` will create the session in the background
``$ tmux new-session -Ad`` will move to the session name if it already
exists. todo: make an option to handle this.
Parameters
----------
session_name : str, optional
::
$ tmux new-session -s <session_name>
attach : bool, optional
create session in the foreground. ``attach=False`` is equivalent
to::
$ tmux new-session -d
Other Parameters
----------------
kill_session : bool, optional
Kill current session if ``$ tmux has-session``.
Useful for testing workspaces.
start_directory : str, optional
specifies the working directory in which the
new session is created.
window_name : str, optional
::
$ tmux new-session -n <window_name>
window_command : str
execute a command on starting the session. The window will close
when the command exits. NOTE: When this command exits the window
will close. This feature is useful for long-running processes
where the closing of the window upon completion is desired.
Returns
-------
:class:`Session`
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(session_name)
if self.has_session(session_name):
if kill_session:
self.cmd('kill-session', '-t%s' % session_name)
logger.info('session %s exists. killed it.' % session_name)
else:
raise exc.TmuxSessionExists('Session named %s exists' % session_name)
logger.debug('creating session %s' % session_name)
sformats = formats.SESSION_FORMATS
tmux_formats = ['#{%s}' % f for f in sformats]
env = os.environ.get('TMUX')
if env:
del os.environ['TMUX']
tmux_args = (
'-s%s' % session_name,
'-P',
'-F%s' % '\t'.join(tmux_formats), # output
)
if not attach:
tmux_args += ('-d',)
if start_directory:
tmux_args += ('-c', start_directory)
if window_name:
tmux_args += ('-n', window_name)
# tmux 2.6 gives unattached sessions a tiny default area
# no need send in -x/-y if they're in a client already, though
if has_gte_version('2.6') and 'TMUX' not in os.environ:
tmux_args += ('-x', 800, '-y', 600)
if window_command:
tmux_args += (window_command,)
proc = self.cmd('new-session', *tmux_args)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
session = proc.stdout[0]
if env:
os.environ['TMUX'] = env
# combine format keys with values returned from ``tmux list-windows``
session = dict(zip(sformats, session.split('\t')))
# clear up empty dict
session = dict((k, v) for k, v in session.items() if v)
session = Session(server=self, **session)
return session |
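A short sketch of how this is typically reached from the public libtmux API; the session and window names are arbitrary examples, and a tmux binary is assumed to be on PATH.

# Usage sketch (assumes tmux is installed and reachable).
import libtmux

server = libtmux.Server()
session = server.new_session(
    session_name="scratch",   # arbitrary example name
    attach=False,             # detached, i.e. `tmux new-session -d`
    window_name="editor",
)
print(session.get("session_name"))   # -> "scratch"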
def V_horiz_spherical(D, L, a, h, headonly=False):
r'''Calculates volume of a tank with spherical heads, according to [1]_.
.. math::
V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R
.. math::
V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
\text{ or } |a| = 0, R, -R
.. math::
V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
\frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
- \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
\cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
\tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D
.. math::
V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
\sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
.. math::
r = \frac{a^2 + R^2}{2|a|}
.. math::
w = R - h
.. math::
y = \sqrt{2Rh-h^2}
.. math::
z = \sqrt{r^2 - R^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the spherical head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
2303.9615116986183
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
R = D/2.
r = (a**2 + R**2)/2./abs(a)
w = R - h
y = (2*R*h - h**2)**0.5
z = (r**2 - R**2)**0.5
Af = R**2*acos((R-h)/R) - (R-h)*(2*R*h - h**2)**0.5
if h == R and abs(a) <= R:
Vf = pi*a/6*(3*R**2 + a**2)
elif h == D and abs(a) <= R:
Vf = pi*a/3*(3*R**2 + a**2)
elif h == 0 or a == 0 or a == R or a == -R:
Vf = pi*a*h**2*(1 - h/3./R)
elif abs(a) >= 0.01*D:
Vf = a/abs(a)*(
2*r**3/3.*(acos((R**2 - r*w)/(R*(w-r))) + acos((R**2+r*w)/(R*(w+r)))
- z/r*(2+(R/r)**2)*acos(w/R))
- 2*(w*r**2 - w**3/3)*atan(y/z) + 4*w*y*z/3)
else:
def V_horiz_spherical_toint(x):
return (r**2 - x**2)*atan(((R**2 - x**2)/(r**2 - R**2))**0.5)
from scipy.integrate import quad
integrated = quad(V_horiz_spherical_toint, w, R)[0]
Vf = a/abs(a)*(2*integrated - Af*z)
if headonly:
Vf = Vf/2.
else:
Vf += Af*L
return Vf | r'''Calculates volume of a tank with spherical heads, according to [1]_.
.. math::
V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R
.. math::
V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
\text{ or } |a| = 0, R, -R
.. math::
V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
\frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
- \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
\cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
\tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D
.. math::
V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
\sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
.. math::
r = \frac{a^2 + R^2}{2|a|}
.. math::
w = R - h
.. math::
y = \sqrt{2Rh-h^2}
.. math::
z = \sqrt{r^2 - R^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the spherical head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
2303.9615116986183
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF | Below is the the instruction that describes the task:
### Input:
r'''Calculates volume of a tank with spherical heads, according to [1]_.
.. math::
V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R
.. math::
V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
\text{ or } |a| = 0, R, -R
.. math::
V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
\frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
- \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
\cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
\tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D
.. math::
V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
\sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
.. math::
r = \frac{a^2 + R^2}{2|a|}
.. math::
w = R - h
.. math::
y = \sqrt{2Rh-h^2}
.. math::
z = \sqrt{r^2 - R^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the spherical head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
2303.9615116986183
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF
### Response:
def V_horiz_spherical(D, L, a, h, headonly=False):
r'''Calculates volume of a tank with spherical heads, according to [1]_.
.. math::
V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R
.. math::
V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
\text{ or } |a| = 0, R, -R
.. math::
V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
\frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
- \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
\cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
\tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D
.. math::
V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
\sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
.. math::
r = \frac{a^2 + R^2}{2|a|}
.. math::
w = R - h
.. math::
y = \sqrt{2Rh-h^2}
.. math::
z = \sqrt{r^2 - R^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the spherical head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
2303.9615116986183
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
R = D/2.
r = (a**2 + R**2)/2./abs(a)
w = R - h
y = (2*R*h - h**2)**0.5
z = (r**2 - R**2)**0.5
Af = R**2*acos((R-h)/R) - (R-h)*(2*R*h - h**2)**0.5
if h == R and abs(a) <= R:
Vf = pi*a/6*(3*R**2 + a**2)
elif h == D and abs(a) <= R:
Vf = pi*a/3*(3*R**2 + a**2)
elif h == 0 or a == 0 or a == R or a == -R:
Vf = pi*a*h**2*(1 - h/3./R)
elif abs(a) >= 0.01*D:
Vf = a/abs(a)*(
2*r**3/3.*(acos((R**2 - r*w)/(R*(w-r))) + acos((R**2+r*w)/(R*(w+r)))
- z/r*(2+(R/r)**2)*acos(w/R))
- 2*(w*r**2 - w**3/3)*atan(y/z) + 4*w*y*z/3)
else:
def V_horiz_spherical_toint(x):
return (r**2 - x**2)*atan(((R**2 - x**2)/(r**2 - R**2))**0.5)
from scipy.integrate import quad
integrated = quad(V_horiz_spherical_toint, w, R)[0]
Vf = a/abs(a)*(2*integrated - Af*z)
if headonly:
Vf = Vf/2.
else:
Vf += Af*L
return Vf |
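A quick sanity check of the closed-form branch at h = D, assuming the function above is in scope: a full tank should equal the cylinder volume plus two spherical caps of depth a and base radius R, i.e. pi*R^2*L + 2*(pi*a/6)*(3*R^2 + a^2).

# Hedged check of the h == D branch; V_horiz_spherical from above is assumed to be defined.
from math import pi

D, L, a = 108.0, 156.0, 42.0
R = D / 2.0
full = V_horiz_spherical(D=D, L=L, a=a, h=D)
expected = pi * R**2 * L + 2.0 * (pi * a / 6.0) * (3.0 * R**2 + a**2)
assert abs(full - expected) < 1e-9 * expected
print(full / 231.0)   # full volume in US gallons when the lengths are in inches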
def run(self, tasks=None, timeout=None):
"""Block, run loop until all tasks completed."""
timeout = self._timeout if timeout is None else timeout
if self.async_running or self.loop.is_running():
return self.wait_all_tasks_done(timeout)
else:
tasks = tasks or self.todo_tasks
return self.loop.run_until_complete(asyncio.gather(*tasks, loop=self.loop)) | Block, run loop until all tasks completed. | Below is the the instruction that describes the task:
### Input:
Block, run loop until all tasks completed.
### Response:
def run(self, tasks=None, timeout=None):
"""Block, run loop until all tasks completed."""
timeout = self._timeout if timeout is None else timeout
if self.async_running or self.loop.is_running():
return self.wait_all_tasks_done(timeout)
else:
tasks = tasks or self.todo_tasks
return self.loop.run_until_complete(asyncio.gather(*tasks, loop=self.loop)) |
def _draw_legend(self):
"""
Draw legend onto the figure
"""
legend_box = self.guides.build(self)
if not legend_box:
return
figure = self.figure
left = figure.subplotpars.left
right = figure.subplotpars.right
top = figure.subplotpars.top
bottom = figure.subplotpars.bottom
W, H = figure.get_size_inches()
position = self.guides.position
get_property = self.theme.themeables.property
# defaults
spacing = 0.1
strip_margin_x = 0
strip_margin_y = 0
with suppress(KeyError):
spacing = get_property('legend_box_spacing')
with suppress(KeyError):
strip_margin_x = get_property('strip_margin_x')
with suppress(KeyError):
strip_margin_y = get_property('strip_margin_y')
right_strip_width = self.facet.strip_size('right')
top_strip_height = self.facet.strip_size('top')
# Other than when the legend is on the right the rest of
        # the computed x, y locations are not guaranteed not to
# overlap with the axes or the labels. The user must then
# use the legend_margin theme parameter to adjust the
# location. This should get fixed when MPL has a better
# layout manager.
if position == 'right':
loc = 6
pad = right_strip_width*(1+strip_margin_x) + spacing
x = right + pad/W
y = 0.5
elif position == 'left':
loc = 7
x = left - spacing/W
y = 0.5
elif position == 'top':
loc = 8
x = 0.5
pad = top_strip_height*(1+strip_margin_y) + spacing
y = top + pad/H
elif position == 'bottom':
loc = 9
x = 0.5
y = bottom - spacing/H
else:
loc = 10
x, y = position
anchored_box = AnchoredOffsetbox(
loc=loc,
child=legend_box,
pad=0.,
frameon=False,
bbox_to_anchor=(x, y),
bbox_transform=figure.transFigure,
borderpad=0.)
anchored_box.set_zorder(90.1)
self.figure._themeable['legend_background'] = anchored_box
ax = self.axs[0]
ax.add_artist(anchored_box) | Draw legend onto the figure | Below is the the instruction that describes the task:
### Input:
Draw legend onto the figure
### Response:
def _draw_legend(self):
"""
Draw legend onto the figure
"""
legend_box = self.guides.build(self)
if not legend_box:
return
figure = self.figure
left = figure.subplotpars.left
right = figure.subplotpars.right
top = figure.subplotpars.top
bottom = figure.subplotpars.bottom
W, H = figure.get_size_inches()
position = self.guides.position
get_property = self.theme.themeables.property
# defaults
spacing = 0.1
strip_margin_x = 0
strip_margin_y = 0
with suppress(KeyError):
spacing = get_property('legend_box_spacing')
with suppress(KeyError):
strip_margin_x = get_property('strip_margin_x')
with suppress(KeyError):
strip_margin_y = get_property('strip_margin_y')
right_strip_width = self.facet.strip_size('right')
top_strip_height = self.facet.strip_size('top')
# Other than when the legend is on the right the rest of
        # the computed x, y locations are not guaranteed not to
# overlap with the axes or the labels. The user must then
# use the legend_margin theme parameter to adjust the
# location. This should get fixed when MPL has a better
# layout manager.
if position == 'right':
loc = 6
pad = right_strip_width*(1+strip_margin_x) + spacing
x = right + pad/W
y = 0.5
elif position == 'left':
loc = 7
x = left - spacing/W
y = 0.5
elif position == 'top':
loc = 8
x = 0.5
pad = top_strip_height*(1+strip_margin_y) + spacing
y = top + pad/H
elif position == 'bottom':
loc = 9
x = 0.5
y = bottom - spacing/H
else:
loc = 10
x, y = position
anchored_box = AnchoredOffsetbox(
loc=loc,
child=legend_box,
pad=0.,
frameon=False,
bbox_to_anchor=(x, y),
bbox_transform=figure.transFigure,
borderpad=0.)
anchored_box.set_zorder(90.1)
self.figure._themeable['legend_background'] = anchored_box
ax = self.axs[0]
ax.add_artist(anchored_box) |
def get_listeners(self):
"""Returns a :class:`list` of (name, function) listener pairs that are defined in this cog."""
return [(name, getattr(self, method_name)) for name, method_name in self.__cog_listeners__] | Returns a :class:`list` of (name, function) listener pairs that are defined in this cog. | Below is the the instruction that describes the task:
### Input:
Returns a :class:`list` of (name, function) listener pairs that are defined in this cog.
### Response:
def get_listeners(self):
"""Returns a :class:`list` of (name, function) listener pairs that are defined in this cog."""
return [(name, getattr(self, method_name)) for name, method_name in self.__cog_listeners__] |
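A short sketch of the cog API this appears to belong to (assumed to be discord.py's commands extension): methods decorated as listeners are what get_listeners reports.

# Hedged sketch assuming discord.py's commands extension.
from discord.ext import commands

class Greetings(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_member_join(self, member):
        await member.send("welcome!")

cog = Greetings(bot=None)    # no running bot is needed just to inspect the listeners
print(cog.get_listeners())   # -> [('on_member_join', <bound method Greetings.on_member_join ...>)]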
def checkState(self, checkState):
""" Sets the data to given a Qt.CheckState (Qt.Checked or Qt.Unchecked).
"""
logger.debug("checkState setter: {}".format(checkState))
if checkState == Qt.Checked:
commonData = True
elif checkState == Qt.Unchecked:
commonData = False
elif checkState == Qt.PartiallyChecked:
commonData = None
# This never occurs, see remarks above in the classes' docstring
assert False, "This never happens. Please report if it does."
else:
raise ValueError("Unexpected check state: {!r}".format(checkState))
for child in self.childItems:
if isinstance(child, BoolCti):
                child.data = commonData | Sets the data to a given Qt.CheckState (Qt.Checked or Qt.Unchecked). | Below is the the instruction that describes the task:
### Input:
Sets the data to a given Qt.CheckState (Qt.Checked or Qt.Unchecked).
### Response:
def checkState(self, checkState):
""" Sets the data to given a Qt.CheckState (Qt.Checked or Qt.Unchecked).
"""
logger.debug("checkState setter: {}".format(checkState))
if checkState == Qt.Checked:
commonData = True
elif checkState == Qt.Unchecked:
commonData = False
elif checkState == Qt.PartiallyChecked:
commonData = None
# This never occurs, see remarks above in the classes' docstring
assert False, "This never happens. Please report if it does."
else:
raise ValueError("Unexpected check state: {!r}".format(checkState))
for child in self.childItems:
if isinstance(child, BoolCti):
child.data = commonData |
def source_analysis(
source_path, group, encoding='automatic', fallback_encoding='cp1252',
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool=None):
"""
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
    :param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info('%s: is empty', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info('%s: is binary', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info('%s: unknown language', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info('%s: is a duplicate of %s', source_path, duplicate_path)
result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ('automatic', 'chardet'):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, 'r', encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
result = pseudo_source_analysis(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(
pygount.common.lines(source_code), generated_regexes
)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = 'line {0} matches {1}'.format(number, regex)
_log.info('%s: is generated code because %s', source_path, message)
result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ('xml' in language.lower()) or (language == 'Genshi'):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = 'e'
for mark_to_check in ('d', 's', 'c'):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map['c'],
documentation=mark_to_count_map['d'],
empty=mark_to_count_map['e'],
string=mark_to_count_map['s'],
state=SourceState.analyzed.name,
state_info=None,
)
assert result is not None
return result | Analysis for line counts in source code stored in ``source_path``.
:param source_path:
    :param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis` | Below is the the instruction that describes the task:
### Input:
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
:param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
### Response:
def source_analysis(
source_path, group, encoding='automatic', fallback_encoding='cp1252',
generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
duplicate_pool=None):
"""
Analysis for line counts in source code stored in ``source_path``.
:param source_path:
    :param group: name of a logical group the source code belongs to, e.g. a
package.
:param encoding: encoding according to :func:`encoding_for`
:param fallback_encoding: fallback encoding according to
:func:`encoding_for`
:return: a :class:`SourceAnalysis`
"""
assert encoding is not None
assert generated_regexes is not None
result = None
lexer = None
source_code = None
source_size = os.path.getsize(source_path)
if source_size == 0:
_log.info('%s: is empty', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.empty)
elif is_binary_file(source_path):
_log.info('%s: is binary', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.binary)
elif not has_lexer(source_path):
_log.info('%s: unknown language', source_path)
result = pseudo_source_analysis(source_path, group, SourceState.unknown)
elif duplicate_pool is not None:
duplicate_path = duplicate_pool.duplicate_path(source_path)
if duplicate_path is not None:
_log.info('%s: is a duplicate of %s', source_path, duplicate_path)
result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
if result is None:
if encoding in ('automatic', 'chardet'):
encoding = encoding_for(source_path, encoding, fallback_encoding)
try:
with open(source_path, 'r', encoding=encoding) as source_file:
source_code = source_file.read()
except (LookupError, OSError, UnicodeError) as error:
_log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
result = pseudo_source_analysis(source_path, group, SourceState.error, error)
if result is None:
lexer = guess_lexer(source_path, source_code)
assert lexer is not None
if (result is None) and (len(generated_regexes) != 0):
number_line_and_regex = matching_number_line_and_regex(
pygount.common.lines(source_code), generated_regexes
)
if number_line_and_regex is not None:
number, _, regex = number_line_and_regex
message = 'line {0} matches {1}'.format(number, regex)
_log.info('%s: is generated code because %s', source_path, message)
result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
if result is None:
assert lexer is not None
assert source_code is not None
language = lexer.name
if ('xml' in language.lower()) or (language == 'Genshi'):
dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
if dialect is not None:
language = dialect
_log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
for line_parts in _line_parts(lexer, source_code):
mark_to_increment = 'e'
for mark_to_check in ('d', 's', 'c'):
if mark_to_check in line_parts:
mark_to_increment = mark_to_check
mark_to_count_map[mark_to_increment] += 1
result = SourceAnalysis(
path=source_path,
language=language,
group=group,
code=mark_to_count_map['c'],
documentation=mark_to_count_map['d'],
empty=mark_to_count_map['e'],
string=mark_to_count_map['s'],
state=SourceState.analyzed.name,
state_info=None,
)
assert result is not None
return result |
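A small usage sketch, assuming this is pygount's analysis entry point and that the target file exists; the path and group name are arbitrary examples, and the attribute names follow the SourceAnalysis fields shown above.

# Hedged usage sketch.
import pygount

analysis = pygount.source_analysis("setup.py", group="my_project")
print(analysis.language, analysis.code, analysis.documentation, analysis.empty, analysis.state)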
def read(self, filehandle):
"""Read data into a :class:`~mwtab.mwtab.MWTabFile` instance.
:param filehandle: file-like object.
:type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`,
:py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile`
:return: None
:rtype: :py:obj:`None`
"""
input_str = filehandle.read()
mwtab_str = self._is_mwtab(input_str)
json_str = self._is_json(input_str)
if not input_str:
pass
elif json_str:
self.update(json_str)
elif mwtab_str:
self._build_mwtabfile(mwtab_str)
else:
raise TypeError("Unknown file format")
self.study_id = self["METABOLOMICS WORKBENCH"].get("STUDY_ID")
self.analysis_id = self["METABOLOMICS WORKBENCH"].get("ANALYSIS_ID")
self.header = self["METABOLOMICS WORKBENCH"].get("HEADER")
filehandle.close() | Read data into a :class:`~mwtab.mwtab.MWTabFile` instance.
:param filehandle: file-like object.
:type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`,
:py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile`
:return: None
:rtype: :py:obj:`None` | Below is the the instruction that describes the task:
### Input:
Read data into a :class:`~mwtab.mwtab.MWTabFile` instance.
:param filehandle: file-like object.
:type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`,
:py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile`
:return: None
:rtype: :py:obj:`None`
### Response:
def read(self, filehandle):
"""Read data into a :class:`~mwtab.mwtab.MWTabFile` instance.
:param filehandle: file-like object.
:type filehandle: :py:class:`io.TextIOWrapper`, :py:class:`gzip.GzipFile`,
:py:class:`bz2.BZ2File`, :py:class:`zipfile.ZipFile`
:return: None
:rtype: :py:obj:`None`
"""
input_str = filehandle.read()
mwtab_str = self._is_mwtab(input_str)
json_str = self._is_json(input_str)
if not input_str:
pass
elif json_str:
self.update(json_str)
elif mwtab_str:
self._build_mwtabfile(mwtab_str)
else:
raise TypeError("Unknown file format")
self.study_id = self["METABOLOMICS WORKBENCH"].get("STUDY_ID")
self.analysis_id = self["METABOLOMICS WORKBENCH"].get("ANALYSIS_ID")
self.header = self["METABOLOMICS WORKBENCH"].get("HEADER")
filehandle.close() |
def setDocuments(self, documenting_pid, documented_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
"""
self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) | Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``. | Below is the the instruction that describes the task:
### Input:
Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
### Response:
def setDocuments(self, documenting_pid, documented_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
"""
self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) |
def identify(path_name, *, override=None, check_exists=True, default=ISDIR):
"""
Identify the type of a given path name (file or directory). If check_exists
is specified to be false, then the function will not set the identity based
on the path existing or not. If override is specified as either ISDIR or
ISFILE, then the function will try its best to over-ride the identity to be
what you have specified. The 'default' parameter is what the function will
default the identity to if 'override' is not specified, and the path looks
like it could be both a directory and a file.
"""
head, tail = os.path.split(path_name)
if check_exists and os.path.exists(path_name):
if os.path.isfile(path_name):
if override == ISDIR:
raise ValueError("Cannot override a path as a directory if it "
"is a file that already exists")
result = ISFILE
elif os.path.isdir(path_name):
if override == ISFILE:
raise ValueError("Cannot override a path as a file if it is "
"a directory that already exists")
result = ISDIR
else:
raise Exception("Path exists but isn't a file or a directory...")
elif not tail:
if override == ISFILE:
raise ValueError("Cannot interpret a path with a slash at the end "
"to be a file")
result = ISDIR
elif has_ext(tail, if_all_ext=True):
if override is None:
result = ISFILE
else:
result = override
else:
if override is None:
result = default
else:
result = override
return result | Identify the type of a given path name (file or directory). If check_exists
is specified to be false, then the function will not set the identity based
on the path existing or not. If override is specified as either ISDIR or
ISFILE, then the function will try its best to over-ride the identity to be
what you have specified. The 'default' parameter is what the function will
default the identity to if 'override' is not specified, and the path looks
like it could be both a directory and a file. | Below is the the instruction that describes the task:
### Input:
Identify the type of a given path name (file or directory). If check_exists
is specified to be false, then the function will not set the identity based
on the path existing or not. If override is specified as either ISDIR or
ISFILE, then the function will try its best to over-ride the identity to be
what you have specified. The 'default' parameter is what the function will
default the identity to if 'override' is not specified, and the path looks
like it could be both a directory and a file.
### Response:
def identify(path_name, *, override=None, check_exists=True, default=ISDIR):
"""
Identify the type of a given path name (file or directory). If check_exists
is specified to be false, then the function will not set the identity based
on the path existing or not. If override is specified as either ISDIR or
ISFILE, then the function will try its best to over-ride the identity to be
what you have specified. The 'default' parameter is what the function will
default the identity to if 'override' is not specified, and the path looks
like it could be both a directory and a file.
"""
head, tail = os.path.split(path_name)
if check_exists and os.path.exists(path_name):
if os.path.isfile(path_name):
if override == ISDIR:
raise ValueError("Cannot override a path as a directory if it "
"is a file that already exists")
result = ISFILE
elif os.path.isdir(path_name):
if override == ISFILE:
raise ValueError("Cannot override a path as a file if it is "
"a directory that already exists")
result = ISDIR
else:
raise Exception("Path exists but isn't a file or a directory...")
elif not tail:
if override == ISFILE:
raise ValueError("Cannot interpret a path with a slash at the end "
"to be a file")
result = ISDIR
elif has_ext(tail, if_all_ext=True):
if override is None:
result = ISFILE
else:
result = override
else:
if override is None:
result = default
else:
result = override
return result |
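A few illustrative calls, assuming ISDIR, ISFILE and has_ext are module-level helpers from the same module as the function above:

# Hedged examples; ISDIR/ISFILE are assumed module-level sentinels, has_ext an assumed helper.
identify("notes/report.pdf", check_exists=False)        # -> ISFILE (the tail has an extension)
identify("build/", check_exists=False)                  # -> ISDIR  (trailing slash means directory)
identify("build", check_exists=False)                   # -> ISDIR  (falls back to `default`)
identify("build", check_exists=False, override=ISFILE)  # -> ISFILE (explicit override wins)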
def _point_in_bbox(point, bounds):
"""
    validate whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
               or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) | validate whether the point is inside the bounding box | Below is the the instruction that describes the task:
### Input:
validate whether the point is inside the bounding box
### Response:
def _point_in_bbox(point, bounds):
"""
    validate whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) |
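Note the ordering the comparisons imply: bounds[0]/bounds[2] bracket the latitude (coordinates[1]) and bounds[1]/bounds[3] bracket the longitude (coordinates[0]). A small self-contained check, assuming the function above is in scope:

# Hedged example; GeoJSON-style point with [lon, lat], bounds as [min_lat, min_lon, max_lat, max_lon].
point = {"type": "Point", "coordinates": [-122.42, 37.77]}   # lon, lat (example coordinates)
bounds = [37.0, -123.0, 38.0, -122.0]                        # south, west, north, east
print(_point_in_bbox(point, bounds))                 # -> True
print(_point_in_bbox(point, [0.0, 0.0, 1.0, 1.0]))   # -> False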
def get_return_description_indexes(self, data):
"""Get from a docstring the return parameter description indexes.
In javadoc style it is after @return.
:param data: string to parse
:returns: start and end indexes of found element else (-1, -1)
Note: the end index is the index after the last included character or -1 if
reached the end
:rtype: tuple
"""
start, end = -1, -1
stl_return = self.opt['return'][self.style['in']]['name']
if self.style['in'] in self.tagstyles + ['unknown']:
idx = self.get_key_index(data, 'return')
idx_abs = idx
# search starting description
if idx >= 0:
# FIXME: take care if a return description starts with <, >, =,...
m = re.match(r'\W*(\w+)', data[idx_abs + len(stl_return):])
if m:
first = m.group(1)
idx = data[idx_abs:].find(first)
idx_abs += idx
start = idx_abs
else:
idx = -1
# search the end
idx = self.get_elem_index(data[idx_abs:])
if idx > 0:
idx_abs += idx
end = idx_abs
if self.style['in'] in ['params', 'unknown'] and (start, end) == (-1, -1):
# TODO: manage this
pass
return start, end | Get from a docstring the return parameter description indexes.
In javadoc style it is after @return.
:param data: string to parse
:returns: start and end indexes of found element else (-1, -1)
Note: the end index is the index after the last included character or -1 if
reached the end
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Get from a docstring the return parameter description indexes.
In javadoc style it is after @return.
:param data: string to parse
:returns: start and end indexes of found element else (-1, -1)
Note: the end index is the index after the last included character or -1 if
reached the end
:rtype: tuple
### Response:
def get_return_description_indexes(self, data):
"""Get from a docstring the return parameter description indexes.
In javadoc style it is after @return.
:param data: string to parse
:returns: start and end indexes of found element else (-1, -1)
Note: the end index is the index after the last included character or -1 if
reached the end
:rtype: tuple
"""
start, end = -1, -1
stl_return = self.opt['return'][self.style['in']]['name']
if self.style['in'] in self.tagstyles + ['unknown']:
idx = self.get_key_index(data, 'return')
idx_abs = idx
# search starting description
if idx >= 0:
# FIXME: take care if a return description starts with <, >, =,...
m = re.match(r'\W*(\w+)', data[idx_abs + len(stl_return):])
if m:
first = m.group(1)
idx = data[idx_abs:].find(first)
idx_abs += idx
start = idx_abs
else:
idx = -1
# search the end
idx = self.get_elem_index(data[idx_abs:])
if idx > 0:
idx_abs += idx
end = idx_abs
if self.style['in'] in ['params', 'unknown'] and (start, end) == (-1, -1):
# TODO: manage this
pass
return start, end |
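An illustrative call; 'parser' stands for an already-configured docstring parser instance using the javadoc tag style, and the docstring text is invented.
data = "@param x  the input value\n@return the description of the returned value"
start, end = parser.get_return_description_indexes(data)
# start points into the description after @return; end is -1 when the
# description runs to the end of the string, as noted in the docstring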
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
raw_content = properties.get("addressSpace", None)
if raw_content is not None:
address_space = AddressSpace.from_raw_data(raw_content)
properties["addressSpace"] = address_space
raw_content = properties.get("dhcpOptions")
if raw_content is not None:
dhcp_options = DHCPOptions.from_raw_data(raw_content)
properties["dhcpOptions"] = dhcp_options
raw_content = properties.get("logicalNetwork", None)
if raw_content is not None:
properties["logicalNetwork"] = Resource.from_raw_data(raw_content)
subnetworks = []
for raw_subnet in properties.get("subnets", []):
raw_subnet["parentResourceID"] = raw_data["resourceId"]
subnetworks.append(SubNetworks.from_raw_data(raw_subnet))
properties["subnets"] = subnetworks
return super(VirtualNetworks, cls).process_raw_data(raw_data) | Create a new model using raw API response. | Below is the the instruction that describes the task:
### Input:
Create a new model using raw API response.
### Response:
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
raw_content = properties.get("addressSpace", None)
if raw_content is not None:
address_space = AddressSpace.from_raw_data(raw_content)
properties["addressSpace"] = address_space
raw_content = properties.get("dhcpOptions")
if raw_content is not None:
dhcp_options = DHCPOptions.from_raw_data(raw_content)
properties["dhcpOptions"] = dhcp_options
raw_content = properties.get("logicalNetwork", None)
if raw_content is not None:
properties["logicalNetwork"] = Resource.from_raw_data(raw_content)
subnetworks = []
for raw_subnet in properties.get("subnets", []):
raw_subnet["parentResourceID"] = raw_data["resourceId"]
subnetworks.append(SubNetworks.from_raw_data(raw_subnet))
properties["subnets"] = subnetworks
return super(VirtualNetworks, cls).process_raw_data(raw_data) |
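A hypothetical shape for the raw API payload this classmethod consumes; only the top-level key names come from the code above, and the values plus the from_raw_data entry point are assumptions.
raw_data = {
    "resourceId": "vnet-01",
    "properties": {
        "addressSpace": {},   # parsed by AddressSpace.from_raw_data
        "subnets": [],        # each subnet dict gets parentResourceID injected
    },
}
network = VirtualNetworks.from_raw_data(raw_data)  # assumed inherited entry point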
def Matches(self, file_entry):
"""Compares the file entry against the filter collection.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches one of the filters. If no filters
are provided or applicable the result will be True.
"""
if not self._filters:
return True
results = []
for file_entry_filter in self._filters:
result = file_entry_filter.Matches(file_entry)
results.append(result)
return True in results or False not in results | Compares the file entry against the filter collection.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches one of the filters. If no filters
are provided or applicable the result will be True. | Below is the the instruction that describes the task:
### Input:
Compares the file entry against the filter collection.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches one of the filters. If no filters
are provided or applicable the result will be True.
### Response:
def Matches(self, file_entry):
"""Compares the file entry against the filter collection.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches one of the filters. If no filters
are provided or applicable the result will be True.
"""
if not self._filters:
return True
results = []
for file_entry_filter in self._filters:
result = file_entry_filter.Matches(file_entry)
results.append(result)
return True in results or False not in results |
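A minimal sketch of the matching semantics using a stand-in filter; the collection class name is an assumption, only the _filters attribute and the Matches() contract come from the code above.
class AlwaysMatchFilter(object):
    """Stand-in filter used only for this illustration."""
    def Matches(self, file_entry):
        return True

collection = FileEntryFilterCollection()           # assumed owning class
collection._filters.append(AlwaysMatchFilter())
print(collection.Matches(file_entry=None))         # True: at least one filter matched
print(FileEntryFilterCollection().Matches(None))   # True: no filters means no restriction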
def _load_build(self):
"""See `pickle.py` in Python's source code."""
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build() | See `pickle.py` in Python's source code. | Below is the the instruction that describes the task:
### Input:
See `pickle.py` in Python's source code.
### Response:
def _load_build(self):
"""See `pickle.py` in Python's source code."""
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build() |
def initialize(self):
"""Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize()
"""
return self.DXclasses[self.type](self.id,**self.args) | Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize() | Below is the the instruction that describes the task:
### Input:
Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize()
### Response:
def initialize(self):
"""Initialize the corresponding DXclass from the data.
class = DXInitObject.initialize()
"""
return self.DXclasses[self.type](self.id,**self.args) |
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs):
"""
calculate the paleointensity magic parameters make some definitions
"""
if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
meth_key = 'method_codes'
beta_key = 'int_b_beta'
temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max'
dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
# convert dataframe to list of dictionaries
datablock = datablock.to_dict('records')
z_key = 'int_z'
drats_key = 'int_drats'
drat_key = 'int_drat'
md_key = 'int_md'
dec_key = 'dir_dec'
inc_key = 'dir_inc'
mad_key = 'int_mad_free'
dang_key = 'int_dang'
ptrm_key = 'int_n_ptrm'
theta_key = 'int_theta'
gamma_key = 'int_gamma'
delta_key = 'int_delta'
frac_key = 'int_frac'
gmax_key = 'int_gmax'
scat_key = 'int_scat'
else:
beta_key = 'specimen_b_beta'
meth_key = 'magic_method_codes'
temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max'
z_key = 'specimen_z'
drats_key = 'specimen_drats'
drat_key = 'specimen_drat'
md_key = 'specimen_md'
dec_key = 'specimen_dec'
inc_key = 'specimen_inc'
mad_key = 'specimen_int_mad'
dang_key = 'specimen_dang'
ptrm_key = 'specimen_int_ptrm_n'
theta_key = 'specimen_theta'
gamma_key = 'specimen_gamma'
delta_key = 'specimen_delta'
frac_key = 'specimen_frac'
gmax_key = 'specimen_gmax'
scat_key = 'specimen_scat'
first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", ""
zptrm_check = []
first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[
0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5]
if len(araiblock) > 6:
# used only for perpendicular method of paleointensity
ThetaChecks = araiblock[6]
# used only for perpendicular method of paleointensity
DeltaChecks = araiblock[7]
xi, yi, diffcum = [], [], 0
xiz, xzi, yiz, yzi = [], [], [], []
Nptrm, dmax = 0, -1e-22
# check if even zero and infield steps
if len(first_Z) > len(first_I):
maxe = len(first_I) - 1
else:
maxe = len(first_Z) - 1
if end == 0 or end > maxe:
end = maxe
# get the MAD, DANG, etc. for directional data
bstep = araiblock[0][start][0]
estep = araiblock[0][end][0]
zstart, zend = 0, len(zijdblock)
for k in range(len(zijdblock)):
zrec = zijdblock[k]
if zrec[0] == bstep:
zstart = k
if zrec[0] == estep:
zend = k
PCA = domean(zijdblock, zstart, zend, 'DE-BFL')
D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz
for rec in zijdblock:
D.append((rec[1], rec[2], rec[3]))
Du.append((rec[1], rec[2]))
if rec[4] == 1:
Dzi.append((rec[1], rec[2])) # if this is ZI step
else:
Diz.append((rec[1], rec[2])) # if this is IZ step
# calculate the vector difference sum
vds = dovds(D)
b_zi, b_iz = [], []
# collect data included in ZigZag calculation
if end + 1 >= len(first_Z):
stop = end - 1
else:
stop = end
for k in range(start, end + 1):
for l in range(len(first_I)):
irec = first_I[l]
if irec[0] == first_Z[k][0]:
xi.append(irec[3])
yi.append(first_Z[k][3])
pars, errcode = int_pars(xi, yi, vds)
if errcode == 1:
return pars, errcode
# for k in range(start,end+1):
for k in range(len(first_Z) - 1):
for l in range(k):
# only go down to 10% of NRM.....
if old_div(first_Z[k][3], vds) > 0.1:
irec = first_I[l]
if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step
xzi = irec[3]
yzi = first_Z[k][3]
xiz = first_I[l + 1][3]
yiz = first_Z[k + 1][3]
slope = np.arctan2((yzi - yiz), (xiz - xzi))
r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2)
if r > .1 * vds:
b_zi.append(slope) # suppress noise
elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step
xiz = irec[3]
yiz = first_Z[k][3]
xzi = first_I[l + 1][3]
yzi = first_Z[k + 1][3]
slope = np.arctan2((yiz - yzi), (xzi - xiz))
r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2)
if r > .1 * vds:
b_iz.append(slope) # suppress noise
#
ZigZag, Frat, Trat = -1, 0, 0
if len(Diz) > 2 and len(Dzi) > 2:
ZigZag = 0
dizp = fisher_mean(Diz) # get Fisher stats on IZ steps
dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps
dup = fisher_mean(Du) # get Fisher stats on all steps
#
# if directions are TOO well grouped, can get false positive for ftest, so
# angles must be > 3 degrees apart.
#
if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.:
F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \
(dup['n'] - dzip['r'] - dizp['r']
) # Watson test for common mean
            nf = 2. * (dup['n'] - 2.)  # number of degrees of freedom
ftest = fcalc(2, nf)
Frat = old_div(F, ftest)
if Frat > 1.:
ZigZag = Frat # fails zigzag on directions
methcode = "SM-FTEST"
# now do slopes
if len(b_zi) > 2 and len(b_iz) > 2:
bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev
biz_m, biz_sig = gausspars(b_iz)
n_zi = float(len(b_zi))
n_iz = float(len(b_iz))
b_diff = abs(bzi_m - biz_m) # difference in means
#
# avoid false positives - set 3 degree slope difference here too
if b_diff > 3 * np.pi / 180.:
nf = n_zi + n_iz - 2. # degrees of freedom
svar = old_div(((n_zi - 1.) * bzi_sig**2 +
(n_iz - 1.) * biz_sig**2), nf)
T = old_div((b_diff), np.sqrt(
svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t
ttest = tcalc(nf, .05) # t-test at 95% conf.
Trat = old_div(T, ttest)
if Trat > 1 and Trat > Frat:
ZigZag = Trat # fails zigzag on directions
methcode = "SM-TTEST"
pars[z_key] = ZigZag
pars[meth_key] = methcode
# do drats
if len(ptrm_check) != 0:
diffcum, drat_max = 0, 0
for prec in ptrm_check:
step = prec[0]
endbak = end
zend = end
while zend > len(zijdblock) - 1:
zend = zend - 2 # don't count alteration that happens after this step
if step < zijdblock[zend][0]:
Nptrm += 1
for irec in first_I:
if irec[0] == step:
break
diffcum += prec[3] - irec[3]
if abs(prec[3] - irec[3]) > drat_max:
drat_max = abs(prec[3] - irec[3])
pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3])
elif len(zptrm_check) != 0:
diffcum = 0
for prec in zptrm_check:
step = prec[0]
endbak = end
zend = end
while zend > len(zijdblock) - 1:
zend = zend - 1
if step < zijdblock[zend][0]:
Nptrm += 1
for irec in first_I:
if irec[0] == step:
break
diffcum += prec[3] - irec[3]
pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
else:
pars[drats_key] = -1
pars[drat_key] = -1
# and the pTRM tails
if len(ptrm_tail) != 0:
for trec in ptrm_tail:
step = trec[0]
for irec in first_I:
if irec[0] == step:
break
if abs(trec[3]) > dmax:
dmax = abs(trec[3])
pars[md_key] = (100 * dmax / vds)
else:
pars[md_key] = -1
pars[min_key] = bstep
pars[max_key] = estep
pars[dec_key] = PCA["specimen_dec"]
pars[inc_key] = PCA["specimen_inc"]
pars[mad_key] = PCA["specimen_mad"]
pars[dang_key] = PCA["specimen_dang"]
pars[ptrm_key] = Nptrm
# and the ThetaChecks
if ThetaChecks != "":
t = 0
for theta in ThetaChecks:
if theta[0] >= bstep and theta[0] <= estep and theta[1] > t:
t = theta[1]
pars[theta_key] = t
else:
pars[theta_key] = -1
# and the DeltaChecks
if DeltaChecks != "":
d = 0
for delta in DeltaChecks:
if delta[0] >= bstep and delta[0] <= estep and delta[1] > d:
d = delta[1]
        pars[delta_key] = d
else:
pars[delta_key] = -1
pars[gamma_key] = -1
if GammaChecks != "":
for gamma in GammaChecks:
if gamma[0] <= estep:
pars['specimen_gamma'] = gamma[1]
# --------------------------------------------------------------
# From here added By Ron Shaar 11-Dec 2012
# New parameters defined in Shaar and Tauxe (2012):
# FRAC (specimen_frac) - ranges from 0. to 1.
# SCAT (specimen_scat) - takes 1/0
# gap_max (specimen_gmax) - ranges from 0. to 1.
# --------------------------------------------------------------
# --------------------------------------------------------------
# FRAC is similar to Fvds, but the numerator is the vds fraction:
# FRAC= [ vds (start,end)] / total vds ]
# gap_max= max [ (vector difference) / vds (start,end)]
# --------------------------------------------------------------
# collect all zijderveld data to arrays and calculate VDS
z_temperatures = [row[0] for row in zijdblock]
    zdata = []  # array of zero-field measurements in Cartesian coordinates
# array of vector differences (for vds calculation)
vector_diffs = []
NRM = zijdblock[0][3] # NRM
for k in range(len(zijdblock)):
DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)]
cart = dir2cart(DIR)
zdata.append(np.array([cart[0], cart[1], cart[2]]))
if k > 0:
vector_diffs.append(
np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2)))
# last vector difference: from the last point to the origin.
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2)))
vds = sum(vector_diffs) # vds calculation
zdata = np.array(zdata)
vector_diffs = np.array(vector_diffs)
# calculate the vds within the chosen segment
vector_diffs_segment = vector_diffs[zstart:zend]
# FRAC calculation
FRAC = old_div(sum(vector_diffs_segment), vds)
pars[frac_key] = FRAC
# gap_max calculation
max_FRAC_gap = max(
old_div(vector_diffs_segment, sum(vector_diffs_segment)))
pars[gmax_key] = max_FRAC_gap
# ---------------------------------------------------------------------
# Calculate the "scat box"
# all data-points, pTRM checks, and tail-checks, should be inside a "scat box"
# ---------------------------------------------------------------------
    # initialization
# fail scat due to arai plot data points
pars["fail_arai_beta_box_scatter"] = False
pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks
pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks
pars[scat_key] = "t" # Pass by default
# --------------------------------------------------------------
# collect all Arai plot data points in arrays
x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], []
NRMs = araiblock[0]
PTRMs = araiblock[1]
ptrm_checks = araiblock[2]
ptrm_tail = araiblock[3]
PTRMs_temperatures = [row[0] for row in PTRMs]
NRMs_temperatures = [row[0] for row in NRMs]
NRM = NRMs[0][3]
for k in range(len(NRMs)):
index_pTRMs = PTRMs_temperatures.index(NRMs[k][0])
x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM))
y_Arai.append(old_div(NRMs[k][3], NRM))
t_Arai.append(NRMs[k][0])
if NRMs[k][4] == 1:
steps_Arai.append('ZI')
else:
steps_Arai.append('IZ')
x_Arai = np.array(x_Arai)
y_Arai = np.array(y_Arai)
# --------------------------------------------------------------
# collect all pTRM check to arrays
x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], []
x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], []
for k in range(len(ptrm_checks)):
if ptrm_checks[k][0] in NRMs_temperatures:
# find the starting point of the pTRM check:
for i in range(len(datablock)):
rec = datablock[i]
if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]:
starting_temperature = (float(datablock[i - 1][temp_key]))
try:
index = t_Arai.index(starting_temperature)
x_ptrm_check_starting_point.append(x_Arai[index])
y_ptrm_check_starting_point.append(y_Arai[index])
ptrm_checks_starting_temperatures.append(
starting_temperature)
index_zerofield = zerofield_temperatures.index(
ptrm_checks[k][0])
x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM))
y_ptrm_check.append(
old_div(zerofields[index_zerofield][3], NRM))
ptrm_checks_temperatures.append(ptrm_checks[k][0])
break
except:
pass
x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point)
y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point)
ptrm_checks_starting_temperatures = np.array(
ptrm_checks_starting_temperatures)
x_ptrm_check = np.array(x_ptrm_check)
y_ptrm_check = np.array(y_ptrm_check)
ptrm_checks_temperatures = np.array(ptrm_checks_temperatures)
# --------------------------------------------------------------
# collect tail checks to arrays
x_tail_check, y_tail_check, tail_check_temperatures = [], [], []
x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], []
for k in range(len(ptrm_tail)):
if ptrm_tail[k][0] in NRMs_temperatures:
# find the starting point of the pTRM check:
for i in range(len(datablock)):
rec = datablock[i]
if "LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]:
starting_temperature = (float(datablock[i - 1][temp_key]))
try:
index = t_Arai.index(starting_temperature)
x_tail_check_starting_point.append(x_Arai[index])
y_tail_check_starting_point.append(y_Arai[index])
tail_checks_starting_temperatures.append(
starting_temperature)
index_infield = infield_temperatures.index(
ptrm_tail[k][0])
x_tail_check.append(
old_div(infields[index_infield][3], NRM))
y_tail_check.append(
old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM))
tail_check_temperatures.append(ptrm_tail[k][0])
break
except:
pass
x_tail_check = np.array(x_tail_check)
y_tail_check = np.array(y_tail_check)
tail_check_temperatures = np.array(tail_check_temperatures)
x_tail_check_starting_point = np.array(x_tail_check_starting_point)
y_tail_check_starting_point = np.array(y_tail_check_starting_point)
tail_checks_starting_temperatures = np.array(
tail_checks_starting_temperatures)
# --------------------------------------------------------------
# collect the chosen segment in the Arai plot to arrays
    x_Arai_segment = x_Arai[start:end + 1]  # chosen segment in the Arai plot
    y_Arai_segment = y_Arai[start:end + 1]  # chosen segment in the Arai plot
# --------------------------------------------------------------
# collect pTRM checks in segment to arrays
# notice, this is different than the conventional DRATS.
# for scat calculation we take only the pTRM checks which were carried out
# before reaching the highest temperature in the chosen segment
x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], []
for k in range(len(ptrm_checks_temperatures)):
        if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures[k] <= pars[max_key]:
x_ptrm_check_for_SCAT.append(x_ptrm_check[k])
y_ptrm_check_for_SCAT.append(y_ptrm_check[k])
x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT)
y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT)
# --------------------------------------------------------------
# collect Tail checks in segment to arrays
# for scat calculation we take only the tail checks which were carried out
# before reaching the highest temperature in the chosen segment
x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], []
for k in range(len(tail_check_temperatures)):
if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]:
x_tail_check_for_SCAT.append(x_tail_check[k])
y_tail_check_for_SCAT.append(y_tail_check[k])
x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT)
y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT)
# --------------------------------------------------------------
# calculate the lines that define the scat box:
# if threshold value for beta is not defined, then scat cannot be calculated (pass)
# in this case, scat pass
if beta_key in list(accept.keys()) and accept[beta_key] != "":
b_beta_threshold = float(accept[beta_key])
b = pars[b_key] # best fit line
cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass
cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass
a = cm_y - b * cm_x
# lines with slope = slope +/- 2*(specimen_b_beta)
two_sigma_beta_threshold = 2 * b_beta_threshold
two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b)
# a line with a shallower slope (b + 2*beta*b) passing through the center of mass
# y=a1+b1x
b1 = b + two_sigma_slope_threshold
a1 = cm_y - b1 * cm_x
# bounding line with steeper slope (b - 2*beta*b) passing through the center of mass
# y=a2+b2x
b2 = b - two_sigma_slope_threshold
a2 = cm_y - b2 * cm_x
# lower bounding line of the 'beta box'
# y=intercept1+slop1x
slop1 = old_div(a1, ((old_div(a2, b2))))
intercept1 = a1
# higher bounding line of the 'beta box'
# y=intercept2+slop2x
slop2 = old_div(a2, ((old_div(a1, b1))))
intercept2 = a2
pars['specimen_scat_bounding_line_high'] = [intercept2, slop2]
pars['specimen_scat_bounding_line_low'] = [intercept1, slop1]
# --------------------------------------------------------------
# check if the Arai data points are in the 'box'
# the two bounding lines
ymin = intercept1 + x_Arai_segment * slop1
ymax = intercept2 + x_Arai_segment * slop2
# arrays of "True" or "False"
check_1 = y_Arai_segment > ymax
check_2 = y_Arai_segment < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_arai_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if the pTRM checks data points are in the 'box'
if len(x_ptrm_check_for_SCAT) > 0:
# the two bounding lines
ymin = intercept1 + x_ptrm_check_for_SCAT * slop1
ymax = intercept2 + x_ptrm_check_for_SCAT * slop2
# arrays of "True" or "False"
check_1 = y_ptrm_check_for_SCAT > ymax
check_2 = y_ptrm_check_for_SCAT < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_ptrm_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if the tail checks data points are in the 'box'
if len(x_tail_check_for_SCAT) > 0:
# the two bounding lines
ymin = intercept1 + x_tail_check_for_SCAT * slop1
ymax = intercept2 + x_tail_check_for_SCAT * slop2
# arrays of "True" or "False"
check_1 = y_tail_check_for_SCAT > ymax
check_2 = y_tail_check_for_SCAT < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_tail_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if specimen_scat is PASS or FAIL:
if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]:
pars[scat_key] = 'f'
else:
pars[scat_key] = 't'
return pars, 0 | calculate the paleointensity magic parameters make some definitions | Below is the the instruction that describes the task:
### Input:
calculate the paleointensity magic parameters make some definitions
### Response:
def PintPars(datablock, araiblock, zijdblock, start, end, accept, **kwargs):
"""
calculate the paleointensity magic parameters make some definitions
"""
if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
meth_key = 'method_codes'
beta_key = 'int_b_beta'
temp_key, min_key, max_key = 'treat_temp', 'meas_step_min', 'meas_step_max'
dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
# convert dataframe to list of dictionaries
datablock = datablock.to_dict('records')
z_key = 'int_z'
drats_key = 'int_drats'
drat_key = 'int_drat'
md_key = 'int_md'
dec_key = 'dir_dec'
inc_key = 'dir_inc'
mad_key = 'int_mad_free'
dang_key = 'int_dang'
ptrm_key = 'int_n_ptrm'
theta_key = 'int_theta'
gamma_key = 'int_gamma'
delta_key = 'int_delta'
frac_key = 'int_frac'
gmax_key = 'int_gmax'
scat_key = 'int_scat'
else:
beta_key = 'specimen_b_beta'
meth_key = 'magic_method_codes'
temp_key, min_key, max_key = 'treatment_temp', 'measurement_step_min', 'measurement_step_max'
z_key = 'specimen_z'
drats_key = 'specimen_drats'
drat_key = 'specimen_drat'
md_key = 'specimen_md'
dec_key = 'specimen_dec'
inc_key = 'specimen_inc'
mad_key = 'specimen_int_mad'
dang_key = 'specimen_dang'
ptrm_key = 'specimen_int_ptrm_n'
theta_key = 'specimen_theta'
gamma_key = 'specimen_gamma'
delta_key = 'specimen_delta'
frac_key = 'specimen_frac'
gmax_key = 'specimen_gmax'
scat_key = 'specimen_scat'
first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
methcode, ThetaChecks, DeltaChecks, GammaChecks = "", "", "", ""
zptrm_check = []
first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks = araiblock[
0], araiblock[1], araiblock[2], araiblock[3], araiblock[4], araiblock[5]
if len(araiblock) > 6:
# used only for perpendicular method of paleointensity
ThetaChecks = araiblock[6]
# used only for perpendicular method of paleointensity
DeltaChecks = araiblock[7]
xi, yi, diffcum = [], [], 0
xiz, xzi, yiz, yzi = [], [], [], []
Nptrm, dmax = 0, -1e-22
# check if even zero and infield steps
if len(first_Z) > len(first_I):
maxe = len(first_I) - 1
else:
maxe = len(first_Z) - 1
if end == 0 or end > maxe:
end = maxe
# get the MAD, DANG, etc. for directional data
bstep = araiblock[0][start][0]
estep = araiblock[0][end][0]
zstart, zend = 0, len(zijdblock)
for k in range(len(zijdblock)):
zrec = zijdblock[k]
if zrec[0] == bstep:
zstart = k
if zrec[0] == estep:
zend = k
PCA = domean(zijdblock, zstart, zend, 'DE-BFL')
D, Diz, Dzi, Du = [], [], [], [] # list of NRM vectors, and separated by zi and iz
for rec in zijdblock:
D.append((rec[1], rec[2], rec[3]))
Du.append((rec[1], rec[2]))
if rec[4] == 1:
Dzi.append((rec[1], rec[2])) # if this is ZI step
else:
Diz.append((rec[1], rec[2])) # if this is IZ step
# calculate the vector difference sum
vds = dovds(D)
b_zi, b_iz = [], []
# collect data included in ZigZag calculation
if end + 1 >= len(first_Z):
stop = end - 1
else:
stop = end
for k in range(start, end + 1):
for l in range(len(first_I)):
irec = first_I[l]
if irec[0] == first_Z[k][0]:
xi.append(irec[3])
yi.append(first_Z[k][3])
pars, errcode = int_pars(xi, yi, vds)
if errcode == 1:
return pars, errcode
# for k in range(start,end+1):
for k in range(len(first_Z) - 1):
for l in range(k):
# only go down to 10% of NRM.....
if old_div(first_Z[k][3], vds) > 0.1:
irec = first_I[l]
if irec[4] == 1 and first_I[l + 1][4] == 0: # a ZI step
xzi = irec[3]
yzi = first_Z[k][3]
xiz = first_I[l + 1][3]
yiz = first_Z[k + 1][3]
slope = np.arctan2((yzi - yiz), (xiz - xzi))
r = np.sqrt((yzi - yiz)**2 + (xiz - xzi)**2)
if r > .1 * vds:
b_zi.append(slope) # suppress noise
elif irec[4] == 0 and first_I[l + 1][4] == 1: # an IZ step
xiz = irec[3]
yiz = first_Z[k][3]
xzi = first_I[l + 1][3]
yzi = first_Z[k + 1][3]
slope = np.arctan2((yiz - yzi), (xzi - xiz))
r = np.sqrt((yiz - yzi)**2 + (xzi - xiz)**2)
if r > .1 * vds:
b_iz.append(slope) # suppress noise
#
ZigZag, Frat, Trat = -1, 0, 0
if len(Diz) > 2 and len(Dzi) > 2:
ZigZag = 0
dizp = fisher_mean(Diz) # get Fisher stats on IZ steps
dzip = fisher_mean(Dzi) # get Fisher stats on ZI steps
dup = fisher_mean(Du) # get Fisher stats on all steps
#
# if directions are TOO well grouped, can get false positive for ftest, so
# angles must be > 3 degrees apart.
#
if angle([dizp['dec'], dizp['inc']], [dzip['dec'], dzip['inc']]) > 3.:
F = (dup['n'] - 2.) * (dzip['r'] + dizp['r'] - dup['r']) / \
(dup['n'] - dzip['r'] - dizp['r']
) # Watson test for common mean
            nf = 2. * (dup['n'] - 2.)  # number of degrees of freedom
ftest = fcalc(2, nf)
Frat = old_div(F, ftest)
if Frat > 1.:
ZigZag = Frat # fails zigzag on directions
methcode = "SM-FTEST"
# now do slopes
if len(b_zi) > 2 and len(b_iz) > 2:
bzi_m, bzi_sig = gausspars(b_zi) # mean, std dev
biz_m, biz_sig = gausspars(b_iz)
n_zi = float(len(b_zi))
n_iz = float(len(b_iz))
b_diff = abs(bzi_m - biz_m) # difference in means
#
# avoid false positives - set 3 degree slope difference here too
if b_diff > 3 * np.pi / 180.:
nf = n_zi + n_iz - 2. # degrees of freedom
svar = old_div(((n_zi - 1.) * bzi_sig**2 +
(n_iz - 1.) * biz_sig**2), nf)
T = old_div((b_diff), np.sqrt(
svar * (old_div(1.0, n_zi) + old_div(1.0, n_iz)))) # student's t
ttest = tcalc(nf, .05) # t-test at 95% conf.
Trat = old_div(T, ttest)
if Trat > 1 and Trat > Frat:
ZigZag = Trat # fails zigzag on directions
methcode = "SM-TTEST"
pars[z_key] = ZigZag
pars[meth_key] = methcode
# do drats
if len(ptrm_check) != 0:
diffcum, drat_max = 0, 0
for prec in ptrm_check:
step = prec[0]
endbak = end
zend = end
while zend > len(zijdblock) - 1:
zend = zend - 2 # don't count alteration that happens after this step
if step < zijdblock[zend][0]:
Nptrm += 1
for irec in first_I:
if irec[0] == step:
break
diffcum += prec[3] - irec[3]
if abs(prec[3] - irec[3]) > drat_max:
drat_max = abs(prec[3] - irec[3])
pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
pars[drat_key] = (100 * abs(drat_max) / first_I[zend][3])
elif len(zptrm_check) != 0:
diffcum = 0
for prec in zptrm_check:
step = prec[0]
endbak = end
zend = end
while zend > len(zijdblock) - 1:
zend = zend - 1
if step < zijdblock[zend][0]:
Nptrm += 1
for irec in first_I:
if irec[0] == step:
break
diffcum += prec[3] - irec[3]
pars[drats_key] = (100 * abs(diffcum) / first_I[zend][3])
else:
pars[drats_key] = -1
pars[drat_key] = -1
# and the pTRM tails
if len(ptrm_tail) != 0:
for trec in ptrm_tail:
step = trec[0]
for irec in first_I:
if irec[0] == step:
break
if abs(trec[3]) > dmax:
dmax = abs(trec[3])
pars[md_key] = (100 * dmax / vds)
else:
pars[md_key] = -1
pars[min_key] = bstep
pars[max_key] = estep
pars[dec_key] = PCA["specimen_dec"]
pars[inc_key] = PCA["specimen_inc"]
pars[mad_key] = PCA["specimen_mad"]
pars[dang_key] = PCA["specimen_dang"]
pars[ptrm_key] = Nptrm
# and the ThetaChecks
if ThetaChecks != "":
t = 0
for theta in ThetaChecks:
if theta[0] >= bstep and theta[0] <= estep and theta[1] > t:
t = theta[1]
pars[theta_key] = t
else:
pars[theta_key] = -1
# and the DeltaChecks
if DeltaChecks != "":
d = 0
for delta in DeltaChecks:
if delta[0] >= bstep and delta[0] <= estep and delta[1] > d:
d = delta[1]
        pars[delta_key] = d
else:
pars[delta_key] = -1
pars[gamma_key] = -1
if GammaChecks != "":
for gamma in GammaChecks:
if gamma[0] <= estep:
pars['specimen_gamma'] = gamma[1]
# --------------------------------------------------------------
# From here added By Ron Shaar 11-Dec 2012
# New parameters defined in Shaar and Tauxe (2012):
# FRAC (specimen_frac) - ranges from 0. to 1.
# SCAT (specimen_scat) - takes 1/0
# gap_max (specimen_gmax) - ranges from 0. to 1.
# --------------------------------------------------------------
# --------------------------------------------------------------
# FRAC is similar to Fvds, but the numerator is the vds fraction:
# FRAC= [ vds (start,end)] / total vds ]
# gap_max= max [ (vector difference) / vds (start,end)]
# --------------------------------------------------------------
# collect all zijderveld data to arrays and calculate VDS
z_temperatures = [row[0] for row in zijdblock]
    zdata = []  # array of zero-field measurements in Cartesian coordinates
# array of vector differences (for vds calculation)
vector_diffs = []
NRM = zijdblock[0][3] # NRM
for k in range(len(zijdblock)):
DIR = [zijdblock[k][1], zijdblock[k][2], old_div(zijdblock[k][3], NRM)]
cart = dir2cart(DIR)
zdata.append(np.array([cart[0], cart[1], cart[2]]))
if k > 0:
vector_diffs.append(
np.sqrt(sum((np.array(zdata[-2]) - np.array(zdata[-1]))**2)))
# last vector difference: from the last point to the origin.
vector_diffs.append(np.sqrt(sum(np.array(zdata[-1])**2)))
vds = sum(vector_diffs) # vds calculation
zdata = np.array(zdata)
vector_diffs = np.array(vector_diffs)
# calculate the vds within the chosen segment
vector_diffs_segment = vector_diffs[zstart:zend]
# FRAC calculation
FRAC = old_div(sum(vector_diffs_segment), vds)
pars[frac_key] = FRAC
# gap_max calculation
max_FRAC_gap = max(
old_div(vector_diffs_segment, sum(vector_diffs_segment)))
pars[gmax_key] = max_FRAC_gap
# ---------------------------------------------------------------------
# Calculate the "scat box"
# all data-points, pTRM checks, and tail-checks, should be inside a "scat box"
# ---------------------------------------------------------------------
    # initialization
# fail scat due to arai plot data points
pars["fail_arai_beta_box_scatter"] = False
pars["fail_ptrm_beta_box_scatter"] = False # fail scat due to pTRM checks
pars["fail_tail_beta_box_scatter"] = False # fail scat due to tail checks
pars[scat_key] = "t" # Pass by default
# --------------------------------------------------------------
# collect all Arai plot data points in arrays
x_Arai, y_Arai, t_Arai, steps_Arai = [], [], [], []
NRMs = araiblock[0]
PTRMs = araiblock[1]
ptrm_checks = araiblock[2]
ptrm_tail = araiblock[3]
PTRMs_temperatures = [row[0] for row in PTRMs]
NRMs_temperatures = [row[0] for row in NRMs]
NRM = NRMs[0][3]
for k in range(len(NRMs)):
index_pTRMs = PTRMs_temperatures.index(NRMs[k][0])
x_Arai.append(old_div(PTRMs[index_pTRMs][3], NRM))
y_Arai.append(old_div(NRMs[k][3], NRM))
t_Arai.append(NRMs[k][0])
if NRMs[k][4] == 1:
steps_Arai.append('ZI')
else:
steps_Arai.append('IZ')
x_Arai = np.array(x_Arai)
y_Arai = np.array(y_Arai)
# --------------------------------------------------------------
# collect all pTRM check to arrays
x_ptrm_check, y_ptrm_check, ptrm_checks_temperatures, = [], [], []
x_ptrm_check_starting_point, y_ptrm_check_starting_point, ptrm_checks_starting_temperatures = [], [], []
for k in range(len(ptrm_checks)):
if ptrm_checks[k][0] in NRMs_temperatures:
# find the starting point of the pTRM check:
for i in range(len(datablock)):
rec = datablock[i]
if "LT-PTRM-I" in rec[meth_key] and float(rec[temp_key]) == ptrm_checks[k][0]:
starting_temperature = (float(datablock[i - 1][temp_key]))
try:
index = t_Arai.index(starting_temperature)
x_ptrm_check_starting_point.append(x_Arai[index])
y_ptrm_check_starting_point.append(y_Arai[index])
ptrm_checks_starting_temperatures.append(
starting_temperature)
index_zerofield = zerofield_temperatures.index(
ptrm_checks[k][0])
x_ptrm_check.append(old_div(ptrm_checks[k][3], NRM))
y_ptrm_check.append(
old_div(zerofields[index_zerofield][3], NRM))
ptrm_checks_temperatures.append(ptrm_checks[k][0])
break
except:
pass
x_ptrm_check_starting_point = np.array(x_ptrm_check_starting_point)
y_ptrm_check_starting_point = np.array(y_ptrm_check_starting_point)
ptrm_checks_starting_temperatures = np.array(
ptrm_checks_starting_temperatures)
x_ptrm_check = np.array(x_ptrm_check)
y_ptrm_check = np.array(y_ptrm_check)
ptrm_checks_temperatures = np.array(ptrm_checks_temperatures)
# --------------------------------------------------------------
# collect tail checks to arrays
x_tail_check, y_tail_check, tail_check_temperatures = [], [], []
x_tail_check_starting_point, y_tail_check_starting_point, tail_checks_starting_temperatures = [], [], []
for k in range(len(ptrm_tail)):
if ptrm_tail[k][0] in NRMs_temperatures:
# find the starting point of the pTRM check:
for i in range(len(datablock)):
rec = datablock[i]
if "LT-PTRM-MD" in rec[meth_key] and float(rec[temp_key]) == ptrm_tail[k][0]:
starting_temperature = (float(datablock[i - 1][temp_key]))
try:
index = t_Arai.index(starting_temperature)
x_tail_check_starting_point.append(x_Arai[index])
y_tail_check_starting_point.append(y_Arai[index])
tail_checks_starting_temperatures.append(
starting_temperature)
index_infield = infield_temperatures.index(
ptrm_tail[k][0])
x_tail_check.append(
old_div(infields[index_infield][3], NRM))
y_tail_check.append(
old_div(ptrm_tail[k][3], NRM) + old_div(zerofields[index_infield][3], NRM))
tail_check_temperatures.append(ptrm_tail[k][0])
break
except:
pass
x_tail_check = np.array(x_tail_check)
y_tail_check = np.array(y_tail_check)
tail_check_temperatures = np.array(tail_check_temperatures)
x_tail_check_starting_point = np.array(x_tail_check_starting_point)
y_tail_check_starting_point = np.array(y_tail_check_starting_point)
tail_checks_starting_temperatures = np.array(
tail_checks_starting_temperatures)
# --------------------------------------------------------------
# collect the chosen segment in the Arai plot to arrays
    x_Arai_segment = x_Arai[start:end + 1]  # chosen segment in the Arai plot
    y_Arai_segment = y_Arai[start:end + 1]  # chosen segment in the Arai plot
# --------------------------------------------------------------
# collect pTRM checks in segment to arrays
# notice, this is different than the conventional DRATS.
# for scat calculation we take only the pTRM checks which were carried out
# before reaching the highest temperature in the chosen segment
x_ptrm_check_for_SCAT, y_ptrm_check_for_SCAT = [], []
for k in range(len(ptrm_checks_temperatures)):
        if ptrm_checks_temperatures[k] >= pars[min_key] and ptrm_checks_starting_temperatures[k] <= pars[max_key]:
x_ptrm_check_for_SCAT.append(x_ptrm_check[k])
y_ptrm_check_for_SCAT.append(y_ptrm_check[k])
x_ptrm_check_for_SCAT = np.array(x_ptrm_check_for_SCAT)
y_ptrm_check_for_SCAT = np.array(y_ptrm_check_for_SCAT)
# --------------------------------------------------------------
# collect Tail checks in segment to arrays
# for scat calculation we take only the tail checks which were carried out
# before reaching the highest temperature in the chosen segment
x_tail_check_for_SCAT, y_tail_check_for_SCAT = [], []
for k in range(len(tail_check_temperatures)):
if tail_check_temperatures[k] >= pars[min_key] and tail_checks_starting_temperatures[k] <= pars[max_key]:
x_tail_check_for_SCAT.append(x_tail_check[k])
y_tail_check_for_SCAT.append(y_tail_check[k])
x_tail_check_for_SCAT = np.array(x_tail_check_for_SCAT)
y_tail_check_for_SCAT = np.array(y_tail_check_for_SCAT)
# --------------------------------------------------------------
# calculate the lines that define the scat box:
# if threshold value for beta is not defined, then scat cannot be calculated (pass)
# in this case, scat pass
if beta_key in list(accept.keys()) and accept[beta_key] != "":
b_beta_threshold = float(accept[beta_key])
b = pars[b_key] # best fit line
cm_x = np.mean(np.array(x_Arai_segment)) # x center of mass
cm_y = np.mean(np.array(y_Arai_segment)) # y center of mass
a = cm_y - b * cm_x
# lines with slope = slope +/- 2*(specimen_b_beta)
two_sigma_beta_threshold = 2 * b_beta_threshold
two_sigma_slope_threshold = abs(two_sigma_beta_threshold * b)
# a line with a shallower slope (b + 2*beta*b) passing through the center of mass
# y=a1+b1x
b1 = b + two_sigma_slope_threshold
a1 = cm_y - b1 * cm_x
# bounding line with steeper slope (b - 2*beta*b) passing through the center of mass
# y=a2+b2x
b2 = b - two_sigma_slope_threshold
a2 = cm_y - b2 * cm_x
# lower bounding line of the 'beta box'
# y=intercept1+slop1x
slop1 = old_div(a1, ((old_div(a2, b2))))
intercept1 = a1
# higher bounding line of the 'beta box'
# y=intercept2+slop2x
slop2 = old_div(a2, ((old_div(a1, b1))))
intercept2 = a2
pars['specimen_scat_bounding_line_high'] = [intercept2, slop2]
pars['specimen_scat_bounding_line_low'] = [intercept1, slop1]
# --------------------------------------------------------------
# check if the Arai data points are in the 'box'
# the two bounding lines
ymin = intercept1 + x_Arai_segment * slop1
ymax = intercept2 + x_Arai_segment * slop2
# arrays of "True" or "False"
check_1 = y_Arai_segment > ymax
check_2 = y_Arai_segment < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_arai_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if the pTRM checks data points are in the 'box'
if len(x_ptrm_check_for_SCAT) > 0:
# the two bounding lines
ymin = intercept1 + x_ptrm_check_for_SCAT * slop1
ymax = intercept2 + x_ptrm_check_for_SCAT * slop2
# arrays of "True" or "False"
check_1 = y_ptrm_check_for_SCAT > ymax
check_2 = y_ptrm_check_for_SCAT < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_ptrm_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if the tail checks data points are in the 'box'
if len(x_tail_check_for_SCAT) > 0:
# the two bounding lines
ymin = intercept1 + x_tail_check_for_SCAT * slop1
ymax = intercept2 + x_tail_check_for_SCAT * slop2
# arrays of "True" or "False"
check_1 = y_tail_check_for_SCAT > ymax
check_2 = y_tail_check_for_SCAT < ymin
# check if at least one "True"
if (sum(check_1) + sum(check_2)) > 0:
pars["fail_tail_beta_box_scatter"] = True
# --------------------------------------------------------------
# check if specimen_scat is PASS or FAIL:
if pars["fail_tail_beta_box_scatter"] or pars["fail_ptrm_beta_box_scatter"] or pars["fail_arai_beta_box_scatter"]:
pars[scat_key] = 'f'
else:
pars[scat_key] = 't'
return pars, 0 |
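A worked numeric sketch of the beta "scat box" bounding slopes computed near the end of PintPars; all numbers are invented for illustration.
b, cm_x, cm_y, b_beta_threshold = -1.0, 0.5, 0.5, 0.1       # slope, centre of mass, threshold
two_sigma_slope_threshold = abs(2 * b_beta_threshold * b)   # 0.2
b1 = b + two_sigma_slope_threshold                          # -0.8, the shallower slope
b2 = b - two_sigma_slope_threshold                          # -1.2, the steeper slope
a1, a2 = cm_y - b1 * cm_x, cm_y - b2 * cm_x                 # intercepts 0.9 and 1.1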
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page) | Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to | Below is the the instruction that describes the task:
### Input:
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
### Response:
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page) |
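A hypothetical call from other view code; the next_page URL is invented, and whether the CAS server is visited first depends on settings.CAS_LOGOUT_COMPLETELY as shown above.
response = logout(request, next_page='/logged-out/')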
def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
self.stat = None
if isinstance(self.stat_type, QualifiedName):
stat = self.stat_type
elif self.stat_type is not None:
if self.stat_type.lower() == "t":
stat = STATO_TSTATISTIC
elif self.stat_type.lower() == "z":
stat = STATO_ZSTATISTIC
elif self.stat_type.lower() == "f":
stat = STATO_FSTATISTIC
elif self.stat_type.startswith('http'):
stat = Identifier(self.stat_type)
self.add_attributes((
(PROV['type'], STATO_CONTRAST_WEIGHT_MATRIX),
(NIDM_STATISTIC_TYPE, stat),
(PROV['label'], self.label),
(NIDM_CONTRAST_NAME, self.contrast_name),
(PROV['value'], json.dumps(self.contrast_weights)))) | Create prov graph. | Below is the the instruction that describes the task:
### Input:
Create prov graph.
### Response:
def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
self.stat = None
if isinstance(self.stat_type, QualifiedName):
stat = self.stat_type
elif self.stat_type is not None:
if self.stat_type.lower() == "t":
stat = STATO_TSTATISTIC
elif self.stat_type.lower() == "z":
stat = STATO_ZSTATISTIC
elif self.stat_type.lower() == "f":
stat = STATO_FSTATISTIC
elif self.stat_type.startswith('http'):
stat = Identifier(self.stat_type)
self.add_attributes((
(PROV['type'], STATO_CONTRAST_WEIGHT_MATRIX),
(NIDM_STATISTIC_TYPE, stat),
(PROV['label'], self.label),
(NIDM_CONTRAST_NAME, self.contrast_name),
(PROV['value'], json.dumps(self.contrast_weights)))) |
def doane(data):
"""
Modified Doane modified
"""
from scipy.stats import skew
n = len(data)
sigma = np.sqrt(6. * (n - 2.) / (n + 1.) / (n + 3.))
return 1 + np.log2(n) + \
np.log2(1 + np.abs(skew(data)) / sigma) | Modified Doane modified | Below is the the instruction that describes the task:
### Input:
Modified Doane modified
### Response:
def doane(data):
"""
Modified Doane modified
"""
from scipy.stats import skew
n = len(data)
sigma = np.sqrt(6. * (n - 2.) / (n + 1.) / (n + 3.))
return 1 + np.log2(n) + \
np.log2(1 + np.abs(skew(data)) / sigma) |
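A usage sketch feeding the Doane estimate into numpy's histogram; the sample data are invented.
import numpy as np
data = np.random.default_rng(0).normal(size=500)
nbins = int(np.ceil(doane(data)))              # bin count from the Doane rule
counts, edges = np.histogram(data, bins=nbins)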
def monte_carlo_vol(self, ctrs, ndraws=10000, rstate=None,
return_overlap=False, kdtree=None):
"""Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of cubes. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube. Uses a K-D Tree
to perform the search if provided."""
if rstate is None:
rstate = np.random
# Estimate the volume using Monte Carlo integration.
samples = [self.sample(ctrs, rstate=rstate, return_q=True,
kdtree=kdtree)
for i in range(ndraws)]
qsum = sum([q for (x, q) in samples])
vol = 1. * ndraws / qsum * len(ctrs) * self.vol_cube
if return_overlap:
# Estimate the fractional overlap with the unit cube using
# the same set of samples.
qin = sum([q * unitcheck(x) for (x, q) in samples])
overlap = 1. * qin / qsum
return vol, overlap
else:
return vol | Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of cubes. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube. Uses a K-D Tree
to perform the search if provided. | Below is the the instruction that describes the task:
### Input:
Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of cubes. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube. Uses a K-D Tree
to perform the search if provided.
### Response:
def monte_carlo_vol(self, ctrs, ndraws=10000, rstate=None,
return_overlap=False, kdtree=None):
"""Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of cubes. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube. Uses a K-D Tree
to perform the search if provided."""
if rstate is None:
rstate = np.random
# Estimate the volume using Monte Carlo integration.
samples = [self.sample(ctrs, rstate=rstate, return_q=True,
kdtree=kdtree)
for i in range(ndraws)]
qsum = sum([q for (x, q) in samples])
vol = 1. * ndraws / qsum * len(ctrs) * self.vol_cube
if return_overlap:
# Estimate the fractional overlap with the unit cube using
# the same set of samples.
qin = sum([q * unitcheck(x) for (x, q) in samples])
overlap = 1. * qin / qsum
return vol, overlap
else:
return vol |
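A worked sketch of the estimator above with invented numbers; qsum is the summed per-draw multiplicity q returned by self.sample().
ndraws, qsum, n_ctrs, vol_cube = 10000, 12500.0, 5, 0.008
vol = 1. * ndraws / qsum * n_ctrs * vol_cube   # 0.032, the estimated union volume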
def create_nic(client, target, nic):
"""Return a NIC spec"""
# Iterate through the networks and look for one matching
# the requested name
for network in target.network:
if network.name == nic["network_name"]:
net = network
break
else:
return None
# Success! Create a nic attached to this network
backing = client.create("VirtualEthernetCardNetworkBackingInfo")
backing.deviceName = nic["network_name"]
backing.network = net
connect_info = client.create("VirtualDeviceConnectInfo")
connect_info.allowGuestControl = True
connect_info.connected = False
connect_info.startConnected = True
new_nic = client.create(nic["type"])
new_nic.backing = backing
new_nic.key = 2
# TODO: Work out a way to automatically increment this
new_nic.unitNumber = 1
new_nic.addressType = "generated"
new_nic.connectable = connect_info
nic_spec = client.create("VirtualDeviceConfigSpec")
nic_spec.device = new_nic
nic_spec.fileOperation = None
operation = client.create("VirtualDeviceConfigSpecOperation")
nic_spec.operation = (operation.add)
return nic_spec | Return a NIC spec | Below is the the instruction that describes the task:
### Input:
Return a NIC spec
### Response:
def create_nic(client, target, nic):
"""Return a NIC spec"""
# Iterate through the networks and look for one matching
# the requested name
for network in target.network:
if network.name == nic["network_name"]:
net = network
break
else:
return None
# Success! Create a nic attached to this network
backing = client.create("VirtualEthernetCardNetworkBackingInfo")
backing.deviceName = nic["network_name"]
backing.network = net
connect_info = client.create("VirtualDeviceConnectInfo")
connect_info.allowGuestControl = True
connect_info.connected = False
connect_info.startConnected = True
new_nic = client.create(nic["type"])
new_nic.backing = backing
new_nic.key = 2
# TODO: Work out a way to automatically increment this
new_nic.unitNumber = 1
new_nic.addressType = "generated"
new_nic.connectable = connect_info
nic_spec = client.create("VirtualDeviceConfigSpec")
nic_spec.device = new_nic
nic_spec.fileOperation = None
operation = client.create("VirtualDeviceConfigSpecOperation")
nic_spec.operation = (operation.add)
return nic_spec |
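A hypothetical call; the nic mapping needs the two keys the function reads ('network_name' and 'type'), and both example values here are assumptions.
nic_spec = create_nic(client, target_host, {
    "network_name": "VM Network",   # must match a network name on the target
    "type": "VirtualE1000",         # device class name passed to client.create()
})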
def is_draft(request):
"""
A request is considered to be in draft mode if:
- it is for *any* admin resource, since the admin site deals only with
draft objects and hides the published version from admin users
- it is for *any* view in *any* app that deals only with draft objects
- user is a member of the "Content Reviewer" group, since content
reviewers' sole purpose is to review draft content and they need not
see the published content
- the user is a staff member and therefore can see draft versions of
pages if they wish, and the 'preview' GET parameter flag is included
to show the draft page is definitely wanted instead of a normal
published page.
- the 'preview' GET parameter flag is included with a valid HMAC for
the requested URL, regardless of authenticated permissions.
"""
# Admin resource requested.
if PublishingMiddleware.is_admin_request(request):
return True
# API resource requested.
if PublishingMiddleware.is_api_request(request):
return True
# Draft-only view requested.
if PublishingMiddleware.is_draft_only_view(request):
return True
# Content reviewer made request.
if PublishingMiddleware.is_content_reviewer_user(request):
return True
# Draft mode requested.
if PublishingMiddleware.is_draft_request(request):
# User is staff.
if PublishingMiddleware.is_staff_user(request):
return True
# Request contains a valid draft mode HMAC in the querystring.
if verify_draft_url(request.get_full_path()):
return True
# Not draft mode.
return False | A request is considered to be in draft mode if:
- it is for *any* admin resource, since the admin site deals only with
draft objects and hides the published version from admin users
- it is for *any* view in *any* app that deals only with draft objects
- user is a member of the "Content Reviewer" group, since content
reviewers' sole purpose is to review draft content and they need not
see the published content
- the user is a staff member and therefore can see draft versions of
pages if they wish, and the 'preview' GET parameter flag is included
to show the draft page is definitely wanted instead of a normal
published page.
- the 'preview' GET parameter flag is included with a valid HMAC for
the requested URL, regardless of authenticated permissions. | Below is the the instruction that describes the task:
### Input:
A request is considered to be in draft mode if:
- it is for *any* admin resource, since the admin site deals only with
draft objects and hides the published version from admin users
- it is for *any* view in *any* app that deals only with draft objects
- user is a member of the "Content Reviewer" group, since content
reviewers' sole purpose is to review draft content and they need not
see the published content
- the user is a staff member and therefore can see draft versions of
pages if they wish, and the 'preview' GET parameter flag is included
to show the draft page is definitely wanted instead of a normal
published page.
- the 'preview' GET parameter flag is included with a valid HMAC for
the requested URL, regardless of authenticated permissions.
### Response:
def is_draft(request):
"""
A request is considered to be in draft mode if:
- it is for *any* admin resource, since the admin site deals only with
draft objects and hides the published version from admin users
- it is for *any* view in *any* app that deals only with draft objects
- user is a member of the "Content Reviewer" group, since content
reviewers' sole purpose is to review draft content and they need not
see the published content
- the user is a staff member and therefore can see draft versions of
pages if they wish, and the 'preview' GET parameter flag is included
to show the draft page is definitely wanted instead of a normal
published page.
- the 'preview' GET parameter flag is included with a valid HMAC for
the requested URL, regardless of authenticated permissions.
"""
# Admin resource requested.
if PublishingMiddleware.is_admin_request(request):
return True
# API resource requested.
if PublishingMiddleware.is_api_request(request):
return True
# Draft-only view requested.
if PublishingMiddleware.is_draft_only_view(request):
return True
# Content reviewer made request.
if PublishingMiddleware.is_content_reviewer_user(request):
return True
# Draft mode requested.
if PublishingMiddleware.is_draft_request(request):
# User is staff.
if PublishingMiddleware.is_staff_user(request):
return True
# Request contains a valid draft mode HMAC in the querystring.
if verify_draft_url(request.get_full_path()):
return True
# Not draft mode.
return False |
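A hypothetical use from view code; the Article manager methods are invented for illustration, only PublishingMiddleware.is_draft() comes from the code above.
if PublishingMiddleware.is_draft(request):
    queryset = Article.objects.draft()       # assumed draft manager, illustration only
else:
    queryset = Article.objects.published()   # assumed published manager, illustration only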
def _process(self, request, result=None):
"""
Process the rules for the request.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. If
None, one will be allocated.
:returns: A Result object, containing the selected version and
content type.
"""
# Allocate a result and process all the rules
result = result if result is not None else Result()
self._proc_uri(request, result)
self._proc_ctype_header(request, result)
self._proc_accept_header(request, result)
return result | Process the rules for the request.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. If
None, one will be allocated.
:returns: A Result object, containing the selected version and
content type. | Below is the the instruction that describes the task:
### Input:
Process the rules for the request.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. If
None, one will be allocated.
:returns: A Result object, containing the selected version and
content type.
### Response:
def _process(self, request, result=None):
"""
Process the rules for the request.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in. If
None, one will be allocated.
:returns: A Result object, containing the selected version and
content type.
"""
# Allocate a result and process all the rules
result = result if result is not None else Result()
self._proc_uri(request, result)
self._proc_ctype_header(request, result)
self._proc_accept_header(request, result)
return result |
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
""" Return the normalization vector for the approximant
"""
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None | Return the normalization vector for the approximant | Below is the the instruction that describes the task:
### Input:
Return the normalization vector for the approximant
### Response:
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
""" Return the normalization vector for the approximant
"""
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None |
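A hypothetical call; the approximant name is a placeholder and psd, length and delta_f are assumed to exist in the calling code. Approximants without a registered routine simply get None back.
norm = get_waveform_filter_norm('SPAtmplt', psd, length, delta_f, f_lower=30.0)
if norm is None:
    pass  # no normalization vector registered for this approximant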