code | docstring
---|---|
def _make_entities_from_ids(entity_cls, entity_objs_and_ids, server_config):
"""Given an iterable of entities and/or IDs, return a list of entities.
:param entity_cls: An :class:`Entity` subclass.
:param entity_objs_and_ids: An iterable of
:class:`nailgun.entity_mixins.Entity` objects and/or entity IDs. All of
the entities in this iterable should be of type ``entity_cls``.
:returns: A list of ``entity_cls`` objects.
"""
return [
_make_entity_from_id(entity_cls, entity_or_id, server_config)
for entity_or_id
in entity_objs_and_ids
] | Given an iterable of entities and/or IDs, return a list of entities.
:param entity_cls: An :class:`Entity` subclass.
:param entity_objs_and_ids: An iterable of
:class:`nailgun.entity_mixins.Entity` objects and/or entity IDs. All of
the entities in this iterable should be of type ``entity_cls``.
:returns: A list of ``entity_cls`` objects. |
def find(self, name):
"""Returns the extension pack with the specified name if found.
in name of type str
The name of the extension pack to locate.
return return_data of type :class:`IExtPack`
The extension pack if found.
raises :class:`VBoxErrorObjectNotFound`
No extension pack matching @a name was found.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
return_data = self._call("find",
in_p=[name])
return_data = IExtPack(return_data)
return return_data | Returns the extension pack with the specified name if found.
in name of type str
The name of the extension pack to locate.
return return_data of type :class:`IExtPack`
The extension pack if found.
raises :class:`VBoxErrorObjectNotFound`
No extension pack matching @a name was found. |
def _compute_mean(self, C, mag, rhypo, hypo_depth, mean, idx):
"""
Compute mean value according to equations 10 and 11 page 226.
"""
mean[idx] = (C['C1'] + C['C2'] * mag + C['C3'] * np.log(rhypo[idx] +
C['C4'] * np.exp(C['C5'] * mag)) + C['C6'] * hypo_depth) | Compute mean value according to equations 10 and 11 page 226. |
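A minimal standalone sketch of the same functional form, with hypothetical coefficient values (the real `C` dictionary comes from the GMPE's coefficient table); the shape of the equation is ln(mean) = C1 + C2*M + C3*ln(R + C4*exp(C5*M)) + C6*h:

```python
import numpy as np

# Hypothetical coefficients, for illustration only
C = {'C1': -1.0, 'C2': 0.5, 'C3': -1.2, 'C4': 0.1, 'C5': 0.6, 'C6': 0.005}
mag, hypo_depth = 6.5, 20.0
rhypo = np.array([10.0, 50.0, 100.0])  # hypocentral distances

mean = (C['C1'] + C['C2'] * mag
        + C['C3'] * np.log(rhypo + C['C4'] * np.exp(C['C5'] * mag))
        + C['C6'] * hypo_depth)
print(mean)
```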
def delete(self, client=None):
"""Deletes a task from Task Queue.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`Task`
:returns: The task that was just deleted.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
"""
return self.taskqueue.delete_task(self.id, client=client) | Deletes a task from Task Queue.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`Task`
:returns: The task that was just deleted.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`). |
def RegisterProtoDescriptors(db, *additional_descriptors):
"""Registers all API-releated descriptors in a given symbol DB."""
db.RegisterFileDescriptor(artifact_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(client_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(config_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(cron_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(flow_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(hunt_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(output_plugin_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(reflection_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(stats_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(user_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(vfs_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(checks_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(deprecated_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(flows_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(jobs_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(osquery_pb2.DESCRIPTOR)
db.RegisterFileDescriptor(wrappers_pb2.DESCRIPTOR)
for d in additional_descriptors:
db.RegisterFileDescriptor(d) | Registers all API-related descriptors in a given symbol DB. |
def get_condarc_channels(self,
normalize=False,
conda_url='https://conda.anaconda.org',
channels=None):
"""Return all the channel urls defined in .condarc.
If no condarc file is found, use the default channels.
the `default_channel_alias` key is ignored and only the anaconda client
`url` key is used.
"""
# https://docs.continuum.io/anaconda-repository/configuration
# They can only exist on a system condarc
default_channels = self.load_rc(system=True).get('default_channels',
self.DEFAULT_CHANNELS)
normalized_channels = []
if channels is None:
condarc = self.load_rc()
channels = condarc.get('channels')
if channels is None:
channels = ['defaults']
if normalize:
template = '{0}/{1}' if conda_url[-1] != '/' else '{0}{1}'
for channel in channels:
if channel == 'defaults':
normalized_channels += default_channels
elif channel.startswith('http'):
normalized_channels.append(channel)
else:
# Append to the conda_url that comes from anaconda client
# default_channel_alias key is deliberately ignored
normalized_channels.append(template.format(conda_url,
channel))
channels = normalized_channels
return channels | Return all the channel URLs defined in .condarc.
If no condarc file is found, use the default channels.
The `default_channel_alias` key is ignored and only the anaconda client
`url` key is used. |
def layout(self, dimensions=None, **kwargs):
"""Group by supplied dimension(s) and lay out groups
Groups data by supplied dimension(s), laying the groups out along
the dimension(s) in an NdLayout.
Args:
dimensions: Dimension(s) to group by
Returns:
NdLayout with supplied dimensions
"""
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return NdLayout(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=NdLayout, **kwargs) | Group by supplied dimension(s) and lay out groups
Groups data by supplied dimension(s), laying the groups out along
the dimension(s) in an NdLayout.
Args:
dimensions: Dimension(s) to group by
Returns:
NdLayout with supplied dimensions |
def cli(env, package_keyname):
"""List Datacenters a package can be ordered in.
Use the location Key Name to place orders
"""
manager = ordering.OrderingManager(env.client)
table = formatting.Table(COLUMNS)
locations = manager.package_locations(package_keyname)
for region in locations:
for datacenter in region['locations']:
table.add_row([
datacenter['location']['id'],
datacenter['location']['name'],
region['description'],
region['keyname']
])
env.fout(table) | List Datacenters a package can be ordered in.
Use the location Key Name to place orders |
def _check_cargs(self, cargs):
"""Raise exception if clbit is not in this circuit or bad format."""
if not all(isinstance(i, tuple) and
isinstance(i[0], ClassicalRegister) and
isinstance(i[1], int) for i in cargs):
raise QiskitError("carg not (ClassicalRegister, int) tuple")
if not all(self.has_register(i[0]) for i in cargs):
raise QiskitError("register not in this circuit")
for clbit in cargs:
clbit[0].check_range(clbit[1]) | Raise exception if clbit is not in this circuit or bad format. |
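For reference, a sketch of the carg format this check accepts, assuming a stock Qiskit `ClassicalRegister` (import path may differ between Qiskit versions):

```python
from qiskit import ClassicalRegister

creg = ClassicalRegister(2, 'c')
# Each carg must be a (ClassicalRegister, int) tuple whose register is part
# of the circuit and whose bit index is within the register's range.
cargs = [(creg, 0), (creg, 1)]
```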
def combobox_set_model_from_list(cb, items):
"""Setup a ComboBox or ComboBoxEntry based on a list of strings."""
cb.clear()
model = gtk.ListStore(str)
for i in items:
model.append([i])
cb.set_model(model)
if type(cb) == gtk.ComboBoxEntry:
cb.set_text_column(0)
elif type(cb) == gtk.ComboBox:
cell = gtk.CellRendererText()
cb.pack_start(cell, True)
cb.add_attribute(cell, 'text', 0) | Setup a ComboBox or ComboBoxEntry based on a list of strings. |
def fastaIterator(fn, useMutableString=False, verbose=False):
"""
A generator function which yields fastaSequence objects from a fasta-format
file or stream.
:param fn: a file-like stream or a string; if this is a string, it's
treated as a filename, else it's treated as a file-like
object, which must have a readline() method.
:param useMutableString: if True, construct sequences from lists of chars,
rather than python string objects, to allow
more efficient editing. Use with caution.
:param verbose: if True, output additional status messages to stderr about
progress
"""
fh = fn
if type(fh).__name__ == "str":
fh = open(fh)
if verbose:
try:
pind = __build_progress_indicator(fh)
except ProgressIndicatorError as e:
sys.stderr.write("Warning: unable to show progress for stream. " +
"Reason: " + str(e))
verbose = False
prev_line = None
while True:
seqHeader = __read_seq_header(fh, prev_line)
name = seqHeader[1:].strip()
seq_data, prev_line = __read_seq_data(fh)
if verbose:
pind.done = fh.tell()
pind.showProgress(to_strm=sys.stderr)
yield Sequence(name, seq_data, useMutableString)
# remember where we stopped for next call, or finish
if prev_line == "":
break | A generator function which yields fastaSequence objects from a fasta-format
file or stream.
:param fn: a file-like stream or a string; if this is a string, it's
treated as a filename, else it's treated as a file-like
object, which must have a readline() method.
:param useMutableString: if True, construct sequences from lists of chars,
rather than python string objects, to allow
more efficient editing. Use with caution.
:param verbose: if True, output additional status messages to stderr about
progress |
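A hedged usage sketch; the import path is hypothetical (wherever `fastaIterator` is defined), and `records.fa` stands in for any FASTA file on disk:

```python
# hypothetical import path, adjust to the module that defines fastaIterator
from fastaiterators import fastaIterator

count = 0
for seq in fastaIterator("records.fa"):
    count += 1  # each item is a Sequence built from one header/body pair
print(count, "sequences read")
```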
def substitute_variables(cls, configuration, value, ref):
"""
Substitute variables in `value` from `configuration` where any path reference is relative to
`ref`.
Parameters
----------
configuration : dict
configuration (required to resolve intra-document references)
value :
value to resolve substitutions for
ref : str
path to `value` in the `configuration`
Returns
-------
value :
value after substitution
"""
if isinstance(value, str):
# Substitute all intra-document references
while True:
match = cls.REF_PATTERN.search(value)
if match is None:
break
path = os.path.join(os.path.dirname(ref), match.group('path'))
try:
value = value.replace(
match.group(0), str(util.get_value(configuration, path)))
except KeyError:
raise KeyError(path)
# Substitute all variable references
while True:
match = cls.VAR_PATTERN.search(value)
if match is None:
break
value = value.replace(
match.group(0),
str(util.get_value(cls.VARIABLES, match.group('path'), '/')))
return value | Substitute variables in `value` from `configuration` where any path reference is relative to
`ref`.
Parameters
----------
configuration : dict
configuration (required to resolve intra-document references)
value :
value to resolve substitutions for
ref : str
path to `value` in the `configuration`
Returns
-------
value :
value after substitution |
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
score='$\log_2 K$'):
"""Violinplots of the highest and lowest scoring of each modality"""
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout() | Violinplots of the highest and lowest scoring of each modality |
def build_opener(self):
"""
Builds url opener, initializing proxy.
@return: OpenerDirector
"""
http_handler = urllib2.HTTPHandler() # debuglevel=self.transport.debug
if util.empty(self.transport.proxy_url):
return urllib2.build_opener(http_handler)
proxy_handler = urllib2.ProxyHandler(
{self.transport.proxy_url[:4]: self.transport.proxy_url})
return urllib2.build_opener(http_handler, proxy_handler) | Builds url opener, initializing proxy.
@return: OpenerDirector |
def int_filter(text):
"""Extract integer from text.
**Chinese documentation**
Extract the integers contained in the text.
"""
res = list()
for char in text:
if char.isdigit():
res.append(char)
return int("".join(res)) | Extract integer from text.
**Chinese documentation**
Extract the integers contained in the text. |
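Assuming `int_filter` from above is in scope, a quick usage example; the digits are concatenated in order of appearance, and an input with no digits would raise a `ValueError` from `int('')`:

```python
print(int_filter("order #12, item 7"))  # 127
print(int_filter("v2.4.1"))             # 241
```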
def regularrun(
shell,
prompt_template="default",
aliases=None,
envvars=None,
extra_commands=None,
speed=1,
test_mode=False,
commentecho=False,
):
"""Allow user to run their own live commands until CTRL-Z is pressed again.
"""
loop_again = True
command_string = regulartype(prompt_template)
if command_string == TAB:
loop_again = False
return loop_again
run_command(
command_string,
shell,
aliases=aliases,
envvars=envvars,
extra_commands=extra_commands,
test_mode=test_mode,
)
return loop_again | Allow user to run their own live commands until CTRL-Z is pressed again. |
def delete_device(name, safety_on=True):
'''
Deletes a device from Vistara based on DNS name or partial name. By default,
delete_device will only perform the delete if a single host is returned. Set
safety_on=False to delete all matches (up to default API search page size)
CLI Example:
.. code-block:: bash
salt-run vistara.delete_device 'hostname-101.mycompany.com'
salt-run vistara.delete_device 'hostname-101'
salt-run vistara.delete_device 'hostname-1' safety_on=False
'''
config = _get_vistara_configuration()
if not config:
return False
access_token = _get_oath2_access_token(config['client_key'], config['client_secret'])
if not access_token:
return 'Vistara access token not available'
query_string = 'dnsName:{0}'.format(name)
devices = _search_devices(query_string, config['client_id'], access_token)
if not devices:
return "No devices found"
device_count = len(devices)
if safety_on and device_count != 1:
return "Expected to delete 1 device and found {0}. "\
"Set safety_on=False to override.".format(device_count)
delete_responses = []
for device in devices:
device_id = device['id']
log.debug(device_id)
delete_response = _delete_resource(device_id, config['client_id'], access_token)
if not delete_response:
return False
delete_responses.append(delete_response)
return delete_responses | Deletes a device from Vistara based on DNS name or partial name. By default,
delete_device will only perform the delete if a single host is returned. Set
safety_on=False to delete all matches (up to default API search page size)
CLI Example:
.. code-block:: bash
salt-run vistara.delete_device 'hostname-101.mycompany.com'
salt-run vistara.delete_device 'hostname-101'
salt-run vistara.delete_device 'hostname-1' safety_on=False |
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
'''
Returns a Folder Object
service_instance
Service instance object
datacenter
Name of the datacenter
placement
Placement dictionary
base_vm_name
Existing virtual machine name (for cloning)
'''
log.trace('Retrieving folder information')
if base_vm_name:
vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
if 'parent' in vm_props:
folder_object = vm_props['parent']
else:
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'The virtual machine parent',
'object is not defined']))
elif 'folder' in placement:
folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
if len(folder_objects) > 1:
raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
'Multiple instances are available of the',
'specified folder {0}'.format(placement['folder'])]))
folder_object = folder_objects[0]
elif datacenter:
datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
if 'vmFolder' in dc_props:
folder_object = dc_props['vmFolder']
else:
raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
return folder_object | Returns a Folder Object
service_instance
Service instance object
datacenter
Name of the datacenter
placement
Placement dictionary
base_vm_name
Existing virtual machine name (for cloning) |
def random_jpath(depth = 3):
"""
Generate random JPath with given node depth.
"""
chunks = []
while depth > 0:
length = random.randint(5, 15)
ident = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length))
if random.choice((True, False)):
index = random.randint(0, 10)
ident = "{:s}[{:d}]".format(ident, index)
chunks.append(ident)
depth -= 1
return ".".join(chunks) | Generate random JPath with given node depth. |
def reinit(self):
"""
Re-initialize the socket connection
close current socket (if open)
and start a fresh connection
raise ConnectionError on error
"""
log.debug("Reinitializing socket connection for %s:%d" % (self.host, self.port))
if self._sock:
self.close()
try:
self._sock = socket.create_connection((self.host, self.port), self.timeout)
except socket.error:
log.exception('Unable to connect to kafka broker at %s:%d' % (self.host, self.port))
self._raise_connection_error() | Re-initialize the socket connection
close current socket (if open)
and start a fresh connection
raise ConnectionError on error |
def _setup_freqs(self):
"""Updating frequency borders from channel values
"""
if self.header[b'foff'] > 0:
self.f_start = self.f_begin + self.chan_start_idx*abs(self.header[b'foff'])
self.f_stop = self.f_begin + self.chan_stop_idx*abs(self.header[b'foff'])
else:
self.f_start = self.f_end - self.chan_stop_idx*abs(self.header[b'foff'])
self.f_stop = self.f_end - self.chan_start_idx*abs(self.header[b'foff']) | Updating frequency borders from channel values |
def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):
"""
Make Tensorflow optimization tensor.
This method builds optimization tensor and initializes all necessary variables
created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Dictionary of extra parameters passed to Tensorflow
optimizer's minimize method.
:return: Tensorflow optimization tensor or operation.
"""
session = model.enquire_session(session)
objective = model.objective
full_var_list = self._gen_var_list(model, var_list)
# Create optimizer variables before initialization.
with session.as_default():
minimize = self.optimizer.minimize(objective, var_list=full_var_list, **kwargs)
model.initialize(session=session)
self._initialize_optimizer(session)
return minimize | Make Tensorflow optimization tensor.
This method builds optimization tensor and initializes all necessary variables
created by optimizer.
:param model: GPflow model.
:param session: Tensorflow session.
:param var_list: List of variables for training.
:param kwargs: Dictionary of extra parameters passed to Tensorflow
optimizer's minimize method.
:return: Tensorflow optimization tensor or operation. |
def get_service_definitions(self, service_type=None):
"""GetServiceDefinitions.
[Preview API]
:param str service_type:
:rtype: [ServiceDefinition]
"""
route_values = {}
if service_type is not None:
route_values['serviceType'] = self._serialize.url('service_type', service_type, 'str')
response = self._send(http_method='GET',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('[ServiceDefinition]', self._unwrap_collection(response)) | GetServiceDefinitions.
[Preview API]
:param str service_type:
:rtype: [ServiceDefinition] |
def parse_line(self, line, lineno):
"""Check a single line for an error. Keeps track of the linenumber"""
# TaskCluster logs are a bit wonky.
#
# TaskCluster logs begin with output coming from TaskCluster itself,
# before it has transitioned control of the task to the configured
# process. These "internal" logs look like the following:
#
# [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b
#
# If an error occurs during this "setup" phase, TaskCluster may emit
# lines beginning with ``[taskcluster:error]``.
#
# Once control has transitioned from TaskCluster to the configured
# task process, lines can be whatever the configured process emits.
# The popular ``run-task`` wrapper prefixes output to emulate
# TaskCluster's "internal" logs. e.g.
#
# [vcs 2016-09-09T17:45:02.842230Z] adding changesets
#
# This prefixing can confuse error parsing. So, we strip it.
#
# Because regular expression matching and string manipulation can be
# expensive when performed on every line, we only strip the TaskCluster
# log prefix if we know we're in a TaskCluster log.
# First line of TaskCluster logs almost certainly has this.
if line.startswith('[taskcluster '):
self.is_taskcluster = True
# For performance reasons, only do this if we have identified as
# a TC task.
if self.is_taskcluster:
line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)
if self.is_error_line(line):
self.add(line, lineno) | Check a single line for an error. Keeps track of the line number. |
def parse_commandline(argv):
"""
Returns the arguments parsed from *argv* as a namespace.
"""
ap = ArgumentParser(
prog='wdiffhtml',
description=DESCRIPTION,
epilog=EPILOG,
)
ap.add_argument(
'--version', action='version', version='wdiffhtml v{}'.format(version),
help="shows version and exits"
)
ap.add_argument(
'org_file', metavar='FILENAME',
help="original file"
)
ap.add_argument(
'new_file', metavar='FILENAME',
help="changed file"
)
g_html = ap.add_argument_group(
'Wrapper',
"Without these settings, only the `wdiff` output is returned (with INS "
"and DEL tags). Here are some options to wrap the output in a HTML "
"document."
)
g_html.add_argument(
'-w', '--wrap-with-html', action='store_true',
help="wrap the diff with a HTML document"
)
g_html.add_argument(
'-f', '--fold-tags', action='store_true',
help="allow INS and DEL tags to span linebraks"
)
g_html.add_argument(
'-b', '--hard-breaks', action='store_true',
help="replace line breaks with BR tags"
)
g_context = ap.add_argument_group(
'Context',
"With these options you can add additional information to the HTML "
"output (means these only work alongside the `--wrap-with-html` option)."
)
g_context.add_argument(
'-r', '--revision', metavar='STRING',
help="add a revision tag or version number to the output"
)
x_stamp = g_context.add_mutually_exclusive_group()
x_stamp.add_argument(
'-d', '--datestamp', action='store_true',
help="add a date to the output (UTC now)"
)
x_stamp.add_argument(
'-D', '--timestamp', action='store_true',
help="add date and time to the output (UTC now)"
)
g_files = ap.add_argument_group(
'Files',
"Instead of using the default templates, you can use your own files. "
"These only work alongside the `--wrap-with-html` option"
)
g_files.add_argument(
'-t', '--template', type=FileType('r'), metavar='FILE',
help="load the Jinja2 template from this file"
)
g_files.add_argument(
'-c', '--css', type=FileType('r'), metavar='FILE',
help="load CSS from this file"
)
g_files.add_argument(
'-j', '--js', type=FileType('r'), metavar='FILE',
help="load Javascript from this file"
)
g_files.add_argument(
'-J', '--js2', type=FileType('r'), metavar='FILE',
help="load another Javascript from this file (like Zepto)"
)
# parse args
args = ap.parse_args(argv)
# check for wrapper
if not args.wrap_with_html:
# check context arguments and file arguments
for group in (g_context, g_files):
args_to_check = [opt.dest for opt in group._group_actions]
if any([getattr(args, attr) for attr in args_to_check]):
msg = "the options require that `--wrap-with-html` is used"
ap.error(msg)
return args | Returns the arguments parsed from *argv* as a namespace. |
def on_post(self):
"""Extracts the request, feeds the module, and returns the response."""
request = self.environ['wsgi.input']
try:
return self.process_request(request)
except ClientError as exc:
return self.on_client_error(exc)
except BadGateway as exc:
return self.on_bad_gateway(exc)
except InvalidConfig:
raise
except Exception as exc: # pragma: no cover # pylint: disable=W0703
logging.error('Unknown exception: ', exc_info=exc)
return self.on_internal_error() | Extracts the request, feeds the module, and returns the response. |
def random_word(length,dictionary = False):#may return offensive words if dictionary = True
'''
Creates random lowercase words from dictionary or by alternating vowels and consonants
The second method chooses from 85**length words.
The dictionary method chooses from 3000--12000 words for 3<=length<=12
(though this of course depends on the available dictionary)
:param length: word length
:param dictionary: Try reading from dictionary, else fall back to artificial words
'''
if dictionary:
try:
with open('/usr/share/dict/words') as fp:
words = [word.lower()[:-1] for word in fp.readlines() if re.match('[A-Za-z0-9]{}$'.format('{'+str(length)+'}'),word)]
return random.choice(words)
except FileNotFoundError:
pass
vowels = list('aeiou')
consonants = list('bcdfghklmnprstvwz')
pairs = [(random.choice(consonants),random.choice(vowels)) for _ in range(length//2+1)]
return ''.join([l for p in pairs for l in p])[:length] | Creates random lowercase words from dictionary or by alternating vowels and consonants
The second method chooses from 85**length words.
The dictionary method chooses from 3000--12000 words for 3<=length<=12
(though this of course depends on the available dictionary)
:param length: word length
:param dictionary: Try reading from dictionary, else fall back to artificial words |
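A short usage sketch, assuming `random_word` from above is in scope; the outputs are random, so the values in the comments are only illustrative:

```python
import random

random.seed(42)  # make the artificial words repeatable
print(random_word(6))                   # e.g. 'kovizu' (alternating consonant/vowel)
print(random_word(8, dictionary=True))  # a dictionary word if /usr/share/dict/words exists
```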
def mkdir_command(endpoint_plus_path):
"""
Executor for `globus mkdir`
"""
endpoint_id, path = endpoint_plus_path
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
res = client.operation_mkdir(endpoint_id, path=path)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message") | Executor for `globus mkdir` |
def info(self, *msg):
"""
Prints a message with an info prefix
"""
label = colors.blue("INFO")
self._msg(label, *msg) | Prints a message with an info prefix |
def optimize(population, toolbox, ngen, archive=None, stats=None, verbose=False, history=None):
"""
Optimize a population of individuals.
:param population:
:param toolbox:
:param ngen:
:param archive:
:param stats:
:param verbose:
:param history:
:return:
"""
start = time.time()
if history is not None:
history.update(population)
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals', 'cpu_time'] + (stats.fields if stats else [])
render_fitness(population, toolbox, history)
record_information(population, stats, start, archive, logbook, verbose)
for gen in range(1, ngen + 1):
offspring = generate_next_population(population, toolbox)
render_fitness(offspring, toolbox, history)
population = offspring
record_information(population, stats, start, archive, logbook, verbose)
return population, logbook, history | Optimize a population of individuals.
:param population:
:param toolbox:
:param ngen:
:param archive:
:param stats:
:param verbose:
:param history:
:return: |
def unimapping(arg, level):
"""
Mapping object to unicode string.
:type arg: collections.Mapping
:param arg: mapping object
:type level: int
:param level: deep level
:rtype: unicode
:return: mapping object as unicode string
"""
if not isinstance(arg, collections.Mapping):
raise TypeError(
'expected collections.Mapping, {} received'.format(type(arg).__name__)
)
result = []
for i in arg.items():
result.append(
pretty_spaces(level) + u': '.join(map(functools.partial(convert, level=level), i))
)
string = join_strings(result, level)
if level is not None:
string += pretty_spaces(level - 1)
return u'{{{}}}'.format(string) | Mapping object to unicode string.
:type arg: collections.Mapping
:param arg: mapping object
:type level: int
:param level: deep level
:rtype: unicode
:return: mapping object as unicode string |
def store(self, text, tier):
"""
Writes text to the underlying Store mapped at tier. If the store doesn't exist yet, it creates it
:param text: the text to write
:param tier: the tier used to identify the store
:return:
"""
store = self._stores.get(tier, None)
if not store:
store = AutoSplittingFile(self._dir, self._lines_per_store, self._file_name, tier)
self._stores[tier] = store
store.write(text) | Writes text to the underlying Store mapped at tier. If the store doesn't exist yet, it creates it
:param text: the text to write
:param tier: the tier used to identify the store
:return: |
def check_file_for_tabs(cls, filename, verbose=True):
"""identifies if the file contains tabs and returns True if it
does. It also prints the location of the lines and columns. If
verbose is set to False, the location is not printed.
:param verbose: if true prints issues
:param filename: the filename
:type filename: str
:rtype: True if there are tabs in the file
"""
filename = path_expand(filename)
file_contains_tabs = False
with open(filename, 'r') as f:
lines = f.read().split("\n")
line_no = 1
for line in lines:
if "\t" in line:
file_contains_tabs = True
location = [
i for i in range(len(line)) if line.startswith('\t', i)]
if verbose:
print("Tab found in line", line_no, "and column(s)",
location)
line_no += 1
return file_contains_tabs | identifies if the file contains tabs and returns True if it
does. It also prints the location of the lines and columns. If
verbose is set to False, the location is not printed.
:param verbose: if true prints issues
:param filename: the filename
:type filename: str
:rtype: True if there are tabs in the file |
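The column lookup is just a list comprehension over `str.startswith` with an offset; a tiny standalone illustration of that piece:

```python
line = "name:\tvalue\tunit"
tab_columns = [i for i in range(len(line)) if line.startswith('\t', i)]
print(tab_columns)  # [5, 11]
```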
def remove_repositories(repositories, default_repositories):
"""
Remove non-default repositories
"""
repos = []
for repo in repositories:
if repo in default_repositories:
repos.append(repo)
return repos | Remove non-default repositories |
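The function keeps only the repositories that also appear in `default_repositories`; a quick example, assuming the function above is in scope:

```python
repos = remove_repositories(
    ['extra', 'slackware64', 'alien'],
    default_repositories=['slackware64', 'patches'],
)
print(repos)  # ['slackware64']
```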
def combine_mv_and_lv(mv, lv):
"""Combine MV and LV grid topology in PyPSA format
"""
combined = {
c: pd.concat([mv[c], lv[c]], axis=0) for c in list(lv.keys())
}
combined['Transformer'] = mv['Transformer']
return combined | Combine MV and LV grid topology in PyPSA format |
def is_deletion(self):
"""
Does this variant represent the deletion of nucleotides from the
reference genome?
"""
# A deletion would appear in a VCF like CT>C, so that the
# reference allele starts with the alternate nucleotides.
# This is true even in the normalized case, where the alternate
# nucleotides are an empty string.
return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt) | Does this variant represent the deletion of nucleotides from the
reference genome? |
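A concrete illustration of the VCF-style check described in the comment, using a minimal stand-in object since the surrounding class isn't shown here:

```python
from collections import namedtuple

V = namedtuple('V', ['ref', 'alt'])  # stand-in with just the fields the check uses

def is_deletion(v):
    return (len(v.ref) > len(v.alt)) and v.ref.startswith(v.alt)

print(is_deletion(V(ref='CT', alt='C')))  # True  -- T deleted after the C
print(is_deletion(V(ref='C', alt='CT')))  # False -- this is an insertion
print(is_deletion(V(ref='CT', alt='')))   # True  -- normalized deletion
```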
def continue_abort(self,
root_pipeline_key,
cursor=None,
max_to_notify=_MAX_ABORTS_TO_BEGIN):
"""Sends the abort signal to all children for a root pipeline.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
cursor: The query cursor for enumerating _PipelineRecords when inserting
tasks to cause child pipelines to terminate.
max_to_notify: Used for testing.
"""
if not isinstance(root_pipeline_key, db.Key):
root_pipeline_key = db.Key(root_pipeline_key)
# NOTE: The results of this query may include _PipelineRecord instances
# that are not actually "reachable", meaning you cannot get to them by
# starting at the root pipeline and following "fanned_out" onward. This
# is acceptable because even these defunct _PipelineRecords will properly
# set their status to ABORTED when the signal comes, regardless of any
# other status they may have had.
#
# The only gotcha here is if a Pipeline's finalize method somehow modifies
# its inputs (like deleting an input file). In the case there are
# unreachable child pipelines, it will appear as if two finalize methods
# have been called instead of just one. The saving grace here is that
# finalize must be idempotent, so this *should* be harmless.
query = (
_PipelineRecord.all(cursor=cursor)
.filter('root_pipeline =', root_pipeline_key))
results = query.fetch(max_to_notify)
task_list = []
for pipeline_record in results:
if pipeline_record.status not in (
_PipelineRecord.RUN, _PipelineRecord.WAITING):
continue
pipeline_key = pipeline_record.key()
task_list.append(taskqueue.Task(
name='%s-%s-abort' % (self.task_name, pipeline_key.name()),
url=self.abort_handler_path,
params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT),
headers={'X-Ae-Pipeline-Key': pipeline_key}))
# Task continuation with sequence number to prevent fork-bombs.
if len(results) == max_to_notify:
the_match = re.match('(.*)-([0-9]+)', self.task_name)
if the_match:
prefix = the_match.group(1)
end = int(the_match.group(2)) + 1
else:
prefix = self.task_name
end = 0
task_list.append(taskqueue.Task(
name='%s-%d' % (prefix, end),
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key,
cursor=query.cursor())))
if task_list:
try:
taskqueue.Queue(self.queue_name).add(task_list)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass | Sends the abort signal to all children for a root pipeline.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
cursor: The query cursor for enumerating _PipelineRecords when inserting
tasks to cause child pipelines to terminate.
max_to_notify: Used for testing. |
def from_array(array):
"""
Deserialize a new LabeledPrice from a given dictionary.
:return: new LabeledPrice instance.
:rtype: LabeledPrice
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['label'] = u(array.get('label'))
data['amount'] = int(array.get('amount'))
instance = LabeledPrice(**data)
instance._raw = array
return instance | Deserialize a new LabeledPrice from a given dictionary.
:return: new LabeledPrice instance.
:rtype: LabeledPrice |
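A hedged usage sketch; the import path is hypothetical, and it assumes `from_array` is exposed as a static helper on `LabeledPrice` (as the surrounding class suggests):

```python
# hypothetical import path, for illustration only
from pytgbot.api_types.sendable.payments import LabeledPrice

price = LabeledPrice.from_array({'label': 'Subscription', 'amount': 499})
print(price.label, price.amount)  # Subscription 499
```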
def guinierplot(*args, **kwargs):
"""Make a Guinier plot. This is simply a wrapper around plotsascurve()."""
ret=plotsascurve(*args, **kwargs)
plt.xscale('power',exponent=2)
plt.yscale('log')
return ret | Make a Guinier plot. This is simply a wrapper around plotsascurve(). |
def handle_exists(self, spec, checkable):
'''The implementation of this one is weird. By the time
the {'$exists': True} spec gets to the dispatched
handler, the key presumably exists.
So we just parrot the assertion the spec makes. If it
asserts the key exists, we return True. If it asserts
the key doesn't exist, we return False, because that
can't be true.
'''
if not isinstance(spec, bool):
msg = 'The argument of an exists query must be of type bool.'
raise InvalidQuery(msg)
return spec | The implementation of this one is weird. By the time
the {'$exists': True} spec gets to the dispatched
handler, the key presumably exists.
So we just parrot the assertion the spec makes. If it
asserts the key exists, we return True. If it asserts
the key doesn't exist, we return False, because that
can't be true. |
def find(self, which, param):
'''Get a parameter from a layer in the network.
Parameters
----------
which : int or str
The layer that owns the parameter to return.
If this is an integer, then 0 refers to the input layer, 1 refers
to the first hidden layer, 2 to the second, and so on.
If this is a string, the layer with the corresponding name, if any,
will be used.
param : int or str
Name of the parameter to retrieve from the specified layer, or its
index in the parameter list of the layer.
Raises
------
KeyError
If there is no such layer, or if there is no such parameter in the
specified layer.
Returns
-------
param : Theano shared variable
A shared parameter variable from the indicated layer.
'''
for i, layer in enumerate(self.layers):
if which == i or which == layer.name:
return layer.find(param)
raise KeyError(which) | Get a parameter from a layer in the network.
Parameters
----------
which : int or str
The layer that owns the parameter to return.
If this is an integer, then 0 refers to the input layer, 1 refers
to the first hidden layer, 2 to the second, and so on.
If this is a string, the layer with the corresponding name, if any,
will be used.
param : int or str
Name of the parameter to retrieve from the specified layer, or its
index in the parameter list of the layer.
Raises
------
KeyError
If there is no such layer, or if there is no such parameter in the
specified layer.
Returns
-------
param : Theano shared variable
A shared parameter variable from the indicated layer. |
def user_filter(config, message, fasnick=None, *args, **kw):
""" A particular user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fmn.rules.utils.msg2usernames(message, **config) | A particular user
Use this rule to include messages that are associated with a
specific user. |
def remove_core_element(self, model):
"""Remove respective core element of handed global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return:
"""
gv_name = model
if self.global_variable_is_editable(gv_name, "Deletion"):
try:
self.model.global_variable_manager.delete_variable(gv_name)
except AttributeError as e:
logger.warning("The respective global variable '{1}' couldn't be removed. -> {0}"
"".format(e, model)) | Remove respective core element of handed global variable name
:param str model: String that is the key/gv_name of core element which should be removed
:return: |
def get_tree(cls, *condition, **kwargs):
"""
parent is root parent value, default is None
current is current value
condition is an extra condition for selecting root records
mode is search method, value is 'wide' or 'deep'
"""
parent_field = kwargs.pop('parent_field', 'parent')
parent = kwargs.pop('parent', None)
parent_order_by = kwargs.pop('parent_order_by', None)
current = kwargs.pop('current', None)
order_by = kwargs.pop('order_by', None)
id_field = kwargs.pop('id_field', 'id')
mode = kwargs.pop('mode', 'wide')
if mode not in ('wide', 'deep'):
raise Exception("mode parameter should be 'wide' or 'deep', but '{}' found.".format(mode))
def _f(parent):
query = cls.filter(cls.c[parent_field]==parent, *condition)
if order_by is not None:
query.order_by(order_by)
for row in query:
if mode == 'wide':
yield row
for _row in _f(getattr(row, id_field)):
yield _row
if mode == 'deep':
yield row
if current:
query = cls.filter(cls.c[id_field]==current)
else:
if is_condition(parent):
query = cls.filter(parent)
else:
query = cls.filter(cls.c[parent_field]==parent)
if parent_order_by is not None:
query.order_by(parent_order_by)
for row in query:
if mode == 'wide':
yield row
for r in _f(getattr(row, id_field)):
yield r
if mode == 'deep':
yield row | parent is root parent value, default is None
current is current value
condition is an extra condition for selecting root records
mode is search method, value is 'wide' or 'deep' |
def get_is_value(tag):
"""
Getter for data that also works with an implicit transfer syntax
:param tag: the tag to read
"""
# data is an int formatted as a string, so convert the string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) | Getter for data that also works with an implicit transfer syntax
:param tag: the tag to read |
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:] | obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree). |
def _recv(self):
"""
Receives and returns a message from Scratch
"""
prefix = self._read(self.prefix_len)
msg = self._read(self._extract_len(prefix))
return prefix + msg | Receives and returns a message from Scratch |
def extract_notebook_metatab(nb_path: Path):
"""Extract the metatab lines from a notebook and return a Metapack doc """
from metatab.rowgenerators import TextRowGenerator
import nbformat
with nb_path.open() as f:
nb = nbformat.read(f, as_version=4)
lines = '\n'.join(['Declare: metatab-latest'] + [get_cell_source(nb, tag) for tag in ['metadata', 'resources',
'schema']])
doc = MetapackDoc(TextRowGenerator(lines))
doc['Root'].get_or_new_term('Root.Title').value = get_cell_source(nb, 'Title').strip('#').strip()
doc['Root'].get_or_new_term('Root.Description').value = get_cell_source(nb, 'Description')
doc['Documentation'].get_or_new_term('Root.Readme').value = get_cell_source(nb, 'readme')
return doc | Extract the metatab lines from a notebook and return a Metapack doc |
def read_from_cache(self, domains=None):
"""
Returns:
dict: Dict[str, DataFrame]
"""
logger.info(f'Reading data from cache ({self.EXTRACTION_CACHE_PATH})')
if domains is not None and isinstance(domains, list):
dfs = {domain: self.read_entry(domain) for domain in domains}
else:
dfs = {name: self.read_entry(name)
for name in os.listdir(self.EXTRACTION_CACHE_PATH)}
return dfs | Returns:
dict: Dict[str, DataFrame] |
def _python_rpath(self):
"""The relative path (from environment root) to python."""
# Windows virtualenv installation installs pip to the [Ss]cripts
# folder. Here's a simple check to support:
if sys.platform == 'win32':
return os.path.join('Scripts', 'python.exe')
return os.path.join('bin', 'python') | The relative path (from environment root) to python. |
def compute_checksum(self, payload_offset: Optional[int]=None):
'''Compute and add the checksum data to the record fields.
This function also sets the content length.
'''
if not self.block_file:
self.fields['Content-Length'] = '0'
return
block_hasher = hashlib.sha1()
payload_hasher = hashlib.sha1()
with wpull.util.reset_file_offset(self.block_file):
if payload_offset is not None:
data = self.block_file.read(payload_offset)
block_hasher.update(data)
while True:
data = self.block_file.read(4096)
if data == b'':
break
block_hasher.update(data)
payload_hasher.update(data)
content_length = self.block_file.tell()
content_hash = block_hasher.digest()
self.fields['WARC-Block-Digest'] = 'sha1:{0}'.format(
base64.b32encode(content_hash).decode()
)
if payload_offset is not None:
payload_hash = payload_hasher.digest()
self.fields['WARC-Payload-Digest'] = 'sha1:{0}'.format(
base64.b32encode(payload_hash).decode()
)
self.fields['Content-Length'] = str(content_length) | Compute and add the checksum data to the record fields.
This function also sets the content length. |
def _recipients_from_cloud(self, recipients, field=None):
""" Transform a recipient from cloud data to object data """
recipients_data = []
for recipient in recipients:
recipients_data.append(
self._recipient_from_cloud(recipient, field=field))
return Recipients(recipients_data, parent=self, field=field) | Transform a recipient from cloud data to object data |
def _get_event_id(object_type: str) -> str:
"""Return an event key for the event on the object type.
This must be a unique event id for the object.
Args:
object_type (str): Type of object
Returns:
str, event id
"""
key = _keys.event_counter(object_type)
DB.watch(key, pipeline=True)
count = DB.get_value(key)
DB.increment(key)
DB.execute()
if count is None:
count = 0
return '{}_event_{:08d}'.format(object_type, int(count)) | Return an event key for the event on the object type.
This must be a unique event id for the object.
Args:
object_type (str): Type of object
Returns:
str, event id |
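The returned key is simply the object type plus a zero-padded event counter; the formatting step on its own (with a made-up object type and count, independent of the Redis-backed counter) looks like this:

```python
object_type, count = 'scheduling_block', 7
event_id = '{}_event_{:08d}'.format(object_type, int(count))
print(event_id)  # scheduling_block_event_00000007
```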
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
hashers = cls._ParseStringOption(
options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)
hasher_file_size_limit = cls._ParseNumericOption(
options, 'hasher_file_size_limit', default_value=0)
# TODO: validate hasher names.
if hasher_file_size_limit < 0:
raise errors.BadConfigOption(
'Invalid hasher file size limit value cannot be negative.')
setattr(configuration_object, '_hasher_names_string', hashers)
setattr(
configuration_object, '_hasher_file_size_limit', hasher_file_size_limit) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation. |
def get_parser(self):
"""
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
"""
parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(),
stream=self.stderr)
subparsers = parser.add_subparsers(
title='subcommands',
)
for name, command in self.registry.items():
cmdparser = subparsers.add_parser(name, help=command.help)
for argument in command.get_args():
cmdparser.add_argument(*argument.args, **argument.kwargs)
command.setup_parser(parser, cmdparser)
cmdparser.set_defaults(func=command.handle)
return parser | Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*. |
def from_conversation_event(conversation, conv_event, prev_conv_event,
datetimefmt, watermark_users=None):
"""Return MessageWidget representing a ConversationEvent.
Returns None if the ConversationEvent does not have a widget
representation.
"""
user = conversation.get_user(conv_event.user_id)
# Check whether the previous event occurred on the same day as this
# event.
if prev_conv_event is not None:
is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=
prev_conv_event.timestamp.astimezone(tz=None).date())
else:
is_new_day = False
if isinstance(conv_event, hangups.ChatMessageEvent):
return MessageWidget(conv_event.timestamp, conv_event.text,
datetimefmt, user, show_date=is_new_day,
watermark_users=watermark_users)
elif isinstance(conv_event, hangups.RenameEvent):
if conv_event.new_name == '':
text = ('{} cleared the conversation name'
.format(user.first_name))
else:
text = ('{} renamed the conversation to {}'
.format(user.first_name, conv_event.new_name))
return MessageWidget(conv_event.timestamp, text, datetimefmt,
show_date=is_new_day,
watermark_users=watermark_users)
elif isinstance(conv_event, hangups.MembershipChangeEvent):
event_users = [conversation.get_user(user_id) for user_id
in conv_event.participant_ids]
names = ', '.join([user.full_name for user in event_users])
if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:
text = ('{} added {} to the conversation'
.format(user.first_name, names))
else: # LEAVE
text = ('{} left the conversation'.format(names))
return MessageWidget(conv_event.timestamp, text, datetimefmt,
show_date=is_new_day,
watermark_users=watermark_users)
elif isinstance(conv_event, hangups.HangoutEvent):
text = {
hangups.HANGOUT_EVENT_TYPE_START: (
'A Hangout call is starting.'
),
hangups.HANGOUT_EVENT_TYPE_END: (
'A Hangout call ended.'
),
hangups.HANGOUT_EVENT_TYPE_ONGOING: (
'A Hangout call is ongoing.'
),
}.get(conv_event.event_type, 'Unknown Hangout call event.')
return MessageWidget(conv_event.timestamp, text, datetimefmt,
show_date=is_new_day,
watermark_users=watermark_users)
elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):
status_on = hangups.GROUP_LINK_SHARING_STATUS_ON
status_text = ('on' if conv_event.new_status == status_on
else 'off')
text = '{} turned {} joining by link.'.format(user.first_name,
status_text)
return MessageWidget(conv_event.timestamp, text, datetimefmt,
show_date=is_new_day,
watermark_users=watermark_users)
else:
# conv_event is a generic hangups.ConversationEvent.
text = 'Unknown conversation event'
return MessageWidget(conv_event.timestamp, text, datetimefmt,
show_date=is_new_day,
watermark_users=watermark_users) | Return MessageWidget representing a ConversationEvent.
Returns None if the ConversationEvent does not have a widget
representation. |
def create_ip_arp_reply(srchw, dsthw, srcip, targetip):
'''
Create an ARP reply (just change what needs to be changed
from a request)
'''
pkt = create_ip_arp_request(srchw, srcip, targetip)
pkt[0].dst = dsthw
pkt[1].operation = ArpOperation.Reply
pkt[1].targethwaddr = dsthw
return pkt | Create an ARP reply (just change what needs to be changed
from a request) |
def layers(self):
"""
similar to parent images, except that it uses the /history API endpoint
:return:
"""
# sample output:
# {
# "Created": 1457116802,
# "Id": "sha256:507cb13a216097710f0d234668bf64a4c92949c573ba15eba13d05aad392fe04",
# "Size": 204692029,
# "Tags": [
# "docker.io/fedora:latest"
# ],
# "Comment": "",
# "CreatedBy": "/bin/sh -c #(nop) ADD file:bcb5e5c... in /"
# }
try:
response = self.d.history(self.image_id)
except docker.errors.NotFound:
raise NotAvailableAnymore()
layers = []
for l in response:
layer_id = l["Id"]
if layer_id == "<missing>":
layers.append(DockerImage(l, self.docker_backend))
else:
layers.append(self.docker_backend.get_image_by_id(layer_id))
return layers | similar as parent images, except that it uses /history API endpoint
:return: |
def parse_relations(
belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
"""Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors):
"""
quotes = char_locs["quotes"]
quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])
for match in relations_pattern_middle.finditer(belstr):
(start, end) = match.span(1)
# log.debug(f'Relation-middle {match}')
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
for match in relations_pattern_end.finditer(belstr):
(start, end) = match.span(1)
log.debug(f"Relation-end {match}")
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
test_range = set(start)
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
return parsed, errors | Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors): |
def set_channel_created(self, channel_link, channel_id):
""" set_channel_created: records progress after creating channel on Kolibri Studio
Args:
channel_link (str): link to uploaded channel
channel_id (str): id of channel that has been uploaded
Returns: None
"""
self.channel_link = channel_link
self.channel_id = channel_id
self.__record_progress(Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE) | set_channel_created: records progress after creating channel on Kolibri Studio
Args:
channel_link (str): link to uploaded channel
channel_id (str): id of channel that has been uploaded
Returns: None |
def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False):
"""apply the SFR segement multiplier parameters. Expected to be run in the same dir
as the model exists
Parameters
----------
reach_pars : bool
if reach parameters need to be applied
Returns
-------
sfr : flopy.modflow.ModflowSfr instance
Note
----
expects "sfr_seg_pars.config" to exist
expects <nam_file>+"_backup_.sfr" to exist
"""
if not seg_pars and not reach_pars:
raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False")
#if seg_pars and reach_pars:
# raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are True")
import flopy
bak_sfr_file,pars = None,None
# if seg_pars:
# config_file = "sfr_seg_pars.config"
# idx_cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts']
# else:
# config_file = "sfr_reach_pars.config"
# idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
#
# assert os.path.exists(config_file),"gw_utils.apply_sfr_pars() error: config file {0} missing".format(config_file)
# with open(config_file, 'r') as f:
# pars = {}
# for line in f:
# line = line.strip().split()
# pars[line[0]] = line[1]
#
# m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False)
# bak_sfr_file = pars["nam_file"] + "_backup_.sfr"
# sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
# sfrfile = pars["sfr_filename"]
#
#
# mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0)
# present_cols = [c for c in idx_cols if c in mlt_df.columns]
# mlt_cols = mlt_df.columns.drop(present_cols)
#
# if seg_pars:
# for key, val in m.sfr.segment_data.items():
# df = pd.DataFrame.from_records(val)
# df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
# val = df.to_records(index=False)
# sfr.segment_data[key] = val
# else:
# df = pd.DataFrame.from_records(m.sfr.reach_data)
# df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
# sfr.reach_data = df.to_records(index=False)
#
#
# if "time_mult_file" in pars:
# time_mult_file = pars["time_mult_file"]
# time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False, index_col=0)
# for kper,sdata in m.sfr.segment_data.items():
# assert kper in time_mlt_df.index,"gw_utils.apply_sfr_seg_parameters() error: kper "+\
# "{0} not in time_mlt_df index".format(kper)
# for col in time_mlt_df.columns:
# sdata[col] *= time_mlt_df.loc[kper,col]
#
#
# sfr.write_file(filename=sfrfile)
# return sfr
if seg_pars:
assert os.path.exists("sfr_seg_pars.config")
with open("sfr_seg_pars.config",'r') as f:
pars = {}
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
bak_sfr_file = pars["nam_file"]+"_backup_.sfr"
#m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = pars["sfr_filename"]
mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0)
time_mlt_df = None
if "time_mult_file" in pars:
time_mult_file = pars["time_mult_file"]
time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0)
idx_cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts']
present_cols = [c for c in idx_cols if c in mlt_df.columns]
mlt_cols = mlt_df.columns.drop(present_cols)
for key, val in m.sfr.segment_data.items():
df = pd.DataFrame.from_records(val)
df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
val = df.to_records(index=False)
sfr.segment_data[key] = val
if reach_pars:
assert os.path.exists("sfr_reach_pars.config")
with open("sfr_reach_pars.config", 'r') as f:
r_pars = {}
for line in f:
line = line.strip().split()
r_pars[line[0]] = line[1]
if bak_sfr_file is None: # will be the case is seg_pars is false
bak_sfr_file = r_pars["nam_file"]+"_backup_.sfr"
#m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(r_pars["nam_file"], load_only=[], check=False)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = r_pars["sfr_filename"]
r_mlt_df = pd.read_csv(r_pars["mult_file"],sep=',',index_col=0)
r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols)
r_df = pd.DataFrame.from_records(m.sfr.reach_data)
r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols]
sfr.reach_data = r_df.to_records(index=False)
#m.remove_package("sfr")
if pars is not None and "time_mult_file" in pars:
time_mult_file = pars["time_mult_file"]
time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0)
for kper, sdata in m.sfr.segment_data.items():
assert kper in time_mlt_df.index, "gw_utils.apply_sfr_seg_parameters() error: kper " + \
"{0} not in time_mlt_df index".format(kper)
for col in time_mlt_df.columns:
sdata[col] *= time_mlt_df.loc[kper, col]
sfr.write_file(filename=sfrfile)
    return sfr | apply the SFR segment multiplier parameters. Expected to be run in the same directory
where the model exists
Parameters
----------
reach_pars : bool
if reach parameters need to be applied
Returns
-------
sfr : flopy.modflow.ModflowSfr instance
Note
----
expects "sfr_seg_pars.config" to exist
expects <nam_file>+"_backup_.sfr" to exist |
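For reference, the config file parsed above is a plain text file of whitespace-separated key/value pairs. The sketch below writes a minimal, hypothetical `sfr_seg_pars.config`; the file names are placeholders, but the keys mirror exactly those read by the function (`nam_file`, `sfr_filename`, `mult_file`, and optionally `time_mult_file`).

```python
# Hypothetical example of the whitespace-delimited config parsed by apply_sfr_pars();
# the file names are placeholders, the keys match those read in the function above.
seg_config = {
    "nam_file": "model.nam",
    "sfr_filename": "model.sfr",
    "mult_file": "sfr_seg_pars_mult.dat",
    # "time_mult_file": "sfr_seg_time_mult.dat",  # optional temporal multipliers
}
with open("sfr_seg_pars.config", "w") as f:
    for key, val in seg_config.items():
        f.write("{0} {1}\n".format(key, val))
```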
def resetAndRejoin(self, timeout):
"""reset and join back Thread Network with a given timeout delay
Args:
timeout: a timeout interval before rejoin Thread Network
Returns:
True: successful to reset and rejoin Thread Network
False: fail to reset and rejoin the Thread Network
"""
print '%s call resetAndRejoin' % self.port
print timeout
try:
self._sendline('reset')
self.isPowerDown = True
time.sleep(timeout)
if self.deviceRole == Thread_Device_Role.SED:
self.setPollingRate(self.sedPollingRate)
self.__startOpenThread()
time.sleep(3)
if self.__sendCommand('state')[0] == 'disabled':
print '[FAIL] reset and rejoin'
return False
return True
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("resetAndRejoin() Error: " + str(e)) | reset and join back Thread Network with a given timeout delay
Args:
timeout: a timeout interval before rejoin Thread Network
Returns:
True: successful to reset and rejoin Thread Network
False: fail to reset and rejoin the Thread Network |
def generateSplines(self):
"""#TODO: docstring
"""
_ = returnSplineList(self.dependentVar, self.independentVar,
subsetPercentage=self.splineSubsetPercentage,
cycles=self.splineCycles,
minKnotPoints=self.splineMinKnotPoins,
initialKnots=self.splineInitialKnots,
splineOrder=self.splineOrder,
terminalExpansion=self.splineTerminalExpansion
)
self.splines = _ | #TODO: docstring |
def _equalizeHistogram(img):
'''
histogram equalisation not bounded to int() or an image depth of 8 bit
works also with negative numbers
'''
# to float if int:
intType = None
if 'f' not in img.dtype.str:
TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
np.dtype('uint16'): np.float32,
np.dtype('uint32'): np.float64,
np.dtype('uint64'): np.float64}
intType = img.dtype
img = img.astype(TO_FLOAT_TYPES[intType], copy=False)
    # get image depth
DEPTH_TO_NBINS = {np.dtype('float16'): 256, # uint8
np.dtype('float32'): 32768, # uint16
np.dtype('float64'): 2147483648} # uint32
nBins = DEPTH_TO_NBINS[img.dtype]
    # scale to -1 to 1 due to scikit-image restrictions
mn, mx = np.amin(img), np.amax(img)
if abs(mn) > abs(mx):
mx = mn
img /= mx
img = exposure.equalize_hist(img, nbins=nBins)
img *= mx
if intType:
img = img.astype(intType)
return img | histogram equalisation not bounded to int() or an image depth of 8 bit
works also with negative numbers |
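A minimal usage sketch, assuming `numpy` and `skimage.exposure` are importable (as the function body requires); the synthetic 16-bit image is hypothetical.

```python
# Minimal usage sketch: equalize a synthetic 16-bit image.
import numpy as np

img = (np.random.rand(64, 64) * 60000).astype(np.uint16)
eq = _equalizeHistogram(img)
print(eq.dtype, eq.min(), eq.max())  # the original integer dtype is restored (uint16 here)
```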
def check_purge_status(self, purge_id):
"""Get the status and times of a recently completed purge."""
content = self._fetch("/purge?id=%s" % purge_id)
return map(lambda x: FastlyPurgeStatus(self, x), content) | Get the status and times of a recently completed purge. |
def iptag_clear(self, iptag, x, y):
"""Clear an IPTag.
Parameters
----------
iptag : int
Index of the IPTag to clear.
"""
self._send_scp(x, y, 0, SCPCommands.iptag,
int(consts.IPTagCommands.clear) << 16 | iptag) | Clear an IPTag.
Parameters
----------
iptag : int
Index of the IPTag to clear. |
def mix(self, color1, color2, weight=50, *args):
"""This algorithm factors in both the user-provided weight
and the difference between the alpha values of the two colors
to decide how to perform the weighted average of the two RGB values.
It works by first normalizing both parameters to be within [-1, 1],
where 1 indicates "only use color1", -1 indicates "only use color 0",
and all values in between indicated a proportionately weighted average.
Once we have the normalized variables w and a,
we apply the formula (w + a)/(1 + w*a)
to get the combined weight (in [-1, 1]) of color1.
This formula has two especially nice properties:
* When either w or a are -1 or 1, the combined weight is also that number
(cases where w * a == -1 are undefined, and handled as a special case).
* When a is 0, the combined weight is w, and vice versa
Finally, the weight of color1 is renormalized to be within [0, 1]
and the weight of color2 is given by 1 minus the weight of color1.
Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein
http://sass-lang.com
args:
color1 (str): first color
color2 (str): second color
weight (int/str): weight
raises:
ValueError
returns:
str
"""
if color1 and color2:
if isinstance(weight, string_types):
weight = float(weight.strip('%'))
weight = ((weight / 100.0) * 2) - 1
rgb1 = self._hextorgb(color1)
rgb2 = self._hextorgb(color2)
alpha = 0
w1 = (((weight if weight * alpha == -1 else weight + alpha) /
(1 + weight * alpha)) + 1)
w1 = w1 / 2.0
w2 = 1 - w1
rgb = [
rgb1[0] * w1 + rgb2[0] * w2,
rgb1[1] * w1 + rgb2[1] * w2,
rgb1[2] * w1 + rgb2[2] * w2,
]
return self._rgbatohex(rgb)
raise ValueError('Illegal color values') | This algorithm factors in both the user-provided weight
and the difference between the alpha values of the two colors
to decide how to perform the weighted average of the two RGB values.
It works by first normalizing both parameters to be within [-1, 1],
where 1 indicates "only use color1", -1 indicates "only use color 0",
and all values in between indicated a proportionately weighted average.
Once we have the normalized variables w and a,
we apply the formula (w + a)/(1 + w*a)
to get the combined weight (in [-1, 1]) of color1.
This formula has two especially nice properties:
* When either w or a are -1 or 1, the combined weight is also that number
(cases where w * a == -1 are undefined, and handled as a special case).
* When a is 0, the combined weight is w, and vice versa
Finally, the weight of color1 is renormalized to be within [0, 1]
and the weight of color2 is given by 1 minus the weight of color1.
Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein
http://sass-lang.com
args:
color1 (str): first color
color2 (str): second color
weight (int/str): weight
raises:
ValueError
returns:
str |
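Since `alpha` is hard-coded to 0 in the body above, the combined weight reduces to `w1 = (w + 1) / 2` with `w = weight/100 * 2 - 1`. A short self-contained check of that arithmetic, independent of the surrounding class:

```python
# Self-contained check of the weighting arithmetic used in mix() when alpha == 0.
def mix_weights(weight_percent):
    w = (weight_percent / 100.0) * 2 - 1   # normalize to [-1, 1]
    w1 = (w + 1) / 2.0                     # combined weight of color1 (alpha == 0)
    return w1, 1 - w1

print(mix_weights(50))   # (0.5, 0.5)   -> even blend
print(mix_weights(25))   # (0.25, 0.75) -> result sits closer to color2
```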
def _create_archive_table(self, table_name):
'''
Dynamo implementation of BaseDataManager create_archive_table
waiter object is implemented to ensure table creation before moving on
this will slow down table creation. However, since we are only creating
        table once this should not impact users.
Parameters
----------
table_name: str
Returns
-------
None
'''
if table_name in self._get_table_names():
raise KeyError('Table "{}" already exists'.format(table_name))
try:
table = self._resource.create_table(
TableName=table_name,
KeySchema=[{'AttributeName': '_id', 'KeyType': 'HASH'}],
AttributeDefinitions=[
{'AttributeName': '_id', 'AttributeType': 'S'}],
ProvisionedThroughput={
'ReadCapacityUnits': 123,
'WriteCapacityUnits': 123})
table.meta.client.get_waiter('table_exists').wait(
TableName=table_name)
except ValueError:
            # Error handling for a Windows incompatibility issue
msg = 'Table creation failed'
assert table_name in self._get_table_names(), msg | Dynamo implementation of BaseDataManager create_archive_table
waiter object is implemented to ensure table creation before moving on
this will slow down table creation. However, since we are only creating
table once this should not impact users.
Parameters
----------
table_name: str
Returns
-------
None |
def serialize_gen(
obj_pyxb, encoding='utf-8', pretty=False, strip_prolog=False, xslt_url=None
):
"""Serialize PyXB object to XML.
Args:
obj_pyxb: PyXB object
PyXB object to serialize.
encoding: str
Encoding to use for XML doc bytes
pretty: bool
True: Use pretty print formatting for human readability.
strip_prolog:
True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``),
from the resulting XML doc.
xslt_url: str
If specified, add a processing instruction to the XML doc that specifies the
download location for an XSLT stylesheet.
Returns:
XML document
"""
assert d1_common.type_conversions.is_pyxb(obj_pyxb)
assert encoding in (None, 'utf-8', 'UTF-8')
try:
obj_dom = obj_pyxb.toDOM()
except pyxb.ValidationError as e:
raise ValueError(
'Unable to serialize PyXB to XML. error="{}"'.format(e.details())
)
except pyxb.PyXBException as e:
raise ValueError('Unable to serialize PyXB to XML. error="{}"'.format(str(e)))
if xslt_url:
xslt_processing_instruction = obj_dom.createProcessingInstruction(
'xml-stylesheet', 'type="text/xsl" href="{}"'.format(xslt_url)
)
root = obj_dom.firstChild
obj_dom.insertBefore(xslt_processing_instruction, root)
if pretty:
xml_str = obj_dom.toprettyxml(indent=' ', encoding=encoding)
# Remove empty lines in the result caused by a bug in toprettyxml()
if encoding is None:
xml_str = re.sub(r'^\s*$\n', r'', xml_str, flags=re.MULTILINE)
else:
            xml_str = re.sub(rb'^\s*$\n', b'', xml_str, flags=re.MULTILINE)
else:
xml_str = obj_dom.toxml(encoding)
if strip_prolog:
if encoding is None:
xml_str = re.sub(r'^<\?(.*)\?>', r'', xml_str)
else:
            xml_str = re.sub(rb'^<\?(.*)\?>', b'', xml_str)
return xml_str.strip() | Serialize PyXB object to XML.
Args:
obj_pyxb: PyXB object
PyXB object to serialize.
encoding: str
Encoding to use for XML doc bytes
pretty: bool
True: Use pretty print formatting for human readability.
strip_prolog:
True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``),
from the resulting XML doc.
xslt_url: str
If specified, add a processing instruction to the XML doc that specifies the
download location for an XSLT stylesheet.
Returns:
XML document |
def toLily(self):
'''
Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code
'''
lilystring = ""
if not self.autoBeam:
lilystring += "\\autoBeamOff"
children = self.SortedChildren()
if not hasattr(self, "transpose"):
self.transpose = None
for child in range(len(children)):
measureNode = self.GetChild(children[child])
measureNode.autoBeam = self.autoBeam
lilystring += " % measure " + str(children[child]) + "\n"
lilystring += measureNode.toLily() + "\n\n"
return lilystring | Method which converts the object instance, its attributes and children to a string of lilypond code
:return: str of lilypond code |
def basename_without_extension(self):
"""
Get the ``os.path.basename`` of the local file, if any, with extension removed.
"""
ret = self.basename.rsplit('.', 1)[0]
if ret.endswith('.tar'):
ret = ret[0:len(ret)-4]
return ret | Get the ``os.path.basename`` of the local file, if any, with extension removed. |
def insert_paulis(self, indices=None, paulis=None, pauli_labels=None):
"""
Insert or append pauli to the targeted indices.
If indices is None, it means append at the end.
Args:
indices (list[int]): the qubit indices to be inserted
paulis (Pauli): the to-be-inserted or appended pauli
pauli_labels (list[str]): the to-be-inserted or appended pauli label
Note:
            the indices refer to the location of the original paulis,
e.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI'
the pauli will be updated to ZY'I'XI'Z'
'Z' and 'I' are inserted before the qubit at 0 and 2.
Returns:
Pauli: self
Raises:
QiskitError: provide both `paulis` and `pauli_labels` at the same time
"""
if pauli_labels is not None:
if paulis is not None:
raise QiskitError("Please only provide either `paulis` or `pauli_labels`")
if isinstance(pauli_labels, str):
pauli_labels = list(pauli_labels)
# since pauli label is in reversed order.
paulis = Pauli.from_label(pauli_labels[::-1])
if indices is None: # append
self._z = np.concatenate((self._z, paulis.z))
self._x = np.concatenate((self._x, paulis.x))
else:
if not isinstance(indices, list):
indices = [indices]
self._z = np.insert(self._z, indices, paulis.z)
self._x = np.insert(self._x, indices, paulis.x)
return self | Insert or append pauli to the targeted indices.
If indices is None, it means append at the end.
Args:
indices (list[int]): the qubit indices to be inserted
paulis (Pauli): the to-be-inserted or appended pauli
pauli_labels (list[str]): the to-be-inserted or appended pauli label
Note:
the indices refer to the location of the original paulis,
e.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI'
the pauli will be updated to ZY'I'XI'Z'
'Z' and 'I' are inserted before the qubit at 0 and 2.
Returns:
Pauli: self
Raises:
QiskitError: provide both `paulis` and `pauli_labels` at the same time |
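The placement semantics come from `numpy.insert`, which inserts each new element before the given index of the original array. A small self-contained illustration (the letters stand in for the boolean z/x entries):

```python
# np.insert places each new element *before* the corresponding index of the
# original array, which is what gives insert_paulis() its placement behaviour.
import numpy as np

orig = np.array(list("abcd"))
out = np.insert(orig, [0, 2], ["X", "Y"])
print(out)  # ['X' 'a' 'b' 'Y' 'c' 'd']
```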
def sparse_to_unmasked_sparse(self):
"""The 1D index mappings between the masked sparse-grid and unmasked sparse grid."""
return mapping_util.sparse_to_unmasked_sparse_from_mask_and_pixel_centres(
total_sparse_pixels=self.total_sparse_pixels, mask=self.regular_grid.mask,
unmasked_sparse_grid_pixel_centres=self.unmasked_sparse_grid_pixel_centres).astype('int') | The 1D index mappings between the masked sparse-grid and unmasked sparse grid. |
def rotate(a, th):
"""Return cartesian vectors, after rotation by specified angles about
each degree of freedom.
Parameters
----------
a: array, shape (n, d)
Input d-dimensional cartesian vectors, left unchanged.
th: array, shape (n, m)
Angles by which to rotate about each m rotational degree of freedom
(m=1 in 2 dimensions, m=3 in 3 dimensions).
Returns
-------
ar: array, shape of a
Rotated cartesian vectors.
"""
return np.sum(a[..., np.newaxis] * R_rot(th), axis=-2) | Return cartesian vectors, after rotation by specified angles about
each degree of freedom.
Parameters
----------
a: array, shape (n, d)
Input d-dimensional cartesian vectors, left unchanged.
th: array, shape (n, m)
Angles by which to rotate about each m rotational degree of freedom
(m=1 in 2 dimensions, m=3 in 3 dimensions).
Returns
-------
ar: array, shape of a
Rotated cartesian vectors. |
def find_converting_reactions(model, pair):
"""
Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactions that have one of the pair on their left-hand
side and the other on the right-hand side.
"""
first = set(find_met_in_model(model, pair[0]))
second = set(find_met_in_model(model, pair[1]))
hits = list()
for rxn in model.reactions:
        # FIXME: Using `set.issubset` here would be much more idiomatic.
if len(first & set(rxn.reactants)) > 0 and len(
second & set(rxn.products)) > 0:
hits.append(rxn)
elif len(first & set(rxn.products)) > 0 and len(
second & set(rxn.reactants)) > 0:
hits.append(rxn)
return frozenset(hits) | Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactions that have one of the pair on their left-hand
side and the other on the right-hand side. |
def combine_tax_scales(node):
"""
Combine all the MarginalRateTaxScales in the node into a single MarginalRateTaxScale.
"""
combined_tax_scales = None
for child_name in node:
child = node[child_name]
if not isinstance(child, AbstractTaxScale):
log.info('Skipping {} with value {} because it is not a tax scale'.format(child_name, child))
continue
if combined_tax_scales is None:
combined_tax_scales = MarginalRateTaxScale(name = child_name)
combined_tax_scales.add_bracket(0, 0)
combined_tax_scales.add_tax_scale(child)
return combined_tax_scales | Combine all the MarginalRateTaxScales in the node into a single MarginalRateTaxScale. |
def _distort_color(image, color_ordering=0, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, "distort_color", [image]):
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError("color_ordering must be in [0, 3]")
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0) | Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3] |
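A minimal usage sketch, assuming a TensorFlow version whose `tf.name_scope` accepts the three-argument form used in the body above (TF 1.x style); the input image is synthetic.

```python
# Minimal usage sketch (assumes TF 1.x-style tf.name_scope as used in the body above).
import tensorflow as tf

image = tf.random.uniform([224, 224, 3], minval=0.0, maxval=1.0)  # single image in [0, 1]
thread_id = 2
distorted = _distort_color(image, color_ordering=thread_id % 4)
```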
def from_text(cls, text, mapping='mapping'):
"""
Create a Profile instance from the Unicode graphemes found in `text`.
Parameters
----------
text
mapping
Returns
-------
A Profile instance.
"""
graphemes = Counter(grapheme_pattern.findall(text))
specs = [
OrderedDict([
(cls.GRAPHEME_COL, grapheme),
('frequency', frequency),
(mapping, grapheme)])
for grapheme, frequency in graphemes.most_common()]
return cls(*specs) | Create a Profile instance from the Unicode graphemes found in `text`.
Parameters
----------
text
mapping
Returns
-------
A Profile instance. |
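The counting step relies on `grapheme_pattern`, which appears to match one Unicode grapheme cluster at a time. A self-contained approximation of that step using the third-party `regex` module and its `\X` grapheme pattern (an assumption about what `grapheme_pattern` does, not the package's own code):

```python
# Approximation of the grapheme-counting step, assuming grapheme_pattern matches
# single Unicode grapheme clusters (the regex module's \X does this).
from collections import Counter
import regex

text = "aba\u0301ba\u0301"            # graphemes: 'a', 'b', 'á', 'b', 'á'
counts = Counter(regex.findall(r"\X", text))
print(counts.most_common())            # e.g. [('b', 2), ('á', 2), ('a', 1)]
```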
def index_agreement(s, o):
"""
index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement
"""
# s,o = filter_nan(s,o)
ia = 1 - (np.sum((o-s)**2)) /\
(np.sum((np.abs(s-np.mean(o))+np.abs(o-np.mean(o)))**2))
return ia | index of agreement
input:
s: simulated
o: observed
output:
ia: index of agreement |
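A quick numerical check of the formula with hypothetical simulated and observed values:

```python
# Quick numerical check of the index-of-agreement formula (values are hypothetical).
import numpy as np

o = np.array([1.0, 2.0, 3.0, 4.0])   # observed
s = np.array([1.1, 1.9, 3.2, 3.8])   # simulated
ia = index_agreement(s, o)
print(round(ia, 3))                   # close to 1.0 for a good fit (~0.995 here)
```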
def random(cls, num_qubits, seed=None):
"""Return a random Pauli on number of qubits.
Args:
num_qubits (int): the number of qubits
seed (int): Optional. To set a random seed.
Returns:
Pauli: the random pauli
"""
if seed is not None:
np.random.seed(seed)
z = np.random.randint(2, size=num_qubits).astype(np.bool)
x = np.random.randint(2, size=num_qubits).astype(np.bool)
return cls(z, x) | Return a random Pauli on number of qubits.
Args:
num_qubits (int): the number of qubits
seed (int): Optional. To set a random seed.
Returns:
Pauli: the random pauli |
def _get_directives_and_roles_from_sphinx():
"""Return a tuple of Sphinx directive and roles."""
if SPHINX_INSTALLED:
sphinx_directives = list(sphinx.domains.std.StandardDomain.directives)
sphinx_roles = list(sphinx.domains.std.StandardDomain.roles)
for domain in [sphinx.domains.c.CDomain,
sphinx.domains.cpp.CPPDomain,
sphinx.domains.javascript.JavaScriptDomain,
sphinx.domains.python.PythonDomain]:
sphinx_directives += list(domain.directives) + [
'{}:{}'.format(domain.name, item)
for item in list(domain.directives)]
sphinx_roles += list(domain.roles) + [
'{}:{}'.format(domain.name, item)
for item in list(domain.roles)]
else:
sphinx_roles = [
'abbr',
'command',
'dfn',
'doc',
'download',
'envvar',
'file',
'guilabel',
'kbd',
'keyword',
'mailheader',
'makevar',
'manpage',
'menuselection',
'mimetype',
'newsgroup',
'option',
'program',
'py:func',
'ref',
'regexp',
'samp',
'term',
'token']
sphinx_directives = [
'autosummary',
'currentmodule',
'centered',
'c:function',
'c:type',
'include',
'deprecated',
'envvar',
'glossary',
'index',
'no-code-block',
'literalinclude',
'hlist',
'option',
'productionlist',
'py:function',
'seealso',
'toctree',
'todo',
'versionadded',
'versionchanged']
return (sphinx_directives, sphinx_roles) | Return a tuple of Sphinx directive and roles. |
def download_image(self, img_url):
""" Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
"""
img_request = None
try:
img_request = requests.request(
'get', img_url, stream=True, proxies=self.proxies)
if img_request.status_code != 200:
raise ImageDownloadError(img_request.status_code)
        except requests.exceptions.RequestException:
raise ImageDownloadError()
if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and\
int(img_request.headers['content-length']) < self.max_filesize):
img_content = img_request.content
with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
byte_image = bytes(img_content)
f.write(byte_image)
else:
raise ImageSizeError(img_request.headers['content-length'])
return True | Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required. |
def load_gffutils_db(f):
"""
Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database.
"""
import gffutils
db = gffutils.FeatureDB(f, keep_order=True)
return db | Load database for gffutils.
Parameters
----------
f : str
Path to database.
Returns
-------
db : gffutils.FeatureDB
gffutils feature database. |
def get_compatible_generator_action(self, filename):
"""
Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found.
Args:
filename (str): The filename of the template to process.
"""
# find first compatible generator action
for action in self.__generator_actions:
if action.act_on_file(filename):
return action
return None | Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found.
Args:
filename (str): The filename of the template to process. |
def rotate_content(day=None):
""" this method gets the parameters that are needed for rotate_latest
and rotate_featured_in_homepage methods, and calls them both"""
# getting the content rotation settings from site settings
for main in Main.objects.all():
site = main.sites_rooted_here.all().first()
main_lang = Languages.for_site(site).languages.filter(
is_main_language=True).first()
index = SectionIndexPage.objects.live().child_of(main).first()
site_settings = SiteSettings.for_site(site)
if day is None:
day = timezone.now().weekday()
# calls the two rotate methods with the necessary params
if main and index:
rotate_latest(main_lang, index, main, site_settings, day)
rotate_featured_in_homepage(main_lang, day, main) | this method gets the parameters that are needed for rotate_latest
and rotate_featured_in_homepage methods, and calls them both |
def read_from_LSQ(self, LSQ_file):
"""
Clears all current interpretations and replaces them with
interpretations read from LSQ file.
Parameters
----------
LSQ_file : path to LSQ file to read in
"""
cont = self.user_warning(
"LSQ import only works if all measurements are present and not averaged during import from magnetometer files to magic format. Do you wish to continue reading interpretations?")
if not cont:
return
self.clear_interpretations(
message="""Do you wish to clear all previous interpretations on import?""")
old_s = self.s
for specimen in self.specimens:
self.select_specimen(specimen)
for i in range(len(self.Data[specimen]['zijdblock'])):
self.mark_meas_good(i)
self.select_specimen(old_s)
print("Reading LSQ file")
interps = read_LSQ(LSQ_file)
for interp in interps:
specimen = interp['er_specimen_name']
if specimen not in self.specimens:
print(
("specimen %s has no registered measurement data, skipping interpretation import" % specimen))
continue
PCA_type = interp['magic_method_codes'].split(':')[0]
tmin = self.Data[specimen]['zijdblock_steps'][interp['measurement_min_index']]
tmax = self.Data[specimen]['zijdblock_steps'][interp['measurement_max_index']]
if 'specimen_comp_name' in list(interp.keys()):
name = interp['specimen_comp_name']
else:
name = None
new_fit = self.add_fit(specimen, name, tmin, tmax, PCA_type)
if 'bad_measurement_index' in list(interp.keys()):
old_s = self.s
self.select_specimen(specimen)
for bmi in interp["bad_measurement_index"]:
try:
self.mark_meas_bad(bmi)
except IndexError:
print(
"Magic Measurments length does not match that recorded in LSQ file")
self.select_specimen(old_s)
if self.ie_open:
self.ie.update_editor()
self.update_selection() | Clears all current interpretations and replaces them with
interpretations read from LSQ file.
Parameters
----------
LSQ_file : path to LSQ file to read in |
def concatenate_not_none(l, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(l)):
if l[i] is not None:
mask.append(i)
# Concatenate them
l_stacked = np.concatenate([l[i] for i in mask], axis=axis)
return l_stacked | Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array. |
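A short usage example with hypothetical arrays; the `None` entry is simply skipped:

```python
# Usage example: None entries are skipped, the rest are stacked along axis 0.
import numpy as np

parts = [np.zeros((2, 3)), None, np.ones((4, 3))]
stacked = concatenate_not_none(parts, axis=0)
print(stacked.shape)  # (6, 3)
```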
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB
"""
db_file = gtf + '.db'
if gtf.endswith('.gz'):
db_file = gtf[:-3] + '.db'
if file_exists(db_file):
return gffutils.FeatureDB(db_file)
db_file = ':memory:' if in_memory else db_file
if in_memory or not file_exists(db_file):
debug('GTF database does not exist, creating...')
infer_extent = guess_infer_extent(gtf)
db = gffutils.create_db(gtf, dbfn=db_file,
infer_gene_extent=infer_extent)
return db
else:
return gffutils.FeatureDB(db_file) | create a gffutils DB |
def remove(self, flag, extra):
"""Remove Slackware binary packages
"""
self.flag = flag
self.extra = extra
self.dep_path = self.meta.log_path + "dep/"
dependencies, rmv_list = [], []
self.removed = self._view_removed()
if not self.removed:
print("") # new line at end
else:
msg = "package"
if len(self.removed) > 1:
msg = msg + "s"
try:
if self.meta.default_answer in ["y", "Y"]:
remove_pkg = self.meta.default_answer
else:
remove_pkg = raw_input(
"\nAre you sure to remove {0} {1} [y/N]? ".format(
str(len(self.removed)), msg))
except EOFError:
print("") # new line at exit
raise SystemExit()
if remove_pkg in ["y", "Y"]:
self._check_if_used(self.binary)
for rmv in self.removed:
# If package build and install with "slpkg -s sbo <package>"
# then look log file for dependencies in /var/log/slpkg/dep,
# read and remove all else remove only the package.
if (os.path.isfile(self.dep_path + rmv) and
self.meta.del_deps in ["on", "ON"] or
os.path.isfile(self.dep_path + rmv) and
"--deps" in self.extra):
dependencies = self._view_deps(self.dep_path, rmv)
if dependencies and self._rmv_deps_answer() in ["y",
"Y"]:
rmv_list += self._rmv_deps(dependencies, rmv)
else:
rmv_list += self._rmv_pkg(rmv)
else:
rmv_list += self._rmv_pkg(rmv)
# Prints all removed packages
self._reference_rmvs(rmv_list) | Remove Slackware binary packages |
def get_uuid(type=4):
"""
Get uuid value
"""
import uuid
name = 'uuid'+str(type)
u = getattr(uuid, name)
return u().hex | Get uuid value |
def encode(self, uuid, pad_length=22):
"""
Encodes a UUID into a string (LSB first) according to the alphabet
If leftmost (MSB) bits 0, string might be shorter
"""
return self._num_to_string(uuid.int, pad_to_length=pad_length) | Encodes a UUID into a string (LSB first) according to the alphabet
If the leftmost (MSB) bits are 0, the string might be shorter |
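The helper `_num_to_string` is not shown here; the sketch below illustrates the LSB-first base-alphabet encoding it presumably performs. The alphabet, helper name, and padding logic are assumptions for illustration only.

```python
# Self-contained sketch of LSB-first base-alphabet encoding of uuid.int
# (_num_to_string is not shown above; this only illustrates the idea).
import uuid

ALPHABET = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"  # hypothetical

def num_to_string(n, alphabet=ALPHABET, pad_to_length=22):
    digits = []
    while n:
        n, rem = divmod(n, len(alphabet))
        digits.append(alphabet[rem])      # least significant digit first
    digits.extend(alphabet[0] * max(0, pad_to_length - len(digits)))
    return "".join(digits)

print(num_to_string(uuid.uuid4().int))
```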
def create_configuration(self, node, ports):
"""Create RAID configuration on the bare metal.
This method creates the desired RAID configuration as read from
node['target_raid_config'].
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: The current RAID configuration of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
target_raid_config = node.get('target_raid_config', {}).copy()
return hpssa_manager.create_configuration(
raid_config=target_raid_config) | Create RAID configuration on the bare metal.
This method creates the desired RAID configuration as read from
node['target_raid_config'].
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: The current RAID configuration of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
} |
def get_energies(atoms_list):
""" Potential energy for a list of atoms objects"""
if len(atoms_list) == 1:
return atoms_list[0].get_potential_energy()
elif len(atoms_list) > 1:
energies = []
for atoms in atoms_list:
energies.append(atoms.get_potential_energy())
return energies | Potential energy for a list of atoms objects |
def get_thin_rect_vertices(ox, oy, dx, dy, r):
"""Given the starting point, ending point, and width, return a list of
vertex coordinates at the corners of the line segment
(really a thin rectangle).
"""
if ox < dx:
leftx = ox
rightx = dx
xco = 1
elif ox > dx:
leftx = ox * -1
rightx = dx * -1
xco = -1
else:
return [
ox - r, oy,
ox + r, oy,
ox + r, dy,
ox - r, dy
]
if oy < dy:
boty = oy
topy = dy
yco = 1
elif oy > dy:
boty = oy * -1
topy = dy * -1
yco = -1
else:
return [
ox, oy - r,
dx, oy - r,
dx, oy + r,
ox, oy + r
]
rise = topy - boty
run = rightx - leftx
theta = atan(rise/run)
theta_prime = ninety - theta
xoff = cos(theta_prime) * r
yoff = sin(theta_prime) * r
x1 = leftx + xoff
y1 = boty - yoff
x2 = rightx + xoff
y2 = topy - yoff
x3 = rightx - xoff
y3 = topy + yoff
x4 = leftx - xoff
y4 = boty + yoff
return [
x1 * xco, y1 * yco,
x2 * xco, y2 * yco,
x3 * xco, y3 * yco,
x4 * xco, y4 * yco
] | Given the starting point, ending point, and width, return a list of
vertex coordinates at the corners of the line segment
(really a thin rectangle). |
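For a purely horizontal segment the function returns early from the degenerate `oy == dy` branch, so the result can be read off directly; a quick worked example:

```python
# Worked example: horizontal segment from (0, 0) to (10, 0) with half-width r = 1
# hits the oy == dy branch above and returns the axis-aligned rectangle directly.
verts = get_thin_rect_vertices(0, 0, 10, 0, 1)
print(verts)  # [0, -1, 10, -1, 10, 1, 0, 1]
```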
def get_arguments(self):
"""Returns the additional options for the grid (such as the queue, memory requirements, ...)."""
# In python 2, the command line is unicode, which needs to be converted to string before pickling;
# In python 3, the command line is bytes, which can be pickled directly
args = loads(self.grid_arguments)['kwargs'] if isinstance(self.grid_arguments, bytes) else loads(self.grid_arguments.encode())['kwargs']
# in any case, the commands have to be converted to str
retval = {}
if 'pe_opt' in args:
retval['pe_opt'] = args['pe_opt']
if 'memfree' in args and args['memfree'] is not None:
retval['memfree'] = args['memfree']
if 'hvmem' in args and args['hvmem'] is not None:
retval['hvmem'] = args['hvmem']
if 'gpumem' in args and args['gpumem'] is not None:
retval['gpumem'] = args['gpumem']
if 'env' in args and len(args['env']) > 0:
retval['env'] = args['env']
if 'io_big' in args and args['io_big']:
retval['io_big'] = True
# also add the queue
if self.queue_name is not None:
retval['queue'] = str(self.queue_name)
return retval | Returns the additional options for the grid (such as the queue, memory requirements, ...). |
def set_common_fields(self, warc_type: str, content_type: str):
'''Set the required fields for the record.'''
self.fields[self.WARC_TYPE] = warc_type
self.fields[self.CONTENT_TYPE] = content_type
self.fields[self.WARC_DATE] = wpull.util.datetime_str()
self.fields[self.WARC_RECORD_ID] = '<{0}>'.format(uuid.uuid4().urn) | Set the required fields for the record. |
def load_modes(node):
"""Load all observing modes"""
if isinstance(node, list):
values = [load_mode(child) for child in node]
keys = [mode.key for mode in values]
return dict(zip(keys,values))
elif isinstance(node, dict):
        values = {key: load_mode(child) for key, child in node.items()}
return values
else:
raise NotImplementedError | Load all observing modes |
def load_modules(self):
"""Should instance interfaces and set them to interface, following `modules`"""
if self.INTERFACES_MODULE is None:
raise NotImplementedError("A module containing interfaces modules "
"should be setup in INTERFACES_MODULE !")
else:
for module, permission in self.modules.items():
i = getattr(self.INTERFACES_MODULE,
module).Interface(self, permission)
self.interfaces[module] = i | Should instance interfaces and set them to interface, following `modules` |
def update_asset(self, asset_form=None):
"""Updates an existing asset.
:param asset_form: the form containing the elements to be updated
:type asset_form: ``osid.repository.AssetForm``
        :raise: ``IllegalState`` -- ``asset_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``asset_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``asset_form`` did not originate from ``get_asset_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
if asset_form is None:
raise NullArgument()
if not isinstance(asset_form, abc_repository_objects.AssetForm):
raise InvalidArgument('argument type is not an AssetForm')
if not asset_form.is_for_update():
raise InvalidArgument('form is for create only, not update')
try:
if self._forms[asset_form.get_id().get_identifier()] == UPDATED:
raise IllegalState('form already used in an update transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not asset_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = construct_url('assets',
bank_id=self._catalog_idstr)
try:
result = self._put_request(url_path, asset_form._my_map)
except Exception:
raise # OperationFailed()
self._forms[asset_form.get_id().get_identifier()] = UPDATED
return objects.Asset(result) | Updates an existing asset.
:param asset_form: the form containing the elements to be updated
:type asset_form: ``osid.repository.AssetForm``
:raise: ``IllegalState`` -- ``asset_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``asset_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``asset_form`` did not originate from ``get_asset_form_for_update()``
*compliance: mandatory -- This method must be implemented.* |
def downsample(self, factor):
"""
Compute a downsampled version of the skeleton by striding while
preserving endpoints.
factor: stride length for downsampling the saved skeleton paths.
Returns: downsampled PrecomputedSkeleton
"""
if int(factor) != factor or factor < 1:
raise ValueError("Argument `factor` must be a positive integer greater than or equal to 1. Got: <{}>({})", type(factor), factor)
paths = self.interjoint_paths()
for i, path in enumerate(paths):
paths[i] = np.concatenate(
(path[0::factor, :], path[-1:, :]) # preserve endpoints
)
ds_skel = PrecomputedSkeleton.simple_merge(
[ PrecomputedSkeleton.from_path(path) for path in paths ]
).consolidate()
ds_skel.id = self.id
# TODO: I'm sure this could be sped up if need be.
index = {}
for i, vert in enumerate(self.vertices):
vert = tuple(vert)
index[vert] = i
for i, vert in enumerate(ds_skel.vertices):
vert = tuple(vert)
ds_skel.radii[i] = self.radii[index[vert]]
ds_skel.vertex_types[i] = self.vertex_types[index[vert]]
return ds_skel | Compute a downsampled version of the skeleton by striding while
preserving endpoints.
factor: stride length for downsampling the saved skeleton paths.
Returns: downsampled PrecomputedSkeleton |